repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
LeftThink/pytorch-lighthead | [
"5f4bf1c87b9be77bf7242ad89900239a9d66914c"
] | [
"lib/datasets/adas.py"
] | [
"# coding: utf-8\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import print_function\n\nimport xml.dom.minidom as minidom\n\nimport os\n# import PIL\nimport numpy as np\nimport scipy.sparse\nimport subprocess\n\ntry:\n import cPickle\nexcept ImportError:\n import pickle as cPickle\n\nimport math\nimport glob\nimport uuid\nimport scipy.io as sio\nimport xml.etree.ElementTree as ET\n\nfrom .imdb import imdb\nfrom .imdb import ROOT_DIR\nfrom . import ds_utils\nfrom .adas_eval import adas_eval\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom model.utils.config import cfg\n\n\n# <<<< obsolete\n\n\nclass adas(imdb):\n def __init__(self, image_set, year, devkit_path=None, sub_type='car'):\n imdb.__init__(self, 'adas_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path, 'ADAS' + self._year)\n\n if sub_type == 'car':\n self._classes = ('__background__', #always index 0\n 'car',)\n elif sub_type == 'tired':\n self._classes = ('__background__', #always index 0\n 'o','s','w')\n\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n\n self._image_ext = '.jpg'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n # self._roidb_handler = self.selective_search_roidb\n self._roidb_handler = self.gt_roidb\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp4'\n\n # PASCAL specific config options\n self.config = {'cleanup': True,\n 'use_salt': True,\n 'use_diff': False,\n 'matlab_eval': False,\n 'rpn_file': None,\n 'min_size': 2}\n\n assert os.path.exists(self._devkit_path), \\\n 'ADASdevkit path does not exist: {}'.format(self._devkit_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_id_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return i\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /ADASdevkit2007/ADAS2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL ADAS is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'ADASdevkit' + self._year)\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of 
interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n print(cache_file)\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = cPickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_selective_search_roidb(self, gt_roidb):\n filename = os.path.abspath(os.path.join(cfg.DATA_DIR,\n 'selective_search_data',\n self.name + '.mat'))\n assert os.path.exists(filename), \\\n 'Selective search data not found at: {}'.format(filename)\n raw_data = sio.loadmat(filename)['boxes'].ravel()\n\n box_list = []\n for i in range(raw_data.shape[0]):\n boxes = raw_data[i][:, (1, 0, 3, 2)] - 1\n keep = ds_utils.unique_boxes(boxes)\n boxes = boxes[keep, :]\n keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])\n boxes = boxes[keep, :]\n box_list.append(boxes)\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL ADAS\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n # if not self.config['use_diff']:\n # # Exclude the samples labeled as difficult\n # non_diff_objs = [\n # obj for obj in objs if int(obj.find('difficult').text) == 0]\n # # if len(non_diff_objs) != len(objs):\n # # print 'Removed {} difficult objects'.format(\n # # len(objs) - len(non_diff_objs))\n # objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = 
np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n ishards = np.zeros((num_objs), dtype=np.int32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n \n diffc = obj.find('difficult')\n difficult = 0 if diffc == None else int(diffc.text)\n ishards[ix] = difficult\n\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n \n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_ishard': ishards,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_adas_results_file_template(self):\n # ADASdevkit/results/ADAS2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n filedir = os.path.join(self._devkit_path, 'results', 'ADAS' + self._year, 'Main')\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_adas_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} ADAS results file'.format(cls))\n filename = self._get_adas_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the ADASdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._devkit_path,\n 'ADAS' + self._year,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'ADAS' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_adas_results_file_template().format(cls)\n rec, prec, ap = adas_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:\n cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- 
Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'ADASdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'adas_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print('Running:\\n{}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_adas_results_file(all_boxes)\n self._do_python_eval(output_dir)\n if self.config['matlab_eval']:\n self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_adas_results_file_template().format(cls)\n os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n d = adas('trainval', '2017')\n res = d.roidb\n from IPython import embed;\n\n embed()\n"
] | [
[
"numpy.mean",
"scipy.io.loadmat",
"numpy.zeros"
]
] |
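The `_load_pascal_annotation` method in the row above parses PASCAL-VOC-style XML annotations: it reads each `bndbox`, shifts pixel indexes to 0-based, and maps class names to indices. A minimal standalone sketch of that parsing step, using a hypothetical sample annotation and class map (not part of the ADAS dataset):

```python
import xml.etree.ElementTree as ET
import numpy as np

sample_xml = """
<annotation>
  <object>
    <name>car</name>
    <difficult>0</difficult>
    <bndbox><xmin>12</xmin><ymin>34</ymin><xmax>56</xmax><ymax>78</ymax></bndbox>
  </object>
</annotation>
"""

# illustrative class map matching the 'car' sub_type in the loader above
class_to_ind = {'__background__': 0, 'car': 1}

objs = ET.fromstring(sample_xml).findall('object')
boxes = np.zeros((len(objs), 4), dtype=np.uint16)
gt_classes = np.zeros(len(objs), dtype=np.int32)
for ix, obj in enumerate(objs):
    bbox = obj.find('bndbox')
    # make pixel indexes 0-based, as the loader does
    boxes[ix, :] = [float(bbox.find(tag).text) - 1
                    for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
    gt_classes[ix] = class_to_ind[obj.find('name').text.lower().strip()]

print(boxes, gt_classes)  # [[11 33 55 77]] [1]
```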
mehulfollytobevice/MachineLearning | [
"7d442907df4e8560bf5067d8bac660a3cb303393"
] | [
"K-NN Classification/KNN Classification from scratch/knn_from_scratch.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 9 21:03:57 2020\r\n\r\n@author: Mehul\r\n\"\"\"\r\n\r\n#importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport random\r\nimport warnings\r\nfrom matplotlib import style\r\nfrom collections import Counter\r\nfrom math import sqrt\r\nstyle.use('fivethirtyeight')\r\n\r\n#defining knn function\r\ndef k_nearest_neighbors(data,predict,k=3):\r\n\tdistances=[]\r\n\tif(len(data)>=k):\r\n\t\t#this is not an error it is just a warning , the algorithm still works \r\n\t\twarnings.warn('The value of k is less than the number of voting groups.')\r\n \r\n\tfor group in data:\r\n\t\t#data is a dictionary of lists with different groups of classes \r\n\t\tfor features in data[group]:\r\n\t\t\t#features represent the points in the dataset\r\n\t\t\t\r\n\t\t\t#original way\r\n\t\t\t#euclidean_distance=sqrt((features[0]-predict[0])**2+(features[1]-predict[1])**2)\r\n\t\t\t\r\n\t\t\t#faster way\r\n\t\t\teuclidean_distance=np.linalg.norm(np.array(features)-np.array(predict))\r\n\t\t\tdistances.append([euclidean_distance,group])\r\n\t\r\n\t#once we have the distances we dont care about them\r\n\t#we populate the list of votes which has the top k neighbors to the prediction point \r\n\tvotes=[i[1] for i in sorted(distances)[:k] ]\r\n\t#using counter we calculate the most common out of the nearest neighbors\r\n\tvote_result=Counter(votes).most_common(1)[0][0]\r\n\t\r\n\t#we can also give our confidence,confidence is the probability of your prediction being right\r\n\t#confidence=Counter(votes).most_common(1)[0][1]/k\r\n\t\r\n\treturn vote_result\r\n\r\ndef accuracy_of_result(train_set,test_set):\r\n\t#intialising \r\n\tcorrect=0\r\n\ttotal=0\r\n\t\r\n\t#testing and finding accuracy\r\n\tfor group in test_set:\r\n\t\tfor data in test_set[group]:\r\n\t\t\t#iterating through all the data in a class \r\n\t\t\tresult=k_nearest_neighbors(train_set,data,k=5)\r\n\t\t\tif (group==result):\r\n\t\t\t\tcorrect=correct+1\r\n\t\t\ttotal=total+1\r\n\taccuracy=correct/total\r\n\treturn accuracy\r\n\r\n''''\r\n#trial data\r\n#our data is in form of dictionary of lists\r\ndataset={'k':[[1,2],[2,3,],[3,1]],'r':[[6,5],[7,7],[8,6]]}\r\nnew_features=[5,7]\r\n\r\n#plotting the data\r\nplt.scatter(new_features[0],new_features[1],s=50)\r\nfor i in dataset:\r\n\tfor j in dataset[i]:\r\n\t\tprint(j)\r\n\t\tplt.scatter(j[0],j[1],s=100,color=i)\r\n\r\n#applying knn model\r\nresult=k_nearest_neighbors(dataset,new_features,k=3)#result represents the class the prediction point belongs to \r\n\r\n#plotting the prediction\r\nplt.scatter(new_features[0],new_features[1],s=50,color=result)\r\nfor i in dataset:\r\n\tfor j in dataset[i]:\r\n\t\tprint(j)\r\n\t\tplt.scatter(j[0],j[1],s=100,color=i)\r\n'''\r\n\r\n#Implmenting the model on the test dataset\r\n\r\n#importing the dataset\r\ndataset=pd.read_csv('breast-cancer-wisconsin.data.txt')\r\n\r\n#replacing missing instances with large numbers \r\ndataset.replace('?',-99999,inplace=True)\r\ndataset.drop(['id'],1,inplace=True)\r\ndataset=dataset.astype(float).values.tolist()\r\n\r\n#shuffling to data to include some randomness\r\n#this does not change the raltionship between the data\r\n#this is what can be used for cross-validation \r\nrandom.shuffle(dataset)\r\n\r\n#splitting the dataset into test set and train set\r\ntest_size=0.2\r\n\r\n#the train set and the test set are dictionary of lists\r\ntrain_set={2:[],4:[]}\r\ntest_set={2:[],4:[]}\r\n\r\n#slicing the data into train_data and 
test_data\r\ntrain_data=dataset[:-int(test_size*len(dataset))] #all the data upto the last 20%\r\ntest_data=dataset[-int(test_size*len(dataset)):] #the last 20%\r\n\r\n#populating the dictionary\r\n#here we take the data from the train_data and the test_data and use it to populate our dictionaries\r\n\r\nfor i in train_data:\r\n\ttrain_set[i[-1]].append(i[:-1])# i[-1] represents the class of the particular row\r\n\r\nfor i in test_data:\r\n\ttest_set[i[-1]].append(i[:-1])# i[-1] represents the class of the particular row\r\n\r\n#getting the accuracy of our knn model on the dataset\r\nprint('Accuracy of the result:',accuracy_of_result(train_set,test_set))"
] | [
[
"pandas.read_csv",
"matplotlib.style.use",
"numpy.array"
]
] |
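The `k_nearest_neighbors` function in the row above votes among the k closest training points. A standalone sketch of that voting logic, run on the trial data from the script's own commented-out block:

```python
import numpy as np
from collections import Counter

# trial data from the script: two classes, 'k' and 'r'
dataset = {'k': [[1, 2], [2, 3], [3, 1]], 'r': [[6, 5], [7, 7], [8, 6]]}
new_features = [5, 7]
k = 3

# Euclidean distance from the query point to every training point
distances = [(np.linalg.norm(np.array(features) - np.array(new_features)), group)
             for group in dataset for features in dataset[group]]

# take the k nearest and let them vote; confidence is the vote fraction
votes = [group for _, group in sorted(distances)[:k]]
result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
print(result, confidence)  # 'r' 1.0
```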
WonMian/coach | [
"67978248927f24ee09df6f1df842a14103aaf11b"
] | [
"rl_coach/agents/actor_critic_agent.py"
] | [
"#\n# Copyright (c) 2017 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import Union\n\nimport numpy as np\nimport scipy.signal\n\nfrom rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler\nfrom rl_coach.architectures.tensorflow_components.heads.policy_head import PolicyHeadParameters\nfrom rl_coach.architectures.tensorflow_components.heads.v_head import VHeadParameters\nfrom rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddlewareParameters\nfrom rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \\\n AgentParameters\nfrom rl_coach.logger import screen\nfrom rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters\nfrom rl_coach.spaces import DiscreteActionSpace\nfrom rl_coach.utils import last_sample\nfrom rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedderParameters\n\n\nclass ActorCriticAlgorithmParameters(AlgorithmParameters):\n def __init__(self):\n super().__init__()\n self.policy_gradient_rescaler = PolicyGradientRescaler.A_VALUE\n self.apply_gradients_every_x_episodes = 5\n self.beta_entropy = 0\n self.num_steps_between_gradient_updates = 5000 # this is called t_max in all the papers\n self.gae_lambda = 0.96\n self.estimate_state_value_using_gae = False\n\n\nclass ActorCriticNetworkParameters(NetworkParameters):\n def __init__(self):\n super().__init__()\n self.input_embedders_parameters = {'observation': InputEmbedderParameters()}\n self.middleware_parameters = FCMiddlewareParameters()\n self.heads_parameters = [VHeadParameters(), PolicyHeadParameters()]\n self.loss_weights = [0.5, 1.0]\n self.rescale_gradient_from_head_by_factor = [1, 1]\n self.optimizer_type = 'Adam'\n self.clip_gradients = 40.0\n self.async_training = True\n\n\nclass ActorCriticAgentParameters(AgentParameters):\n def __init__(self):\n super().__init__(algorithm=ActorCriticAlgorithmParameters(),\n exploration=None, #TODO this should be different for continuous (ContinuousEntropyExploration)\n # and discrete (CategoricalExploration) action spaces.\n memory=SingleEpisodeBufferParameters(),\n networks={\"main\": ActorCriticNetworkParameters()})\n\n @property\n def path(self):\n return 'rl_coach.agents.actor_critic_agent:ActorCriticAgent'\n\n\n# Actor Critic - https://arxiv.org/abs/1602.01783\nclass ActorCriticAgent(PolicyOptimizationAgent):\n def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):\n super().__init__(agent_parameters, parent)\n self.last_gradient_update_step_idx = 0\n self.action_advantages = self.register_signal('Advantages')\n self.state_values = self.register_signal('Values')\n self.value_loss = self.register_signal('Value Loss')\n self.policy_loss = self.register_signal('Policy Loss')\n\n # Discounting function used to calculate discounted returns.\n def discount(self, x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n def 
get_general_advantage_estimation_values(self, rewards, values):\n # values contain n+1 elements (t ... t+n+1), rewards contain n elements (t ... t + n)\n bootstrap_extended_rewards = np.array(rewards.tolist() + [values[-1]])\n\n # Approximation based calculation of GAE (mathematically correct only when Tmax = inf,\n # although in practice works even in much smaller Tmax values, e.g. 20)\n deltas = rewards + self.ap.algorithm.discount * values[1:] - values[:-1]\n gae = self.discount(deltas, self.ap.algorithm.discount * self.ap.algorithm.gae_lambda)\n\n if self.ap.algorithm.estimate_state_value_using_gae:\n discounted_returns = np.expand_dims(gae + values[:-1], -1)\n else:\n discounted_returns = np.expand_dims(np.array(self.discount(bootstrap_extended_rewards,\n self.ap.algorithm.discount)), 1)[:-1]\n return gae, discounted_returns\n\n def learn_from_batch(self, batch):\n # batch contains a list of episodes to learn from\n network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()\n\n # get the values for the current states\n\n result = self.networks['main'].online_network.predict(batch.states(network_keys))\n current_state_values = result[0]\n\n self.state_values.add_sample(current_state_values)\n\n # the targets for the state value estimator\n num_transitions = batch.size\n state_value_head_targets = np.zeros((num_transitions, 1))\n\n # estimate the advantage function\n action_advantages = np.zeros((num_transitions, 1))\n\n if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:\n if batch.game_overs()[-1]:\n R = 0\n else:\n R = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]\n\n for i in reversed(range(num_transitions)):\n R = batch.rewards()[i] + self.ap.algorithm.discount * R\n state_value_head_targets[i] = R\n action_advantages[i] = R - current_state_values[i]\n\n elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:\n # get bootstraps\n bootstrapped_value = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]\n values = np.append(current_state_values, bootstrapped_value)\n if batch.game_overs()[-1]:\n values[-1] = 0\n\n # get general discounted returns table\n gae_values, state_value_head_targets = self.get_general_advantage_estimation_values(batch.rewards(), values)\n action_advantages = np.vstack(gae_values)\n else:\n screen.warning(\"WARNING: The requested policy gradient rescaler is not available\")\n\n action_advantages = action_advantages.squeeze(axis=-1)\n actions = batch.actions()\n if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) < 2:\n actions = np.expand_dims(actions, -1)\n\n # train\n result = self.networks['main'].online_network.accumulate_gradients({**batch.states(network_keys),\n 'output_1_0': actions},\n [state_value_head_targets, action_advantages])\n\n # logging\n total_loss, losses, unclipped_grads = result[:3]\n self.action_advantages.add_sample(action_advantages)\n self.unclipped_grads.add_sample(unclipped_grads)\n self.value_loss.add_sample(losses[0])\n self.policy_loss.add_sample(losses[1])\n\n return total_loss, losses, unclipped_grads\n\n def get_prediction(self, states):\n tf_input_state = self.prepare_batch_for_inference(states, \"main\")\n return self.networks['main'].online_network.predict(tf_input_state)[1:] # index 0 is the state value\n"
] | [
[
"numpy.vstack",
"numpy.expand_dims",
"numpy.append",
"numpy.zeros"
]
] |
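`ActorCriticAgent.discount` in the row above uses `scipy.signal.lfilter` on the reversed sequence to compute discounted cumulative sums, i.e. y[t] = x[t] + gamma * y[t+1]. A quick check (assumed, not from the repo) that the filter trick matches the manual backward recursion:

```python
import numpy as np
import scipy.signal

def discount(x, gamma):
    # same filter trick as ActorCriticAgent.discount
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

rewards = np.array([1.0, 0.0, 2.0])
gamma = 0.9
print(discount(rewards, gamma))  # [2.62 1.8  2.  ]

# reference: explicit backward recursion
ret = np.zeros_like(rewards)
running = 0.0
for t in reversed(range(len(rewards))):
    running = rewards[t] + gamma * running
    ret[t] = running
print(ret)  # [2.62 1.8  2.  ]
```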
kuantan/pandas | [
"e18921eb0cc86f71c84a4aa0bd6d0c1b7de89def",
"e18921eb0cc86f71c84a4aa0bd6d0c1b7de89def"
] | [
"pandas/io/parquet.py",
"pandas/tests/io/test_common.py"
] | [
"\"\"\" parquet compat \"\"\"\nfrom __future__ import annotations\n\nimport io\nimport os\nfrom typing import Any\nfrom warnings import catch_warnings\n\nfrom pandas._typing import (\n FilePath,\n ReadBuffer,\n StorageOptions,\n WriteBuffer,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import doc\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n get_option,\n)\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.util.version import Version\n\nfrom pandas.io.common import (\n IOHandles,\n get_handle,\n is_fsspec_url,\n is_url,\n stringify_path,\n)\n\n\ndef get_engine(engine: str) -> BaseImpl:\n \"\"\"return our implementation\"\"\"\n if engine == \"auto\":\n engine = get_option(\"io.parquet.engine\")\n\n if engine == \"auto\":\n # try engines in this order\n engine_classes = [PyArrowImpl, FastParquetImpl]\n\n error_msgs = \"\"\n for engine_class in engine_classes:\n try:\n return engine_class()\n except ImportError as err:\n error_msgs += \"\\n - \" + str(err)\n\n raise ImportError(\n \"Unable to find a usable engine; \"\n \"tried using: 'pyarrow', 'fastparquet'.\\n\"\n \"A suitable version of \"\n \"pyarrow or fastparquet is required for parquet \"\n \"support.\\n\"\n \"Trying to import the above resulted in these errors:\"\n f\"{error_msgs}\"\n )\n\n if engine == \"pyarrow\":\n return PyArrowImpl()\n elif engine == \"fastparquet\":\n return FastParquetImpl()\n\n raise ValueError(\"engine must be one of 'pyarrow', 'fastparquet'\")\n\n\ndef _get_path_or_handle(\n path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],\n fs: Any,\n storage_options: StorageOptions = None,\n mode: str = \"rb\",\n is_dir: bool = False,\n) -> tuple[\n FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any\n]:\n \"\"\"File handling for PyArrow.\"\"\"\n path_or_handle = stringify_path(path)\n if is_fsspec_url(path_or_handle) and fs is None:\n fsspec = import_optional_dependency(\"fsspec\")\n\n fs, path_or_handle = fsspec.core.url_to_fs(\n path_or_handle, **(storage_options or {})\n )\n elif storage_options and (not is_url(path_or_handle) or mode != \"rb\"):\n # can't write to a remote url\n # without making use of fsspec at the moment\n raise ValueError(\"storage_options passed with buffer, or non-supported URL\")\n\n handles = None\n if (\n not fs\n and not is_dir\n and isinstance(path_or_handle, str)\n and not os.path.isdir(path_or_handle)\n ):\n # use get_handle only when we are very certain that it is not a directory\n # fsspec resources can also point to directories\n # this branch is used for example when reading from non-fsspec URLs\n handles = get_handle(\n path_or_handle, mode, is_text=False, storage_options=storage_options\n )\n fs = None\n path_or_handle = handles.handle\n return path_or_handle, handles, fs\n\n\nclass BaseImpl:\n @staticmethod\n def validate_dataframe(df: DataFrame):\n\n if not isinstance(df, DataFrame):\n raise ValueError(\"to_parquet only supports IO with DataFrames\")\n\n # must have value column names for all index levels (strings only)\n if isinstance(df.columns, MultiIndex):\n if not all(\n x.inferred_type in {\"string\", \"empty\"} for x in df.columns.levels\n ):\n raise ValueError(\n \"\"\"\n parquet must have string column names for all values in\n each level of the MultiIndex\n \"\"\"\n )\n else:\n if df.columns.inferred_type not in {\"string\", \"empty\"}:\n raise ValueError(\"parquet must have string column names\")\n\n # index level names 
must be strings\n valid_names = all(\n isinstance(name, str) for name in df.index.names if name is not None\n )\n if not valid_names:\n raise ValueError(\"Index level names must be strings\")\n\n def write(self, df: DataFrame, path, compression, **kwargs):\n raise AbstractMethodError(self)\n\n def read(self, path, columns=None, **kwargs):\n raise AbstractMethodError(self)\n\n\nclass PyArrowImpl(BaseImpl):\n def __init__(self):\n import_optional_dependency(\n \"pyarrow\", extra=\"pyarrow is required for parquet support.\"\n )\n import pyarrow.parquet\n\n # import utils to register the pyarrow extension types\n import pandas.core.arrays._arrow_utils # noqa:F401\n\n self.api = pyarrow\n\n def write(\n self,\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes],\n compression: str | None = \"snappy\",\n index: bool | None = None,\n storage_options: StorageOptions = None,\n partition_cols: list[str] | None = None,\n **kwargs,\n ):\n self.validate_dataframe(df)\n\n from_pandas_kwargs: dict[str, Any] = {\"schema\": kwargs.pop(\"schema\", None)}\n if index is not None:\n from_pandas_kwargs[\"preserve_index\"] = index\n\n table = self.api.Table.from_pandas(df, **from_pandas_kwargs)\n\n path_or_handle, handles, kwargs[\"filesystem\"] = _get_path_or_handle(\n path,\n kwargs.pop(\"filesystem\", None),\n storage_options=storage_options,\n mode=\"wb\",\n is_dir=partition_cols is not None,\n )\n try:\n if partition_cols is not None:\n # writes to multiple files under the given path\n self.api.parquet.write_to_dataset(\n table,\n path_or_handle,\n compression=compression,\n partition_cols=partition_cols,\n **kwargs,\n )\n else:\n # write to single output file\n self.api.parquet.write_table(\n table, path_or_handle, compression=compression, **kwargs\n )\n finally:\n if handles is not None:\n handles.close()\n\n def read(\n self,\n path,\n columns=None,\n use_nullable_dtypes=False,\n storage_options: StorageOptions = None,\n **kwargs,\n ):\n kwargs[\"use_pandas_metadata\"] = True\n\n to_pandas_kwargs = {}\n if use_nullable_dtypes:\n import pandas as pd\n\n mapping = {\n self.api.int8(): pd.Int8Dtype(),\n self.api.int16(): pd.Int16Dtype(),\n self.api.int32(): pd.Int32Dtype(),\n self.api.int64(): pd.Int64Dtype(),\n self.api.uint8(): pd.UInt8Dtype(),\n self.api.uint16(): pd.UInt16Dtype(),\n self.api.uint32(): pd.UInt32Dtype(),\n self.api.uint64(): pd.UInt64Dtype(),\n self.api.bool_(): pd.BooleanDtype(),\n self.api.string(): pd.StringDtype(),\n }\n to_pandas_kwargs[\"types_mapper\"] = mapping.get\n manager = get_option(\"mode.data_manager\")\n if manager == \"array\":\n to_pandas_kwargs[\"split_blocks\"] = True # type: ignore[assignment]\n\n path_or_handle, handles, kwargs[\"filesystem\"] = _get_path_or_handle(\n path,\n kwargs.pop(\"filesystem\", None),\n storage_options=storage_options,\n mode=\"rb\",\n )\n try:\n result = self.api.parquet.read_table(\n path_or_handle, columns=columns, **kwargs\n ).to_pandas(**to_pandas_kwargs)\n if manager == \"array\":\n result = result._as_manager(\"array\", copy=False)\n return result\n finally:\n if handles is not None:\n handles.close()\n\n\nclass FastParquetImpl(BaseImpl):\n def __init__(self):\n # since pandas is a dependency of fastparquet\n # we need to import on first use\n fastparquet = import_optional_dependency(\n \"fastparquet\", extra=\"fastparquet is required for parquet support.\"\n )\n self.api = fastparquet\n\n def write(\n self,\n df: DataFrame,\n path,\n compression=\"snappy\",\n index=None,\n partition_cols=None,\n storage_options: StorageOptions = 
None,\n **kwargs,\n ):\n self.validate_dataframe(df)\n # thriftpy/protocol/compact.py:339:\n # DeprecationWarning: tostring() is deprecated.\n # Use tobytes() instead.\n\n if \"partition_on\" in kwargs and partition_cols is not None:\n raise ValueError(\n \"Cannot use both partition_on and \"\n \"partition_cols. Use partition_cols for partitioning data\"\n )\n elif \"partition_on\" in kwargs:\n partition_cols = kwargs.pop(\"partition_on\")\n\n if partition_cols is not None:\n kwargs[\"file_scheme\"] = \"hive\"\n\n # cannot use get_handle as write() does not accept file buffers\n path = stringify_path(path)\n if is_fsspec_url(path):\n fsspec = import_optional_dependency(\"fsspec\")\n\n # if filesystem is provided by fsspec, file must be opened in 'wb' mode.\n kwargs[\"open_with\"] = lambda path, _: fsspec.open(\n path, \"wb\", **(storage_options or {})\n ).open()\n elif storage_options:\n raise ValueError(\n \"storage_options passed with file object or non-fsspec file path\"\n )\n\n with catch_warnings(record=True):\n self.api.write(\n path,\n df,\n compression=compression,\n write_index=index,\n partition_on=partition_cols,\n **kwargs,\n )\n\n def read(\n self, path, columns=None, storage_options: StorageOptions = None, **kwargs\n ):\n parquet_kwargs: dict[str, Any] = {}\n use_nullable_dtypes = kwargs.pop(\"use_nullable_dtypes\", False)\n if Version(self.api.__version__) >= Version(\"0.7.1\"):\n # We are disabling nullable dtypes for fastparquet pending discussion\n parquet_kwargs[\"pandas_nulls\"] = False\n if use_nullable_dtypes:\n raise ValueError(\n \"The 'use_nullable_dtypes' argument is not supported for the \"\n \"fastparquet engine\"\n )\n path = stringify_path(path)\n handles = None\n if is_fsspec_url(path):\n fsspec = import_optional_dependency(\"fsspec\")\n\n if Version(self.api.__version__) > Version(\"0.6.1\"):\n parquet_kwargs[\"fs\"] = fsspec.open(\n path, \"rb\", **(storage_options or {})\n ).fs\n else:\n parquet_kwargs[\"open_with\"] = lambda path, _: fsspec.open(\n path, \"rb\", **(storage_options or {})\n ).open()\n elif isinstance(path, str) and not os.path.isdir(path):\n # use get_handle only when we are very certain that it is not a directory\n # fsspec resources can also point to directories\n # this branch is used for example when reading from non-fsspec URLs\n handles = get_handle(\n path, \"rb\", is_text=False, storage_options=storage_options\n )\n path = handles.handle\n\n parquet_file = self.api.ParquetFile(path, **parquet_kwargs)\n\n result = parquet_file.to_pandas(columns=columns, **kwargs)\n\n if handles is not None:\n handles.close()\n return result\n\n\n@doc(storage_options=_shared_docs[\"storage_options\"])\ndef to_parquet(\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes] | None = None,\n engine: str = \"auto\",\n compression: str | None = \"snappy\",\n index: bool | None = None,\n storage_options: StorageOptions = None,\n partition_cols: list[str] | None = None,\n **kwargs,\n) -> bytes | None:\n \"\"\"\n Write a DataFrame to the parquet format.\n\n Parameters\n ----------\n df : DataFrame\n path : str, path object, file-like object, or None, default None\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``write()`` function. If None, the result is\n returned as bytes. If a string, it will be used as Root Directory path\n when writing a partitioned dataset. The engine fastparquet does not\n accept file-like objects.\n\n .. 
versionchanged:: 1.2.0\n\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},\n default 'snappy'. Name of the compression to use. Use ``None``\n for no compression. The supported compression methods actually\n depend on which engine is used. For 'pyarrow', 'snappy', 'gzip',\n 'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet',\n only 'gzip' and 'snappy' are supported.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output. If\n ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n partition_cols : str or list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n kwargs\n Additional keyword arguments passed to the engine\n\n Returns\n -------\n bytes if no path argument is provided else None\n \"\"\"\n if isinstance(partition_cols, str):\n partition_cols = [partition_cols]\n impl = get_engine(engine)\n\n path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path\n\n impl.write(\n df,\n path_or_buf,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n **kwargs,\n )\n\n if path is None:\n assert isinstance(path_or_buf, io.BytesIO)\n return path_or_buf.getvalue()\n else:\n return None\n\n\n@doc(storage_options=_shared_docs[\"storage_options\"])\ndef read_parquet(\n path,\n engine: str = \"auto\",\n columns=None,\n storage_options: StorageOptions = None,\n use_nullable_dtypes: bool = False,\n **kwargs,\n):\n \"\"\"\n Load a parquet object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : str, path object or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``read()`` function.\n The string could be a URL. Valid URL schemes include http, ftp, s3,\n gs, and file. For file URLs, a host is expected. A local file could be:\n ``file://localhost/path/to/table.parquet``.\n A file URL can also be a path to a directory that contains multiple\n partitioned parquet files. Both pyarrow and fastparquet support\n paths to directories as well as file URLs. A directory path could be:\n ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n columns : list, default=None\n If not None, only these columns will be read from the file.\n\n {storage_options}\n\n .. versionadded:: 1.3.0\n\n use_nullable_dtypes : bool, default False\n If True, use dtypes that use ``pd.NA`` as missing value indicator\n for the resulting DataFrame. 
(only applicable for the ``pyarrow``\n engine)\n As new dtypes are added that support ``pd.NA`` in the future, the\n output with this option will change to use those dtypes.\n Note: this is an experimental option, and behaviour (e.g. additional\n support dtypes) may change without notice.\n\n .. versionadded:: 1.2.0\n\n **kwargs\n Any additional kwargs are passed to the engine.\n\n Returns\n -------\n DataFrame\n \"\"\"\n impl = get_engine(engine)\n\n return impl.read(\n path,\n columns=columns,\n storage_options=storage_options,\n use_nullable_dtypes=use_nullable_dtypes,\n **kwargs,\n )\n",
"\"\"\"\nTests for the pandas.io.common functionalities\n\"\"\"\nimport codecs\nimport errno\nfrom functools import partial\nfrom io import (\n BytesIO,\n StringIO,\n UnsupportedOperation,\n)\nimport mmap\nimport os\nfrom pathlib import Path\nimport tempfile\n\nimport pytest\n\nfrom pandas.compat import is_platform_windows\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nimport pandas._testing as tm\n\nimport pandas.io.common as icom\n\n\nclass CustomFSPath:\n \"\"\"For testing fspath on unknown objects\"\"\"\n\n def __init__(self, path):\n self.path = path\n\n def __fspath__(self):\n return self.path\n\n\n# Functions that consume a string path and return a string or path-like object\npath_types = [str, CustomFSPath, Path]\n\ntry:\n from py.path import local as LocalPath\n\n path_types.append(LocalPath)\nexcept ImportError:\n pass\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\n# https://github.com/cython/cython/issues/1720\n@pytest.mark.filterwarnings(\"ignore:can't resolve package:ImportWarning\")\nclass TestCommonIOCapabilities:\n data1 = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n\n def test_expand_user(self):\n filename = \"~/sometest\"\n expanded_name = icom._expand_user(filename)\n\n assert expanded_name != filename\n assert os.path.isabs(expanded_name)\n assert os.path.expanduser(filename) == expanded_name\n\n def test_expand_user_normal_path(self):\n filename = \"/somefolder/sometest\"\n expanded_name = icom._expand_user(filename)\n\n assert expanded_name == filename\n assert os.path.expanduser(filename) == expanded_name\n\n def test_stringify_path_pathlib(self):\n rel_path = icom.stringify_path(Path(\".\"))\n assert rel_path == \".\"\n redundant_path = icom.stringify_path(Path(\"foo//bar\"))\n assert redundant_path == os.path.join(\"foo\", \"bar\")\n\n @td.skip_if_no(\"py.path\")\n def test_stringify_path_localpath(self):\n path = os.path.join(\"foo\", \"bar\")\n abs_path = os.path.abspath(path)\n lpath = LocalPath(path)\n assert icom.stringify_path(lpath) == abs_path\n\n def test_stringify_path_fspath(self):\n p = CustomFSPath(\"foo/bar.csv\")\n result = icom.stringify_path(p)\n assert result == \"foo/bar.csv\"\n\n def test_stringify_file_and_path_like(self):\n # GH 38125: do not stringify file objects that are also path-like\n fsspec = pytest.importorskip(\"fsspec\")\n with tm.ensure_clean() as path:\n with fsspec.open(f\"file://{path}\", mode=\"wb\") as fsspec_obj:\n assert fsspec_obj == icom.stringify_path(fsspec_obj)\n\n @pytest.mark.parametrize(\"path_type\", path_types)\n def test_infer_compression_from_path(self, compression_format, path_type):\n extension, expected = compression_format\n path = path_type(\"foo/bar.csv\" + extension)\n compression = icom.infer_compression(path, compression=\"infer\")\n assert compression == expected\n\n @pytest.mark.parametrize(\"path_type\", [str, CustomFSPath, Path])\n def test_get_handle_with_path(self, path_type):\n # ignore LocalPath: it creates strange paths: /absolute/~/sometest\n with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:\n filename = path_type(\"~/\" + Path(tmp).name + \"/sometest\")\n with icom.get_handle(filename, \"w\") as handles:\n assert Path(handles.handle.name).is_absolute()\n assert os.path.expanduser(filename) == handles.handle.name\n\n def test_get_handle_with_buffer(self):\n input_buffer = StringIO()\n with icom.get_handle(input_buffer, \"r\") as handles:\n assert handles.handle == 
input_buffer\n assert not input_buffer.closed\n input_buffer.close()\n\n # Test that BytesIOWrapper(get_handle) returns correct amount of bytes every time\n def test_bytesiowrapper_returns_correct_bytes(self):\n # Test latin1, ucs-2, and ucs-4 chars\n data = \"\"\"a,b,c\n1,2,3\n©,®,®\nLook,a snake,🐍\"\"\"\n with icom.get_handle(StringIO(data), \"rb\", is_text=False) as handles:\n result = b\"\"\n chunksize = 5\n while True:\n chunk = handles.handle.read(chunksize)\n # Make sure each chunk is correct amount of bytes\n assert len(chunk) <= chunksize\n if len(chunk) < chunksize:\n # Can be less amount of bytes, but only at EOF\n # which happens when read returns empty\n assert len(handles.handle.read()) == 0\n result += chunk\n break\n result += chunk\n assert result == data.encode(\"utf-8\")\n\n # Test that pyarrow can handle a file opened with get_handle\n @td.skip_if_no(\"pyarrow\", min_version=\"0.15.0\")\n def test_get_handle_pyarrow_compat(self):\n from pyarrow import csv\n\n # Test latin1, ucs-2, and ucs-4 chars\n data = \"\"\"a,b,c\n1,2,3\n©,®,®\nLook,a snake,🐍\"\"\"\n expected = pd.DataFrame(\n {\"a\": [\"1\", \"©\", \"Look\"], \"b\": [\"2\", \"®\", \"a snake\"], \"c\": [\"3\", \"®\", \"🐍\"]}\n )\n s = StringIO(data)\n with icom.get_handle(s, \"rb\", is_text=False) as handles:\n df = csv.read_csv(handles.handle).to_pandas()\n tm.assert_frame_equal(df, expected)\n assert not s.closed\n\n def test_iterator(self):\n with pd.read_csv(StringIO(self.data1), chunksize=1) as reader:\n result = pd.concat(reader, ignore_index=True)\n expected = pd.read_csv(StringIO(self.data1))\n tm.assert_frame_equal(result, expected)\n\n # GH12153\n with pd.read_csv(StringIO(self.data1), chunksize=1) as it:\n first = next(it)\n tm.assert_frame_equal(first, expected.iloc[[0]])\n tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])\n\n @pytest.mark.parametrize(\n \"reader, module, error_class, fn_ext\",\n [\n (pd.read_csv, \"os\", FileNotFoundError, \"csv\"),\n (pd.read_fwf, \"os\", FileNotFoundError, \"txt\"),\n (pd.read_excel, \"xlrd\", FileNotFoundError, \"xlsx\"),\n (pd.read_feather, \"pyarrow\", OSError, \"feather\"),\n (pd.read_hdf, \"tables\", FileNotFoundError, \"h5\"),\n (pd.read_stata, \"os\", FileNotFoundError, \"dta\"),\n (pd.read_sas, \"os\", FileNotFoundError, \"sas7bdat\"),\n (pd.read_json, \"os\", ValueError, \"json\"),\n (pd.read_pickle, \"os\", FileNotFoundError, \"pickle\"),\n ],\n )\n def test_read_non_existent(self, reader, module, error_class, fn_ext):\n pytest.importorskip(module)\n\n path = os.path.join(HERE, \"data\", \"does_not_exist.\" + fn_ext)\n msg1 = fr\"File (b')?.+does_not_exist\\.{fn_ext}'? 
does not exist\"\n msg2 = fr\"\\[Errno 2\\] No such file or directory: '.+does_not_exist\\.{fn_ext}'\"\n msg3 = \"Expected object or value\"\n msg4 = \"path_or_buf needs to be a string file path or file-like\"\n msg5 = (\n fr\"\\[Errno 2\\] File .+does_not_exist\\.{fn_ext} does not exist: \"\n fr\"'.+does_not_exist\\.{fn_ext}'\"\n )\n msg6 = fr\"\\[Errno 2\\] 没有那个文件或目录: '.+does_not_exist\\.{fn_ext}'\"\n msg7 = (\n fr\"\\[Errno 2\\] File o directory non esistente: '.+does_not_exist\\.{fn_ext}'\"\n )\n msg8 = fr\"Failed to open local file.+does_not_exist\\.{fn_ext}\"\n\n with pytest.raises(\n error_class,\n match=fr\"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})\",\n ):\n reader(path)\n\n @pytest.mark.parametrize(\n \"method, module, error_class, fn_ext\",\n [\n (pd.DataFrame.to_csv, \"os\", OSError, \"csv\"),\n (pd.DataFrame.to_html, \"os\", OSError, \"html\"),\n (pd.DataFrame.to_excel, \"xlrd\", OSError, \"xlsx\"),\n (pd.DataFrame.to_feather, \"pyarrow\", OSError, \"feather\"),\n (pd.DataFrame.to_parquet, \"pyarrow\", OSError, \"parquet\"),\n (pd.DataFrame.to_stata, \"os\", OSError, \"dta\"),\n (pd.DataFrame.to_json, \"os\", OSError, \"json\"),\n (pd.DataFrame.to_pickle, \"os\", OSError, \"pickle\"),\n ],\n )\n # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables\n def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):\n pytest.importorskip(module)\n\n dummy_frame = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4], \"c\": [3, 4, 5]})\n\n path = os.path.join(HERE, \"data\", \"missing_folder\", \"does_not_exist.\" + fn_ext)\n\n with pytest.raises(\n error_class,\n match=r\"Cannot save file into a non-existent directory: .*missing_folder\",\n ):\n method(dummy_frame, path)\n\n @pytest.mark.parametrize(\n \"reader, module, error_class, fn_ext\",\n [\n (pd.read_csv, \"os\", FileNotFoundError, \"csv\"),\n (pd.read_table, \"os\", FileNotFoundError, \"csv\"),\n (pd.read_fwf, \"os\", FileNotFoundError, \"txt\"),\n (pd.read_excel, \"xlrd\", FileNotFoundError, \"xlsx\"),\n (pd.read_feather, \"pyarrow\", OSError, \"feather\"),\n (pd.read_hdf, \"tables\", FileNotFoundError, \"h5\"),\n (pd.read_stata, \"os\", FileNotFoundError, \"dta\"),\n (pd.read_sas, \"os\", FileNotFoundError, \"sas7bdat\"),\n (pd.read_json, \"os\", ValueError, \"json\"),\n (pd.read_pickle, \"os\", FileNotFoundError, \"pickle\"),\n ],\n )\n def test_read_expands_user_home_dir(\n self, reader, module, error_class, fn_ext, monkeypatch\n ):\n pytest.importorskip(module)\n\n path = os.path.join(\"~\", \"does_not_exist.\" + fn_ext)\n monkeypatch.setattr(icom, \"_expand_user\", lambda x: os.path.join(\"foo\", x))\n\n msg1 = fr\"File (b')?.+does_not_exist\\.{fn_ext}'? 
does not exist\"\n msg2 = fr\"\\[Errno 2\\] No such file or directory: '.+does_not_exist\\.{fn_ext}'\"\n msg3 = \"Unexpected character found when decoding 'false'\"\n msg4 = \"path_or_buf needs to be a string file path or file-like\"\n msg5 = (\n fr\"\\[Errno 2\\] File .+does_not_exist\\.{fn_ext} does not exist: \"\n fr\"'.+does_not_exist\\.{fn_ext}'\"\n )\n msg6 = fr\"\\[Errno 2\\] 没有那个文件或目录: '.+does_not_exist\\.{fn_ext}'\"\n msg7 = (\n fr\"\\[Errno 2\\] File o directory non esistente: '.+does_not_exist\\.{fn_ext}'\"\n )\n msg8 = fr\"Failed to open local file.+does_not_exist\\.{fn_ext}\"\n\n with pytest.raises(\n error_class,\n match=fr\"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})\",\n ):\n reader(path)\n\n @pytest.mark.parametrize(\n \"reader, module, path\",\n [\n (pd.read_csv, \"os\", (\"io\", \"data\", \"csv\", \"iris.csv\")),\n (pd.read_table, \"os\", (\"io\", \"data\", \"csv\", \"iris.csv\")),\n (\n pd.read_fwf,\n \"os\",\n (\"io\", \"data\", \"fixed_width\", \"fixed_width_format.txt\"),\n ),\n (pd.read_excel, \"xlrd\", (\"io\", \"data\", \"excel\", \"test1.xlsx\")),\n (\n pd.read_feather,\n \"pyarrow\",\n (\"io\", \"data\", \"feather\", \"feather-0_3_1.feather\"),\n ),\n (\n pd.read_hdf,\n \"tables\",\n (\"io\", \"data\", \"legacy_hdf\", \"datetimetz_object.h5\"),\n ),\n (pd.read_stata, \"os\", (\"io\", \"data\", \"stata\", \"stata10_115.dta\")),\n (pd.read_sas, \"os\", (\"io\", \"sas\", \"data\", \"test1.sas7bdat\")),\n (pd.read_json, \"os\", (\"io\", \"json\", \"data\", \"tsframe_v012.json\")),\n (\n pd.read_pickle,\n \"os\",\n (\"io\", \"data\", \"pickle\", \"categorical.0.25.0.pickle\"),\n ),\n ],\n )\n @pytest.mark.filterwarnings(\n \"ignore:CategoricalBlock is deprecated:DeprecationWarning\"\n )\n @pytest.mark.filterwarnings( # pytables np.object usage\n \"ignore:`np.object` is a deprecated alias:DeprecationWarning\"\n )\n def test_read_fspath_all(self, reader, module, path, datapath):\n pytest.importorskip(module)\n path = datapath(*path)\n\n mypath = CustomFSPath(path)\n result = reader(mypath)\n expected = reader(path)\n\n if path.endswith(\".pickle\"):\n # categorical\n tm.assert_categorical_equal(result, expected)\n else:\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\"ignore:In future versions `DataFrame.to_latex`\")\n @pytest.mark.parametrize(\n \"writer_name, writer_kwargs, module\",\n [\n (\"to_csv\", {}, \"os\"),\n (\"to_excel\", {\"engine\": \"xlwt\"}, \"xlwt\"),\n (\"to_feather\", {}, \"pyarrow\"),\n (\"to_html\", {}, \"os\"),\n (\"to_json\", {}, \"os\"),\n (\"to_latex\", {}, \"os\"),\n (\"to_pickle\", {}, \"os\"),\n (\"to_stata\", {\"time_stamp\": pd.to_datetime(\"2019-01-01 00:00\")}, \"os\"),\n ],\n )\n def test_write_fspath_all(self, writer_name, writer_kwargs, module):\n p1 = tm.ensure_clean(\"string\")\n p2 = tm.ensure_clean(\"fspath\")\n df = pd.DataFrame({\"A\": [1, 2]})\n\n with p1 as string, p2 as fspath:\n pytest.importorskip(module)\n mypath = CustomFSPath(fspath)\n writer = getattr(df, writer_name)\n\n writer(string, **writer_kwargs)\n with open(string, \"rb\") as f:\n expected = f.read()\n\n writer(mypath, **writer_kwargs)\n with open(fspath, \"rb\") as f:\n result = f.read()\n\n assert result == expected\n\n @pytest.mark.filterwarnings( # pytables np.object usage\n \"ignore:`np.object` is a deprecated alias:DeprecationWarning\"\n )\n def test_write_fspath_hdf5(self):\n # Same test as write_fspath_all, except HDF5 files aren't\n # necessarily byte-for-byte identical for a given dataframe, so we'll\n # have to 
read and compare equality\n pytest.importorskip(\"tables\")\n\n df = pd.DataFrame({\"A\": [1, 2]})\n p1 = tm.ensure_clean(\"string\")\n p2 = tm.ensure_clean(\"fspath\")\n\n with p1 as string, p2 as fspath:\n mypath = CustomFSPath(fspath)\n df.to_hdf(mypath, key=\"bar\")\n df.to_hdf(string, key=\"bar\")\n\n result = pd.read_hdf(fspath, key=\"bar\")\n expected = pd.read_hdf(string, key=\"bar\")\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.fixture\ndef mmap_file(datapath):\n return datapath(\"io\", \"data\", \"csv\", \"test_mmap.csv\")\n\n\nclass TestMMapWrapper:\n def test_constructor_bad_file(self, mmap_file):\n non_file = StringIO(\"I am not a file\")\n non_file.fileno = lambda: -1\n\n # the error raised is different on Windows\n if is_platform_windows():\n msg = \"The parameter is incorrect\"\n err = OSError\n else:\n msg = \"[Errno 22]\"\n err = mmap.error\n\n with pytest.raises(err, match=msg):\n icom._MMapWrapper(non_file)\n\n target = open(mmap_file)\n target.close()\n\n msg = \"I/O operation on closed file\"\n with pytest.raises(ValueError, match=msg):\n icom._MMapWrapper(target)\n\n def test_get_attr(self, mmap_file):\n with open(mmap_file) as target:\n wrapper = icom._MMapWrapper(target)\n\n attrs = dir(wrapper.mmap)\n attrs = [attr for attr in attrs if not attr.startswith(\"__\")]\n attrs.append(\"__next__\")\n\n for attr in attrs:\n assert hasattr(wrapper, attr)\n\n assert not hasattr(wrapper, \"foo\")\n\n def test_next(self, mmap_file):\n with open(mmap_file) as target:\n wrapper = icom._MMapWrapper(target)\n lines = target.readlines()\n\n for line in lines:\n next_line = next(wrapper)\n assert next_line.strip() == line.strip()\n\n with pytest.raises(StopIteration, match=r\"^$\"):\n next(wrapper)\n\n def test_unknown_engine(self):\n with tm.ensure_clean() as path:\n df = tm.makeDataFrame()\n df.to_csv(path)\n with pytest.raises(ValueError, match=\"Unknown engine\"):\n pd.read_csv(path, engine=\"pyt\")\n\n def test_binary_mode(self):\n \"\"\"\n 'encoding' shouldn't be passed to 'open' in binary mode.\n\n GH 35058\n \"\"\"\n with tm.ensure_clean() as path:\n df = tm.makeDataFrame()\n df.to_csv(path, mode=\"w+b\")\n tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))\n\n @pytest.mark.parametrize(\"encoding\", [\"utf-16\", \"utf-32\"])\n @pytest.mark.parametrize(\"compression_\", [\"bz2\", \"xz\"])\n def test_warning_missing_utf_bom(self, encoding, compression_):\n \"\"\"\n bz2 and xz do not write the byte order mark (BOM) for utf-16/32.\n\n https://stackoverflow.com/questions/55171439\n\n GH 35681\n \"\"\"\n df = tm.makeDataFrame()\n with tm.ensure_clean() as path:\n with tm.assert_produces_warning(UnicodeWarning):\n df.to_csv(path, compression=compression_, encoding=encoding)\n\n # reading should fail (otherwise we wouldn't need the warning)\n msg = r\"UTF-\\d+ stream does not start with BOM\"\n with pytest.raises(UnicodeError, match=msg):\n pd.read_csv(path, compression=compression_, encoding=encoding)\n\n\ndef test_is_fsspec_url():\n assert icom.is_fsspec_url(\"gcs://pandas/somethingelse.com\")\n assert icom.is_fsspec_url(\"gs://pandas/somethingelse.com\")\n # the following is the only remote URL that is handled without fsspec\n assert not icom.is_fsspec_url(\"http://pandas/somethingelse.com\")\n assert not icom.is_fsspec_url(\"random:pandas/somethingelse.com\")\n assert not icom.is_fsspec_url(\"/local/path\")\n assert not icom.is_fsspec_url(\"relative/local/path\")\n\n\n@pytest.mark.parametrize(\"encoding\", [None, 
\"utf-8\"])\n@pytest.mark.parametrize(\"format\", [\"csv\", \"json\"])\ndef test_codecs_encoding(encoding, format):\n # GH39247\n expected = tm.makeDataFrame()\n with tm.ensure_clean() as path:\n with codecs.open(path, mode=\"w\", encoding=encoding) as handle:\n getattr(expected, f\"to_{format}\")(handle)\n with codecs.open(path, mode=\"r\", encoding=encoding) as handle:\n if format == \"csv\":\n df = pd.read_csv(handle, index_col=0)\n else:\n df = pd.read_json(handle)\n tm.assert_frame_equal(expected, df)\n\n\ndef test_codecs_get_writer_reader():\n # GH39247\n expected = tm.makeDataFrame()\n with tm.ensure_clean() as path:\n with open(path, \"wb\") as handle:\n with codecs.getwriter(\"utf-8\")(handle) as encoded:\n expected.to_csv(encoded)\n with open(path, \"rb\") as handle:\n with codecs.getreader(\"utf-8\")(handle) as encoded:\n df = pd.read_csv(encoded, index_col=0)\n tm.assert_frame_equal(expected, df)\n\n\n@pytest.mark.parametrize(\n \"io_class,mode,msg\",\n [\n (BytesIO, \"t\", \"a bytes-like object is required, not 'str'\"),\n (StringIO, \"b\", \"string argument expected, got 'bytes'\"),\n ],\n)\ndef test_explicit_encoding(io_class, mode, msg):\n # GH39247; this test makes sure that if a user provides mode=\"*t\" or \"*b\",\n # it is used. In the case of this test it leads to an error as intentionally the\n # wrong mode is requested\n expected = tm.makeDataFrame()\n with io_class() as buffer:\n with pytest.raises(TypeError, match=msg):\n expected.to_csv(buffer, mode=f\"w{mode}\")\n\n\n@pytest.mark.parametrize(\"encoding_errors\", [None, \"strict\", \"replace\"])\n@pytest.mark.parametrize(\"format\", [\"csv\", \"json\"])\ndef test_encoding_errors(encoding_errors, format):\n # GH39450\n msg = \"'utf-8' codec can't decode byte\"\n bad_encoding = b\"\\xe4\"\n\n if format == \"csv\":\n return\n content = bad_encoding + b\"\\n\" + bad_encoding\n reader = pd.read_csv\n else:\n content = (\n b'{\"'\n + bad_encoding * 2\n + b'\": {\"'\n + bad_encoding\n + b'\":\"'\n + bad_encoding\n + b'\"}}'\n )\n reader = partial(pd.read_json, orient=\"index\")\n with tm.ensure_clean() as path:\n file = Path(path)\n file.write_bytes(content)\n\n if encoding_errors != \"replace\":\n with pytest.raises(UnicodeDecodeError, match=msg):\n reader(path, encoding_errors=encoding_errors)\n else:\n df = reader(path, encoding_errors=encoding_errors)\n decoded = bad_encoding.decode(errors=encoding_errors)\n expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])\n tm.assert_frame_equal(df, expected)\n\n\ndef test_bad_encdoing_errors():\n # GH 39777\n with tm.ensure_clean() as path:\n with pytest.raises(LookupError, match=\"unknown error handler name\"):\n icom.get_handle(path, \"w\", errors=\"bad\")\n\n\ndef test_errno_attribute():\n # GH 13872\n with pytest.raises(FileNotFoundError, match=\"\\\\[Errno 2\\\\]\") as err:\n pd.read_csv(\"doesnt_exist\")\n assert err.errno == errno.ENOENT\n\n\ndef test_fail_mmap():\n with pytest.raises(UnsupportedOperation, match=\"fileno\"):\n with BytesIO() as buffer:\n icom.get_handle(buffer, \"rb\", memory_map=True)\n"
] | [
[
"pandas.UInt64Dtype",
"pandas.io.common.is_fsspec_url",
"pandas.util._decorators.doc",
"pandas.UInt16Dtype",
"pandas.UInt32Dtype",
"pandas.Int64Dtype",
"pandas.get_option",
"pandas.io.common.get_handle",
"pandas.StringDtype",
"pandas.UInt8Dtype",
"pandas.io.common.is_url",
"pandas.Int32Dtype",
"pandas.util.version.Version",
"pandas.io.common.stringify_path",
"pandas.Int8Dtype",
"pandas.compat._optional.import_optional_dependency",
"pandas.errors.AbstractMethodError",
"pandas.BooleanDtype",
"pandas.Int16Dtype"
],
[
"pandas._testing.ensure_clean",
"pandas.compat.is_platform_windows",
"pandas._testing.assert_frame_equal",
"pandas._testing.makeDataFrame",
"pandas.io.common.is_fsspec_url",
"pandas.io.common._expand_user",
"pandas.util._test_decorators.skip_if_no",
"pandas.io.common.get_handle",
"pandas._testing.assert_produces_warning",
"pandas.read_json",
"pandas.to_datetime",
"pandas.read_hdf",
"pandas.read_csv",
"pandas._testing.assert_categorical_equal",
"pandas.concat",
"pandas.io.common.stringify_path",
"pandas.DataFrame",
"pandas.io.common._MMapWrapper",
"pandas.io.common.infer_compression"
]
] |
AlumiK/bagel-tensorflow | [
"791a89a54f15aeed0c4e1ea43afb9300f18b60cd"
] | [
"bagel/testing.py"
] | [
"import bagel\nimport numpy as np\n\nfrom sklearn.metrics import precision_recall_curve\nfrom typing import Sequence, Tuple, Dict, Optional\n\n\ndef _adjust_scores(labels: np.ndarray,\n scores: np.ndarray,\n delay: Optional[int] = None,\n inplace: bool = False) -> np.ndarray:\n if np.shape(scores) != np.shape(labels):\n raise ValueError('`labels` and `scores` must have same shape')\n if delay is None:\n delay = len(scores)\n splits = np.where(labels[1:] != labels[:-1])[0] + 1\n is_anomaly = labels[0] == 1\n adjusted_scores = np.copy(scores) if not inplace else scores\n pos = 0\n for part in splits:\n if is_anomaly:\n ptr = min(pos + delay + 1, part)\n adjusted_scores[pos: ptr] = np.max(adjusted_scores[pos: ptr])\n adjusted_scores[ptr: part] = np.maximum(adjusted_scores[ptr: part], adjusted_scores[pos])\n is_anomaly = not is_anomaly\n pos = part\n part = len(labels)\n if is_anomaly:\n ptr = min(pos + delay + 1, part)\n adjusted_scores[pos: part] = np.max(adjusted_scores[pos: ptr])\n return adjusted_scores\n\n\ndef _ignore_missing(series_list: Sequence, missing: np.ndarray) -> Tuple[np.ndarray, ...]:\n ret = []\n for series in series_list:\n series = np.copy(series)\n ret.append(series[missing != 1])\n return tuple(ret)\n\n\ndef _best_f1score(labels: np.ndarray, scores: np.ndarray) -> Tuple[float, float, float, float]:\n precision, recall, thresholds = precision_recall_curve(y_true=labels, probas_pred=scores)\n f1score = 2 * precision * recall / np.clip(precision + recall, a_min=1e-8, a_max=None)\n\n best_threshold = thresholds[np.argmax(f1score)]\n best_precision = precision[np.argmax(f1score)]\n best_recall = recall[np.argmax(f1score)]\n\n return best_threshold, best_precision, best_recall, np.max(f1score)\n\n\ndef get_test_results(labels: np.ndarray,\n scores: np.ndarray,\n missing: np.ndarray,\n window_size: int,\n delay: Optional[int] = None) -> Dict:\n labels = labels[window_size - 1:]\n scores = scores[window_size - 1:]\n missing = missing[window_size - 1:]\n adjusted_scores = _adjust_scores(labels=labels, scores=scores, delay=delay)\n adjusted_labels, adjusted_scores = _ignore_missing([labels, adjusted_scores], missing=missing)\n threshold, precision, recall, f1score = _best_f1score(labels=adjusted_labels, scores=adjusted_scores)\n return {'threshold': threshold,\n 'precision': precision,\n 'recall': recall,\n 'f1score': f1score}\n\n\nclass KPIStats:\n\n def __init__(self, kpi: bagel.data.KPI):\n self.num_points = len(kpi.values)\n self.num_missing = len(kpi.missing[kpi.missing == 1])\n self.num_anomaly = len(kpi.labels[kpi.labels == 1])\n self.missing_rate = self.num_missing / self.num_points\n self.anomaly_rate = self.num_anomaly / self.num_points\n\n\ndef get_kpi_stats(*kpis: bagel.data.KPI) -> Tuple[KPIStats, ...]:\n ret = []\n for kpi in kpis:\n ret.append(KPIStats(kpi))\n return tuple(ret)\n"
] | [
[
"numpy.copy",
"numpy.argmax",
"numpy.max",
"numpy.shape",
"numpy.clip",
"numpy.maximum",
"sklearn.metrics.precision_recall_curve",
"numpy.where"
]
] |
dreamflake/GADA | [
"9891ce06e15e53abc72ce57b144e288799967d8c"
] | [
"_3DDFA_V2/TDDFA.py"
] | [
"# coding: utf-8\n\n__author__ = 'cleardusk'\n\nimport os.path as osp\nimport time\nimport numpy as np\nimport cv2\nimport torch\nfrom torchvision.transforms import Compose\nimport torch.backends.cudnn as cudnn\n\nimport _3DDFA_V2.models as models\nfrom _3DDFA_V2.bfm import BFMModel\nfrom _3DDFA_V2.utils.io import _load\nfrom _3DDFA_V2.utils.functions import (\n crop_img, parse_roi_box_from_bbox, parse_roi_box_from_landmark,\n)\nfrom _3DDFA_V2.utils.tddfa_util import (\n load_model, _parse_param, similar_transform,\n ToTensorGjz, NormalizeGjz\n)\n\nmake_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)\n\n\nclass TDDFA(object):\n \"\"\"TDDFA: named Three-D Dense Face Alignment (TDDFA)\"\"\"\n\n def __init__(self, **kvs):\n torch.set_grad_enabled(False)\n print(make_abs_path('configs/bfm_noneck_v3.pkl'))\n # load BFM\n self.bfm = BFMModel(\n bfm_fp=kvs.get('bfm_fp', make_abs_path('configs/bfm_noneck_v3.pkl')),\n shape_dim=kvs.get('shape_dim', 40),\n exp_dim=kvs.get('exp_dim', 10)\n )\n self.tri = self.bfm.tri\n\n # config\n self.gpu_mode = kvs.get('gpu_mode', False)\n self.gpu_id = kvs.get('gpu_id', 0)\n self.size = kvs.get('size', 120)\n\n param_mean_std_fp = kvs.get(\n 'param_mean_std_fp', make_abs_path(f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl')\n )\n\n # load model, default output is dimension with length 62 = 12(pose) + 40(shape) +10(expression)\n model = getattr(models, kvs.get('arch'))(\n num_classes=kvs.get('num_params', 62),\n widen_factor=kvs.get('widen_factor', 1),\n size=self.size,\n mode=kvs.get('mode', 'small')\n )\n model = load_model(model, kvs.get('checkpoint_fp'))\n\n if self.gpu_mode:\n cudnn.benchmark = True\n model = model.cuda(device=self.gpu_id)\n\n self.model = model\n self.model.eval() # eval mode, fix BN\n\n # data normalization\n transform_normalize = NormalizeGjz(mean=127.5, std=128)\n transform_to_tensor = ToTensorGjz()\n transform = Compose([transform_to_tensor, transform_normalize])\n self.transform = transform\n\n # params normalization config\n r = _load(param_mean_std_fp)\n self.param_mean = r.get('mean')\n self.param_std = r.get('std')\n\n # print('param_mean and param_srd', self.param_mean, self.param_std)\n\n def __call__(self, img_ori, objs, **kvs):\n \"\"\"The main call of TDDFA, given image and box / landmark, return 3DMM params and roi_box\n :param img_ori: the input image\n :param objs: the list of box or landmarks\n :param kvs: options\n :return: param list and roi_box list\n \"\"\"\n # Crop image, forward to get the param\n param_lst = []\n roi_box_lst = []\n\n crop_policy = kvs.get('crop_policy', 'box')\n for obj in objs:\n if crop_policy == 'box':\n # by face box\n roi_box = parse_roi_box_from_bbox(obj)\n elif crop_policy == 'landmark':\n # by landmarks\n roi_box = parse_roi_box_from_landmark(obj)\n else:\n raise ValueError(f'Unknown crop policy {crop_policy}')\n\n roi_box_lst.append(roi_box)\n img = crop_img(img_ori, roi_box)\n img = cv2.resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_LINEAR)\n inp = self.transform(img).unsqueeze(0)\n\n if self.gpu_mode:\n inp = inp.cuda(device=self.gpu_id)\n\n if kvs.get('timer_flag', False):\n end = time.time()\n param = self.model(inp)\n elapse = f'Inference: {(time.time() - end) * 1000:.1f}ms'\n print(elapse)\n else:\n param = self.model(inp)\n\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n param = param * self.param_std + self.param_mean # re-scale\n # print('output', param)\n param_lst.append(param)\n\n return param_lst, 
roi_box_lst\n\n def recon_vers(self, param_lst, roi_box_lst, **kvs):\n dense_flag = kvs.get('dense_flag', False)\n size = self.size\n\n ver_lst = []\n for param, roi_box in zip(param_lst, roi_box_lst):\n if dense_flag:\n R, offset, alpha_shp, alpha_exp = _parse_param(param)\n pts3d = R @ (self.bfm.u + self.bfm.w_shp @ alpha_shp + self.bfm.w_exp @ alpha_exp). \\\n reshape(3, -1, order='F') + offset\n pts3d = similar_transform(pts3d, roi_box, size)\n else:\n R, offset, alpha_shp, alpha_exp = _parse_param(param)\n pts3d = R @ (self.bfm.u_base + self.bfm.w_shp_base @ alpha_shp + self.bfm.w_exp_base @ alpha_exp). \\\n reshape(3, -1, order='F') + offset\n pts3d = similar_transform(pts3d, roi_box, size)\n\n ver_lst.append(pts3d)\n\n return ver_lst\n"
] | [
[
"torch.set_grad_enabled"
]
] |
enikon/MACP | [
"2de004d4eaf09f3b02dde3b7041ce6d693d0c25c",
"2de004d4eaf09f3b02dde3b7041ce6d693d0c25c"
] | [
"experiments/experiments/Test6.py",
"multiagent/scenarios/simple_push.py"
] | [
"from experiments.experiments.PubIntegBackground import PubIntegBackground\nimport numpy as np\n\nif __name__ == \"__main__\":\n for i in np.arange(0.0, 10.0, 0.1):\n PubIntegBackground(correlation=False, listing=True, pub='None', intensity=i)\n",
"import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent import BaseScenario\n\nclass Scenario(BaseScenario):\n def make_world(self):\n world = World()\n # set any world properties first\n world.dim_c = 2\n num_agents = 2\n num_adversaries = 1\n num_landmarks = 2\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n if i < num_adversaries:\n agent.adversary = True\n else:\n agent.adversary = False\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.1, 0.1, 0.1])\n landmark.color[i + 1] += 0.8\n landmark.index = i\n # set goal landmark\n goal = np.random.choice(world.landmarks)\n for i, agent in enumerate(world.agents):\n agent.goal_a = goal\n agent.color = np.array([0.25, 0.25, 0.25])\n if agent.adversary:\n agent.color = np.array([0.75, 0.25, 0.25])\n else:\n j = goal.index\n agent.color[j + 1] += 0.5\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)\n\n def agent_reward(self, agent, world):\n # the distance to the goal\n return -np.sqrt(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))\n\n def adversary_reward(self, agent, world):\n # keep the nearest good agents away from the goal\n agent_dist = [np.sqrt(np.sum(np.square(a.state.p_pos - a.goal_a.state.p_pos))) for a in world.agents if not a.adversary]\n pos_rew = min(agent_dist)\n #nearest_agent = world.good_agents[np.argmin(agent_dist)]\n #neg_rew = np.sqrt(np.sum(np.square(nearest_agent.state.p_pos - agent.state.p_pos)))\n neg_rew = np.sqrt(np.sum(np.square(agent.goal_a.state.p_pos - agent.state.p_pos)))\n #neg_rew = sum([np.sqrt(np.sum(np.square(a.state.p_pos - agent.state.p_pos))) for a in world.good_agents])\n return pos_rew - neg_rew\n \n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks: # world.entities:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n entity_color = []\n for entity in world.landmarks: # world.entities:\n entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n other_pos = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not agent.adversary:\n return np.concatenate([agent.state.p_vel] + [agent.goal_a.state.p_pos - agent.state.p_pos] + [agent.color] + entity_pos + entity_color + other_pos)\n else:\n #other_pos = list(reversed(other_pos)) if random.uniform(0,1) > 0.5 else 
other_pos # randomize position of other agents in adversary network\n return np.concatenate([agent.state.p_vel] + entity_pos + other_pos)\n"
] | [
[
"numpy.arange"
],
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.choice",
"numpy.array",
"numpy.concatenate",
"numpy.square"
]
] |
JoshuaAnickat/mlflow | [
"6dee5cb250460e8dc7accb487e54df8c95921e0e"
] | [
"mlflow/pytorch/__init__.py"
] | [
"\"\"\"\nThe ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module\nexports PyTorch models with the following flavors:\n\nPyTorch (native) format\n This is the main flavor that can be loaded back into PyTorch.\n:py:mod:`mlflow.pyfunc`\n Produced for use by generic pyfunc-based deployment tools and batch inference.\n\"\"\"\nimport importlib\nimport logging\nimport os\nimport yaml\n\nimport cloudpickle\nimport numpy as np\nimport pandas as pd\nfrom distutils.version import LooseVersion\nimport posixpath\n\nimport mlflow\nimport shutil\nimport mlflow.pyfunc.utils as pyfunc_utils\nfrom mlflow import pyfunc\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model, ModelSignature\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.models.utils import ModelInputExample, _save_example\nfrom mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST\nfrom mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.annotations import experimental\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.file_utils import _copy_file_or_tree, TempDir\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.utils.autologging_utils import autologging_integration, safe_patch\n\nFLAVOR_NAME = \"pytorch\"\n\n_SERIALIZED_TORCH_MODEL_FILE_NAME = \"model.pth\"\n_PICKLE_MODULE_INFO_FILE_NAME = \"pickle_module_info.txt\"\n_EXTRA_FILES_KEY = \"extra_files\"\n_REQUIREMENTS_FILE_KEY = \"requirements_file\"\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment as a dictionary for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow.pytorch\n\n # Log PyTorch model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # Fetch the associated conda environment\n env = mlflow.pytorch.get_default_conda_env()\n print(\"conda env: {}\".format(env))\n\n .. code-block:: text\n :caption: Output\n\n conda env {'name': 'mlflow-env',\n 'channels': ['defaults', 'conda-forge', 'pytorch'],\n 'dependencies': ['python=3.7.5', 'pytorch=1.5.1',\n 'torchvision=0.6.1',\n 'pip', {'pip': ['mlflow', 'cloudpickle==1.6.0']}]}\n \"\"\"\n import torch\n import torchvision\n\n return _mlflow_conda_env(\n additional_conda_deps=[\n \"pytorch={}\".format(torch.__version__),\n \"torchvision={}\".format(torchvision.__version__),\n ],\n additional_pip_deps=[\n # We include CloudPickle in the default environment because\n # it's required by the default pickle module used by `save_model()`\n # and `log_model()`: `mlflow.pytorch.pickle_module`.\n \"cloudpickle=={}\".format(cloudpickle.__version__)\n ],\n additional_conda_channels=[\"pytorch\"],\n )\n\n\ndef log_model(\n pytorch_model,\n artifact_path,\n conda_env=None,\n code_paths=None,\n pickle_module=None,\n registered_model_name=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n requirements_file=None,\n extra_files=None,\n **kwargs\n):\n \"\"\"\n Log a PyTorch model as an MLflow artifact for the current run.\n\n :param pytorch_model: PyTorch model to be saved. 
Can be either an eager model (subclass of\n ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``\n or ``torch.jit.trace``.\n\n The model accept a single ``torch.FloatTensor`` as\n input and produce a single output tensor.\n\n If saving an eager model, any code dependencies of the\n model's class, including the class definition itself, should be\n included in one of the following locations:\n\n - The package(s) listed in the model's Conda environment, specified\n by the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_paths`` parameter.\n\n :param artifact_path: Run-relative artifact path.\n :param conda_env: Path to a Conda environment file. If provided, this decsribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If ``None``, the default\n :func:`get_default_conda_env()` environment is added to the model. The\n following is an *example* dictionary representation of a Conda environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'pytorch=0.4.1',\n 'torchvision=0.2.1'\n ]\n }\n\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path when the model is loaded.\n :param pickle_module: The module that PyTorch should use to serialize (\"pickle\") the specified\n ``pytorch_model``. This is passed as the ``pickle_module`` parameter\n to ``torch.save()``. By default, this module is also used to\n deserialize (\"unpickle\") the PyTorch model at load time.\n :param registered_model_name: (Experimental) If given, create a model version under\n ``registered_model_name``, also creating a registered model if one\n with the given name does not exist.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. Specify 0 or None to skip waiting.\n\n :param requirements_file: A string containing the path to requirements file. 
Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``requirements_file`` string -\n\n requirements_file = \"s3://my-bucket/path/to/my_file\"\n\n In this case, the ``\"my_file\"`` requirements file is downloaded from S3.\n\n If ``None``, no requirements file is added to the model.\n\n :param extra_files: A list containing the paths to corresponding extra files. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``extra_files`` list -\n\n extra_files = [\"s3://my-bucket/path/to/my_file1\",\n \"s3://my-bucket/path/to/my_file2\"]\n\n In this case, the ``\"my_file1 & my_file2\"`` extra file is downloaded from S3.\n\n If ``None``, no extra files are added to the model.\n\n :param kwargs: kwargs to pass to ``torch.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n import numpy as np\n import torch\n import mlflow.pytorch\n\n class LinearNNModel(torch.nn.Module):\n def __init__(self):\n super(LinearNNModel, self).__init__()\n self.linear = torch.nn.Linear(1, 1) # One in and one out\n\n def forward(self, x):\n y_pred = self.linear(x)\n return y_pred\n\n def gen_data():\n # Example linear model modified to use y = 2x\n # from https://github.com/hunkim/PyTorchZeroToAll\n # X training data, y labels\n X = torch.arange(1.0, 25.0).view(-1, 1)\n y = torch.from_numpy(np.array([x * 2 for x in X])).view(-1, 1)\n return X, y\n\n # Define model, loss, and optimizer\n model = LinearNNModel()\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\n # Training loop\n epochs = 250\n X, y = gen_data()\n for epoch in range(epochs):\n # Forward pass: Compute predicted y by passing X to the model\n y_pred = model(X)\n\n # Compute the loss\n loss = criterion(y_pred, y)\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Log the model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # convert to scripted model and log the model\n scripted_pytorch_model = torch.jit.script(model)\n mlflow.pytorch.log_model(scripted_pytorch_model, \"scripted_model\")\n\n # Fetch the logged model artifacts\n print(\"run_id: {}\".format(run.info.run_id))\n for artifact_path in [\"model/data\", \"scripted_model/data\"]:\n artifacts = [f.path for f in MlflowClient().list_artifacts(run.info.run_id,\n artifact_path)]\n print(\"artifacts: {}\".format(artifacts))\n\n .. code-block:: text\n :caption: Output\n\n run_id: 1a1ec9e413ce48e9abf9aec20efd6f71\n artifacts: ['model/data/model.pth',\n 'model/data/pickle_module_info.txt']\n artifacts: ['scripted_model/data/model.pth',\n 'scripted_model/data/pickle_module_info.txt']\n\n .. 
figure:: ../_static/images/pytorch_logged_models.png\n\n PyTorch logged models\n \"\"\"\n pickle_module = pickle_module or mlflow_pytorch_pickle_module\n Model.log(\n artifact_path=artifact_path,\n flavor=mlflow.pytorch,\n pytorch_model=pytorch_model,\n conda_env=conda_env,\n code_paths=code_paths,\n pickle_module=pickle_module,\n registered_model_name=registered_model_name,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n requirements_file=requirements_file,\n extra_files=extra_files,\n **kwargs,\n )\n\n\ndef save_model(\n pytorch_model,\n path,\n conda_env=None,\n mlflow_model=None,\n code_paths=None,\n pickle_module=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n requirements_file=None,\n extra_files=None,\n **kwargs\n):\n \"\"\"\n Save a PyTorch model to a path on the local file system.\n\n :param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of\n ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``\n or ``torch.jit.trace``.\n\n The model accept a single ``torch.FloatTensor`` as\n input and produce a single output tensor.\n\n If saving an eager model, any code dependencies of the\n model's class, including the class definition itself, should be\n included in one of the following locations:\n\n - The package(s) listed in the model's Conda environment, specified\n by the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_paths`` parameter.\n\n :param path: Local path where the model is to be saved.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decsribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If ``None``, the default\n :func:`get_default_conda_env()` environment is added to the model. The\n following is an *example* dictionary representation of a Conda environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'pytorch=0.4.1',\n 'torchvision=0.2.1'\n ]\n }\n\n :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path when the model is loaded.\n :param pickle_module: The module that PyTorch should use to serialize (\"pickle\") the specified\n ``pytorch_model``. This is passed as the ``pickle_module`` parameter\n to ``torch.save()``. By default, this module is also used to\n deserialize (\"unpickle\") the PyTorch model at load time.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... 
# compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n :param requirements_file: A string containing the path to requirements file. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``requirements_file`` string -\n\n requirements_file = \"s3://my-bucket/path/to/my_file\"\n\n In this case, the ``\"my_file\"`` requirements file is downloaded from S3.\n\n If ``None``, no requirements file is added to the model.\n\n :param extra_files: A list containing the paths to corresponding extra files. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``extra_files`` list -\n\n extra_files = [\"s3://my-bucket/path/to/my_file1\",\n \"s3://my-bucket/path/to/my_file2\"]\n\n In this case, the ``\"my_file1 & my_file2\"`` extra file is downloaded from S3.\n\n If ``None``, no extra files are added to the model.\n\n :param kwargs: kwargs to pass to ``torch.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n import os\n\n import torch\n import mlflow.pytorch\n\n # Class defined here\n class LinearNNModel(torch.nn.Module):\n ...\n\n # Initialize our model, criterion and optimizer\n ...\n\n # Training loop\n ...\n\n # Save PyTorch models to current working directory\n with mlflow.start_run() as run:\n mlflow.pytorch.save_model(model, \"model\")\n\n # Convert to a scripted model and save it\n scripted_pytorch_model = torch.jit.script(model)\n mlflow.pytorch.save_model(scripted_pytorch_model, \"scripted_model\")\n\n # Load each saved model for inference\n for model_path in [\"model\", \"scripted_model\"]:\n model_uri = \"{}/{}\".format(os.getcwd(), model_path)\n loaded_model = mlflow.pytorch.load_model(model_uri)\n print(\"Loaded {}:\".format(model_path))\n for x in [6.0, 8.0, 12.0, 30.0]:\n X = torch.Tensor([[x]])\n y_pred = loaded_model(X)\n print(\"predict X: {}, y_pred: {:.2f}\".format(x, y_pred.data.item()))\n print(\"--\")\n\n .. 
code-block:: text\n :caption: Output\n\n Loaded model:\n predict X: 6.0, y_pred: 11.90\n predict X: 8.0, y_pred: 15.92\n predict X: 12.0, y_pred: 23.96\n predict X: 30.0, y_pred: 60.13\n --\n Loaded scripted_model:\n predict X: 6.0, y_pred: 11.90\n predict X: 8.0, y_pred: 15.92\n predict X: 12.0, y_pred: 23.96\n predict X: 30.0, y_pred: 60.13\n \"\"\"\n import torch\n\n pickle_module = pickle_module or mlflow_pytorch_pickle_module\n\n if not isinstance(pytorch_model, torch.nn.Module):\n raise TypeError(\"Argument 'pytorch_model' should be a torch.nn.Module\")\n if code_paths is not None:\n if not isinstance(code_paths, list):\n raise TypeError(\"Argument code_paths should be a list, not {}\".format(type(code_paths)))\n path = os.path.abspath(path)\n if os.path.exists(path):\n raise RuntimeError(\"Path '{}' already exists\".format(path))\n\n if mlflow_model is None:\n mlflow_model = Model()\n\n os.makedirs(path)\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n model_data_subpath = \"data\"\n model_data_path = os.path.join(path, model_data_subpath)\n os.makedirs(model_data_path)\n # Persist the pickle module name as a file in the model's `data` directory. This is necessary\n # because the `data` directory is the only available parameter to `_load_pyfunc`, and it\n # does not contain the MLmodel configuration; therefore, it is not sufficient to place\n # the module name in the MLmodel\n #\n # TODO: Stop persisting this information to the filesystem once we have a mechanism for\n # supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc`\n pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME)\n with open(pickle_module_path, \"w\") as f:\n f.write(pickle_module.__name__)\n # Save pytorch model\n model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME)\n if isinstance(pytorch_model, torch.jit.ScriptModule):\n torch.jit.ScriptModule.save(pytorch_model, model_path)\n else:\n torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs)\n\n torchserve_artifacts_config = {}\n\n if requirements_file:\n if not isinstance(requirements_file, str):\n raise TypeError(\"Path to requirements file should be a string\")\n\n with TempDir() as tmp_requirements_dir:\n _download_artifact_from_uri(\n artifact_uri=requirements_file, output_path=tmp_requirements_dir.path()\n )\n rel_path = os.path.basename(requirements_file)\n torchserve_artifacts_config[_REQUIREMENTS_FILE_KEY] = {\"path\": rel_path}\n shutil.move(tmp_requirements_dir.path(rel_path), path)\n\n if extra_files:\n torchserve_artifacts_config[_EXTRA_FILES_KEY] = []\n if not isinstance(extra_files, list):\n raise TypeError(\"Extra files argument should be a list\")\n\n with TempDir() as tmp_extra_files_dir:\n for extra_file in extra_files:\n _download_artifact_from_uri(\n artifact_uri=extra_file, output_path=tmp_extra_files_dir.path()\n )\n rel_path = posixpath.join(_EXTRA_FILES_KEY, os.path.basename(extra_file),)\n torchserve_artifacts_config[_EXTRA_FILES_KEY].append({\"path\": rel_path})\n shutil.move(\n tmp_extra_files_dir.path(), posixpath.join(path, _EXTRA_FILES_KEY),\n )\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, 
stream=f, default_flow_style=False)\n\n if code_paths is not None:\n code_dir_subpath = \"code\"\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)\n else:\n code_dir_subpath = None\n\n mlflow_model.add_flavor(\n FLAVOR_NAME,\n model_data=model_data_subpath,\n pytorch_version=torch.__version__,\n **torchserve_artifacts_config,\n )\n pyfunc.add_to_model(\n mlflow_model,\n loader_module=\"mlflow.pytorch\",\n data=model_data_subpath,\n pickle_module_name=pickle_module.__name__,\n code=code_dir_subpath,\n env=conda_env_subpath,\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n\ndef _load_model(path, **kwargs):\n \"\"\"\n :param path: The path to a serialized PyTorch model.\n :param kwargs: Additional kwargs to pass to the PyTorch ``torch.load`` function.\n \"\"\"\n import torch\n\n if os.path.isdir(path):\n # `path` is a directory containing a serialized PyTorch model and a text file containing\n # information about the pickle module that should be used by PyTorch to load it\n model_path = os.path.join(path, \"model.pth\")\n pickle_module_path = os.path.join(path, _PICKLE_MODULE_INFO_FILE_NAME)\n with open(pickle_module_path, \"r\") as f:\n pickle_module_name = f.read()\n if \"pickle_module\" in kwargs and kwargs[\"pickle_module\"].__name__ != pickle_module_name:\n _logger.warning(\n \"Attempting to load the PyTorch model with a pickle module, '%s', that does not\"\n \" match the pickle module that was used to save the model: '%s'.\",\n kwargs[\"pickle_module\"].__name__,\n pickle_module_name,\n )\n else:\n try:\n kwargs[\"pickle_module\"] = importlib.import_module(pickle_module_name)\n except ImportError as exc:\n raise MlflowException(\n message=(\n \"Failed to import the pickle module that was used to save the PyTorch\"\n \" model. Pickle module name: `{pickle_module_name}`\".format(\n pickle_module_name=pickle_module_name\n )\n ),\n error_code=RESOURCE_DOES_NOT_EXIST,\n ) from exc\n\n else:\n model_path = path\n\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n return torch.load(model_path, **kwargs)\n else:\n try:\n # load the model as an eager model.\n return torch.load(model_path, **kwargs)\n except Exception: # pylint: disable=broad-except\n # If fails, assume the model as a scripted model\n return torch.jit.load(model_path)\n\n\ndef load_model(model_uri, **kwargs):\n \"\"\"\n Load a PyTorch model from a local file or a run.\n\n :param model_uri: The location, in URI format, of the MLflow model, for example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param kwargs: kwargs to pass to ``torch.load`` method.\n :return: A PyTorch model.\n\n .. 
code-block:: python\n :caption: Example\n\n import torch\n import mlflow.pytorch\n\n # Class defined here\n class LinearNNModel(torch.nn.Module):\n ...\n\n # Initialize our model, criterion and optimizer\n ...\n\n # Training loop\n ...\n\n # Log the model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # Inference after loading the logged model\n model_uri = \"runs:/{}/model\".format(run.info.run_id)\n loaded_model = mlflow.pytorch.load_model(model_uri)\n for x in [4.0, 6.0, 30.0]:\n X = torch.Tensor([[x]])\n y_pred = loaded_model(X)\n print(\"predict X: {}, y_pred: {:.2f}\".format(x, y_pred.data.item()))\n\n .. code-block:: text\n :caption: Output\n\n predict X: 4.0, y_pred: 7.57\n predict X: 6.0, y_pred: 11.64\n predict X: 30.0, y_pred: 60.48\n \"\"\"\n import torch\n\n local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)\n try:\n pyfunc_conf = _get_flavor_configuration(\n model_path=local_model_path, flavor_name=pyfunc.FLAVOR_NAME\n )\n except MlflowException:\n pyfunc_conf = {}\n code_subpath = pyfunc_conf.get(pyfunc.CODE)\n if code_subpath is not None:\n pyfunc_utils._add_code_to_system_path(\n code_path=os.path.join(local_model_path, code_subpath)\n )\n\n pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)\n if torch.__version__ != pytorch_conf[\"pytorch_version\"]:\n _logger.warning(\n \"Stored model version '%s' does not match installed PyTorch version '%s'\",\n pytorch_conf[\"pytorch_version\"],\n torch.__version__,\n )\n torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf[\"model_data\"])\n return _load_model(path=torch_model_artifacts_path, **kwargs)\n\n\ndef _load_pyfunc(path, **kwargs):\n \"\"\"\n Load PyFunc implementation. 
Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor.\n \"\"\"\n return _PyTorchWrapper(_load_model(path, **kwargs))\n\n\nclass _PyTorchWrapper(object):\n \"\"\"\n Wrapper class that creates a predict function such that\n predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)\n \"\"\"\n\n def __init__(self, pytorch_model):\n self.pytorch_model = pytorch_model\n\n def predict(self, data, device=\"cpu\"):\n import torch\n\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\"Input data should be pandas.DataFrame\")\n self.pytorch_model.to(device)\n self.pytorch_model.eval()\n with torch.no_grad():\n input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device)\n preds = self.pytorch_model(input_tensor)\n if not isinstance(preds, torch.Tensor):\n raise TypeError(\n \"Expected PyTorch model to output a single output tensor, \"\n \"but got output of type '{}'\".format(type(preds))\n )\n predicted = pd.DataFrame(preds.numpy())\n predicted.index = data.index\n return predicted\n\n\n@experimental\n@autologging_integration(FLAVOR_NAME)\ndef autolog(log_every_n_epoch=1, log_models=True, disable=False): # pylint: disable=unused-argument\n \"\"\"\n Enables (or disables) and configures autologging from `PyTorch Lightning\n <https://pytorch-lightning.readthedocs.io/en/latest>`_ to MLflow.\n\n Autologging is performed when you call the `fit` method of\n `pytorch_lightning.Trainer() \\\n <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html#>`_.\n\n Explore the complete `PyTorch MNIST \\\n <https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST/example1>`_ for\n an expansive example with implementation of additional lightening steps.\n\n **Note**: Autologging is only supported for PyTorch Lightning models,\n i.e., models that subclass\n `pytorch_lightning.LightningModule \\\n <https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html>`_.\n In particular, autologging support for vanilla PyTorch models that only subclass\n `torch.nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_\n is not yet available.\n\n :param log_every_n_epoch: If specified, logs metrics once every `n` epochs. By default, metrics\n are logged after every epoch.\n :param log_models: If ``True``, trained models are logged as MLflow model artifacts.\n If ``False``, trained models are not logged.\n :param disable: If ``True``, disables all supported autologging integrations. If ``False``,\n enables all supported autologging integrations.\n\n .. code-block:: python\n :caption: Example\n\n import os\n\n import pytorch_lightning as pl\n import torch\n from torch.nn import functional as F\n from torch.utils.data import DataLoader\n from torchvision import transforms\n from torchvision.datasets import MNIST\n from pytorch_lightning.metrics.functional import accuracy\n\n import mlflow.pytorch\n from mlflow.tracking import MlflowClient\n\n # For brevity, here is the simplest most minimal example with just a training\n # loop step, (no validation, no testing). 
It illustrates how you can use MLflow\n # to auto log parameters, metrics, and models.\n\n class MNISTModel(pl.LightningModule):\n def __init__(self):\n super(MNISTModel, self).__init__()\n self.l1 = torch.nn.Linear(28 * 28, 10)\n\n def forward(self, x):\n return torch.relu(self.l1(x.view(x.size(0), -1)))\n\n def training_step(self, batch, batch_nb):\n x, y = batch\n loss = F.cross_entropy(self(x), y)\n acc = accuracy(loss, y)\n\n # Use the current of PyTorch logger\n self.log(\"train_loss\", loss, on_epoch=True)\n self.log(\"acc\", acc, on_epoch=True)\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0.02)\n\n def print_auto_logged_info(r):\n\n tags = {k: v for k, v in r.data.tags.items() if not k.startswith(\"mlflow.\")}\n artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, \"model\")]\n print(\"run_id: {}\".format(r.info.run_id))\n print(\"artifacts: {}\".format(artifacts))\n print(\"params: {}\".format(r.data.params))\n print(\"metrics: {}\".format(r.data.metrics))\n print(\"tags: {}\".format(tags))\n\n # Initialize our model\n mnist_model = MNISTModel()\n\n # Initialize DataLoader from MNIST Dataset\n train_ds = MNIST(os.getcwd(), train=True,\n download=True, transform=transforms.ToTensor())\n train_loader = DataLoader(train_ds, batch_size=32)\n\n # Initialize a trainer\n trainer = pl.Trainer(max_epochs=20, progress_bar_refresh_rate=20)\n\n # Auto log all MLflow entities\n mlflow.pytorch.autolog()\n\n # Train the model\n with mlflow.start_run() as run:\n trainer.fit(mnist_model, train_loader)\n\n # fetch the auto logged parameters and metrics\n print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))\n\n .. code-block:: text\n :caption: Output\n\n run_id: 42caa17b60cb489c8083900fb52506a7\n artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/data']\n params: {'betas': '(0.9, 0.999)',\n 'weight_decay': '0',\n 'epochs': '20',\n 'eps': '1e-08',\n 'lr': '0.02',\n 'optimizer_name': 'Adam', '\n amsgrad': 'False'}\n metrics: {'acc_step': 0.0,\n 'train_loss_epoch': 1.0917967557907104,\n 'train_loss_step': 1.0794280767440796,\n 'train_loss': 1.0794280767440796,\n 'acc_epoch': 0.0033333334140479565,\n 'acc': 0.0}\n tags: {'Mode': 'training'}\n\n .. figure:: ../_static/images/pytorch_lightening_autolog.png\n\n PyTorch autologged MLflow entities\n \"\"\"\n import pytorch_lightning as pl\n from mlflow.pytorch._pytorch_autolog import _create_patch_fit\n\n fit = _create_patch_fit(log_every_n_epoch=log_every_n_epoch, log_models=log_models)\n safe_patch(FLAVOR_NAME, pl.Trainer, \"fit\", fit, manage_run=True)\n"
] | [
[
"torch.jit.ScriptModule.save",
"torch.load",
"torch.no_grad",
"torch.save",
"torch.jit.load"
]
] |
dhruvramani/CodeFunDo-2017 | [
"e102202ef0219c249a1666daa3dd6426ab899800"
] | [
"src/random/weights.py"
] | [
"import os\nimport cv2\nimport imutils\nimport numpy as np\nfrom imutils import contours\nfrom imutils import perspective\nfrom scipy.spatial import distance as dist\n\n\ndef detect_shape(filepath, min_width=15, debug=False):\n image = cv2.imread(filepath, 0)\n\n resized = imutils.resize(image, width=300)\n ratio = image.shape[0] / float(resized.shape[0])\n '''\n blurred = cv2.GaussianBlur(resized, (5, 5), 0)\n thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n '''\n gray = cv2.bilateralFilter(resized, 1, 10, 120 )\n edges = cv2.Canny( gray, 10, 250 )\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))\n closed = cv2.morphologyEx( edges, cv2.MORPH_CLOSE, kernel )\n '''\n cnts = cv2.findContours( closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )\n gray = cv2.GaussianBlur(resized, (7, 7), 0)\n edged = cv2.Canny(gray, 10, 250)\n edged = cv2.dilate(edged, None, iterations=1)\n edged = cv2.erode(edged, None, iterations=1)\n '''\n cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n \n shapes = dict()\n print(len(cnts))\n for idx, c in enumerate(cnts):\n try :\n perimeter = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.1 * perimeter, True)\n if len(approx) == 4:\n (x, y, w, h) = cv2.boundingRect(approx)\n shapes[\"rect_{}\".format(idx)] = (x, y, w, h)\n if(debug == True):\n M = cv2.moments(c)\n cX = int((M[\"m10\"] / M[\"m00\"]) * ratio)\n cY = int((M[\"m01\"] / M[\"m00\"]) * ratio)\n c = c.astype(\"float\")\n c *= ratio\n c = c.astype(\"int\")\n cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n cv2.putText(image, \"square\", (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', 300,300)\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n except :\n pass\n\n return shapes\n\ndef midpoint(ptA, ptB):\n return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)\n\ndef min_dif(list1, list2): \n min_d, ind = 1000000, -1\n for i in range(0, len(list1)):\n for j in range(0, len(list2)):\n if(list1[i]-list2[j] < min_d):\n ind = j\n min_d = list1[i]-list2[j]\n return ind\n\ndef object_size(filepath, left_width=15):\n image = cv2.imread(filepath, 0)\n #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(image, (7, 7), 0)\n \n edged = cv2.Canny(gray, 50, 100)\n edged = cv2.dilate(edged, None, iterations=1)\n edged = cv2.erode(edged, None, iterations=1)\n\n # NOTE : Contour - Outlines\n cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n (cnts, _) = contours.sort_contours(cnts)\n pixelsPerMetric = None\n\n dimensions = list()\n for c in cnts:\n if cv2.contourArea(c) < 100:\n continue\n orig = image.copy()\n box = cv2.minAreaRect(c)\n box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)\n box = np.array(box, dtype=\"int\")\n box = perspective.order_points(box)\n\n (tl, tr, br, bl) = box\n (tltrX, tltrY) = midpoint(tl, tr)\n (blbrX, blbrY) = midpoint(bl, br)\n (tlblX, tlblY) = midpoint(tl, bl)\n (trbrX, trbrY) = midpoint(tr, br)\n\n cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)\n cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)\n cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)\n cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)\n \n # draw lines between the midpoints\n cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), 
(255, 0, 255), 2)\n cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)\n\n dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))\n dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))\n\n if pixelsPerMetric is None:\n pixelsPerMetric = dB / left_width\n\n dimA = dA / pixelsPerMetric\n dimB = dB / pixelsPerMetric\n\n cv2.putText(orig, \"{:.1f}in\".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)\n cv2.putText(orig, \"{:.1f}in\".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', 300,300)\n cv2.imshow(\"image\", orig)\n cv2.waitKey(0)\n\n dimensions.append((dimA, dimB))\n\n max_dim = [-1, -1]\n for dims in dimensions:\n if(dims[0] * dims[1] > max_dim[0] * max_dim[1] and left_width not in dims):\n max_dim[0] = dims[0]\n max_dim[1] = dims[1]\n return max_dim\n\ndef weight(file1, file2, left_width=21, const_div=6000.0): # left_width = A4 Size\n size1 = object_size(file1, left_width)\n size2 = object_size(file2, left_width)\n rem_ind = min_dif(size1, size2)\n weight = (size1[0] * size1[1] * size2[1-rem_ind]) / const_div\n return weight\n\nif __name__ == '__main__':\n print(detect_shape(\"img.jpg\", debug=True))\n"
] | [
[
"numpy.array",
"scipy.spatial.distance.euclidean"
]
] |
spitzc32/CropMe | [
"6f3c0c9512cbf56d64b40c5c05a33627d6eaf51d"
] | [
"utils/data_operations.py"
] | [
"import numpy as np\n\n\ndef euclidean_distance(p1,p2):\n\t\"\"\"\n\treturns euclidean distance between matrices\t\n\t@params:\n\t\tp1, p2: np.ndarray\n\t\t\tmatrices to perform operation to.\n\t\"\"\"\n\treturn np.sqrt(np.sum((p1-p2)**2, axis=1))\n\n\ndef entropy(p):\n\t\t\"\"\"\n\t\tWill be our measurement for uncertainty in our construction \n\t\tof descision tree\n\t\t@params:\n\t\t\tp: float\n\n\t\t\"\"\"\n\t\tif p == 0:\n\t\t\treturn 0\n\t\telif p == 1:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn -(p * np.log2(p) + (1 - p) * np.log2(1 - p))\n\n\ndef information_gain(left_child, right_child):\n\t\t\"\"\"\n\t\tmeasurement of how much info we gained when splitting a node\n\t\tusing our entropy method.\n\t\t@def:\n\t\t\ttakes in a list of classes from left and right child to return\n\t\t\tthe information gain of our curr split\n\t\t@params:\n\t\t\tleft_child: np.ndarray\n\t\t\t\tcurr left child arr\n\t\t\tright_child: np.ndarray\n\t\t\t\tcurr left child arr\n\t\t\"\"\"\n\t\tparent = left_child + right_child\n\t\tp_par = parent.count(1) / len(parent) if len(parent) > 0 else 0\n\t\tp_left = left_child.count(1) / len(left_child) if len(left_child) \\\n\t\t> 0 else 0\n\t\tp_right = right_child.count(1) / len(right_child) if len(right_child) \\\n\t\t> 0 else 0\n\n\t\tinfogain_p = self.entropy(p_par)\n\t\tinfogain_l = self.entropy(p_left)\n\t\tinfogain_r = self.entropy(p_right)\n\n\t\treturn infogain_p - len(left_child) / len(parent) * infogain_l - \\\n\t\tlen(right_child) / len(parent) * infogain_r\n"
] | [
[
"numpy.sum",
"numpy.log2"
]
] |
linamnt/PySyft | [
"4b60a86c003acbe1967d6c3d611df3d5f2d377ee"
] | [
"test/generic/test_object_storage.py"
] | [
"import torch\n\nfrom syft.generic import object_storage\n\n\ndef test_clear_objects():\n obj_storage = object_storage.ObjectStorage()\n\n x = torch.tensor(1)\n obj_storage.set_obj(x)\n\n objs = obj_storage.current_objects()\n\n assert len(objs) == 1\n assert objs[x.id] == x\n\n ret_val = obj_storage.clear_objects()\n\n objs = obj_storage.current_objects()\n assert len(objs) == 0\n assert ret_val == obj_storage\n\n\ndef test_clear_objects_return_None():\n obj_storage = object_storage.ObjectStorage()\n\n x = torch.tensor(1)\n obj_storage.set_obj(x)\n\n objs = obj_storage.current_objects()\n\n assert len(objs) == 1\n assert objs[x.id] == x\n\n ret_val = obj_storage.clear_objects(return_self=False)\n\n objs = obj_storage.current_objects()\n assert len(objs) == 0\n assert ret_val is None\n"
] | [
[
"torch.tensor"
]
] |
xuyuandong/sequence_behavior_ctr_model | [
"e1bb71b4579456b1c6fbf3b432a84a3cb52611b7"
] | [
"script/utils.py"
] | [
"import tensorflow as tf\n#from tensorflow.python.ops.rnn_cell import *\n#from tensorflow.python.ops.rnn_cell_impl import _Linear\nfrom tensorflow.contrib.rnn.python.ops.core_rnn_cell import *\n#from tensorflow import keras\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variable_scope as vs\n#from keras import backend as K\n\ndef din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n print (\"query_size mismatch\")\n query = tf.concat(values = [\n query,\n query,\n ], axis=1)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)\n d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)\n d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])\n scores = d_layer_3_all\n\n if mask is not None:\n mask = tf.equal(mask, tf.ones_like(mask))\n key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(scores) * (-2 ** 32 + 1)\n scores = tf.where(key_masks, scores, paddings) # [B, 1, T]\n\n # Activation\n if softmax_stag:\n scores = tf.nn.softmax(scores) # [B, 1, T]\n\n # Weighted sum\n if mode == 'SUM':\n output = tf.matmul(scores, facts) # [B, 1, H]\n # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])\n else:\n scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])\n output = facts * tf.expand_dims(scores, -1)\n output = tf.reshape(output, tf.shape(facts))\n \n if return_alphas:\n return output, scores\n \n return output\n\n\nclass VecAttGRUCell(RNNCell):\n \"\"\"Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).\n Args:\n num_units: int, The number of units in the GRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight and\n projection matrices.\n bias_initializer: (optional) The initializer to use for the bias.\n \"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None):\n super(VecAttGRUCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n self._gate_linear = None\n self._candidate_linear = None\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n def __call__(self, inputs, state, att_score):\n return self.call(inputs, state, att_score)\n def call(self, inputs, state, att_score=None):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n if self._gate_linear is None:\n bias_ones = self._bias_initializer\n if self._bias_initializer is None:\n bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)\n with vs.variable_scope(\"gates\"): # Reset gate and update gate.\n self._gate_linear = _Linear(\n [inputs, state],\n 2 * self._num_units,\n True,\n bias_initializer=bias_ones,\n kernel_initializer=self._kernel_initializer)\n\n value = math_ops.sigmoid(self._gate_linear([inputs, state]))\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n if self._candidate_linear is None:\n with vs.variable_scope(\"candidate\"):\n self._candidate_linear = _Linear(\n [inputs, r_state],\n self._num_units,\n True,\n bias_initializer=self._bias_initializer,\n kernel_initializer=self._kernel_initializer)\n c = self._activation(self._candidate_linear([inputs, r_state]))\n u = (1.0 - att_score) * u\n new_h = u * state + (1 - u) * c\n return new_h, new_h\n\ndef prelu(_x, scope=''):\n \"\"\"parametric ReLU activation\"\"\"\n with tf.variable_scope(name_or_scope=scope, default_name=\"prelu\"):\n _alpha = tf.get_variable(\"prelu_\"+scope, shape=_x.get_shape()[-1],\n dtype=_x.dtype, initializer=tf.constant_initializer(0.1))\n return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)\n\ndef calc_auc(raw_arr):\n \"\"\"Summary\n\n Args:\n raw_arr (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n\n arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)\n pos, neg = 0., 0.\n for record in arr:\n if record[1] == 1.:\n pos += 1\n else:\n neg += 1\n\n fp, tp = 0., 0.\n xy_arr = []\n for record in arr:\n if record[1] == 1.:\n tp += 1\n else:\n fp += 1\n xy_arr.append([fp/neg, tp/pos])\n\n auc = 0.\n prev_x = 0.\n prev_y = 0.\n for x, y in xy_arr:\n if x != prev_x:\n auc += ((x - prev_x) * (y + prev_y) / 2.)\n prev_x = x\n prev_y = y\n\n return auc\n\ndef calc_gauc(raw_arr, nick_index):\n \"\"\"Summary\n\n Args:\n raw_arr (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n last_index = 0\n gauc = 0.\n pv_sum = 0\n for idx in xrange(len(nick_index)):\n if nick_index[idx] != nick_index[last_index]:\n input_arr = raw_arr[last_index:idx]\n auc_val=calc_auc(input_arr)\n if auc_val >= 0.0:\n gauc += auc_val * len(input_arr)\n pv_sum += len(input_arr)\n else:\n pv_sum += len(input_arr) \n last_index = idx\n return gauc / pv_sum\n \n\n\n\ndef attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of 
Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n\n mask = tf.equal(mask, tf.ones_like(mask))\n hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n input_size = query.get_shape().as_list()[-1]\n\n # Trainable parameters\n w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))\n w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))\n b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n\n with tf.name_scope('v'):\n # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\n # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\n tmp1 = tf.tensordot(facts, w1, axes=1)\n tmp2 = tf.tensordot(query, w2, axes=1)\n tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])\n tmp = tf.tanh((tmp1 + tmp2) + b)\n\n # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector\n v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape\n key_masks = mask # [B, 1, T]\n # key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)\n v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]\n alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape\n\n # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\n #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)\n output = facts * tf.expand_dims(alphas, -1)\n output = tf.reshape(output, tf.shape(facts))\n # output = output / (facts.get_shape().as_list()[-1] ** 0.5)\n if not return_alphas:\n return output\n else:\n return output, alphas\n\n\ndef din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n # Trainable parameters\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)\n query = prelu(query)\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)\n d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)\n d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])\n scores = d_layer_3_all\n # Mask\n if mask is not None:\n # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]\n key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(scores) * (-2 ** 32 + 1)\n if not forCnn:\n scores = tf.where(key_masks, scores, paddings) # [B, 1, T]\n\n # Scale\n # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)\n\n # Activation\n if 
softmax_stag:\n scores = tf.nn.softmax(scores) # [B, 1, T]\n\n # Weighted sum\n if mode == 'SUM':\n output = tf.matmul(scores, facts) # [B, 1, H]\n # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])\n else:\n scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])\n output = facts * tf.expand_dims(scores, -1)\n output = tf.reshape(output, tf.shape(facts))\n if return_alphas:\n return output, scores\n return output\n\ndef self_attention(facts, ATTENTION_SIZE, mask, stag='null'):\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n def cond(batch, output, i):\n return tf.less(i, tf.shape(batch)[1])\n\n def body(batch, output, i):\n self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],\n ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,\n mode='LIST')\n self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)\n output = output.write(i, self_attention_tmp)\n return batch, output, i + 1\n\n output_ta = tf.TensorArray(dtype=tf.float32,\n size=0,\n dynamic_size=True,\n element_shape=(facts[:, 0, :].get_shape()))\n _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])\n self_attention = output_op.stack()\n self_attention = tf.transpose(self_attention, perm = [1, 0, 2])\n return self_attention\n\ndef self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n def cond(batch, output, i):\n return tf.less(i, tf.shape(batch)[1])\n\n def body(batch, output, i):\n self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,\n ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,\n mode='LIST')\n self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)\n output = output.write(i, self_attention_tmp)\n return batch, output, i + 1\n\n output_ta = tf.TensorArray(dtype=tf.float32,\n size=0,\n dynamic_size=True,\n element_shape=(facts[:, 0, :].get_shape()))\n _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])\n self_attention = output_op.stack()\n self_attention = tf.transpose(self_attention, perm = [1, 0, 2])\n return self_attention\n\ndef din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n # Trainable parameters\n mask = tf.equal(mask, tf.ones_like(mask))\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)\n query = prelu(query)\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)\n d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))\n output = d_layer_2_all\n return output\n\n"
] | [
[
"tensorflow.variable_scope",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.concat",
"tensorflow.random_normal",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.array_ops.transpose",
"tensorflow.minimum",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.transpose",
"tensorflow.constant_initializer",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.tanh",
"tensorflow.expand_dims",
"tensorflow.tensordot",
"tensorflow.while_loop",
"tensorflow.layers.dense",
"tensorflow.python.ops.array_ops.split",
"tensorflow.where",
"tensorflow.maximum"
]
] |
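The `attention` and `din_fcn_attention` helpers in the record above share one pattern: scores at padded timesteps are overwritten with a large negative sentinel (`-2**32 + 1`) before the softmax, so padding receives near-zero weight. A minimal NumPy sketch of that masked-softmax step (not the repo's TensorFlow code; shapes and names are illustrative):

```python
import numpy as np

def masked_attention(scores, facts, mask):
    """scores: (B, 1, T); facts: (B, T, D); mask: (B, T) boolean."""
    paddings = np.full_like(scores, -2.0 ** 32 + 1)        # same sentinel as above
    scores = np.where(mask[:, None, :], scores, paddings)  # mirrors tf.where
    scores = np.exp(scores - scores.max(axis=-1, keepdims=True))
    alphas = scores / scores.sum(axis=-1, keepdims=True)   # softmax over T
    return alphas @ facts                                  # (B, 1, D), mode='SUM'

B, T, D = 2, 4, 3
rng = np.random.default_rng(0)
facts = rng.normal(size=(B, T, D))
scores = rng.normal(size=(B, 1, T))
mask = np.array([[True, True, False, False],
                 [True, True, True, False]])
print(masked_attention(scores, facts, mask).shape)  # (2, 1, 3)
```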
boutproject/VECMA-hackathon | [
"07632a267fcaff582bf410eba13f7bc81d8ea6eb"
] | [
"workflows/sc_adaptive_restartable/example_restartable_sc_adaptive.py"
] | [
"#!/usr/bin/env python3\n\nimport argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\n\nCAMPAIGN_NAME = \"Conduction.\"\n\n\ndef refine_sampling_plan(campaign, analysis, number_of_refinements):\n \"\"\"\n Refine the sampling plan.\n\n Parameters\n ----------\n number_of_refinements (int)\n The number of refinement iterations that must be performed.\n\n Returns\n -------\n None. The new accepted indices are stored in analysis.l_norm and the admissible indices\n in sampler.admissible_idx.\n \"\"\"\n\n sampler = campaign.get_active_sampler()\n\n for _ in range(number_of_refinements):\n # compute the admissible indices\n sampler.look_ahead(analysis.l_norm)\n\n print(f\"Code will be evaluated {sampler.n_new_points[-1]} times\")\n # run the ensemble\n campaign.execute().collate(progress_bar=True)\n\n # accept one of the multi indices of the new admissible set\n data_frame = campaign.get_collation_result()\n analysis.adapt_dimension(\"T\", data_frame)\n analysis.save_state(f\"{campaign.campaign_dir}/analysis.state\")\n\n\ndef plot_grid_2D(campaign, analysis, i, filename=\"out.pdf\"):\n fig = plt.figure(figsize=[12, 4])\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n accepted_grid = campaign.get_active_sampler().generate_grid(analysis.l_norm)\n ax1.plot(accepted_grid[:, 0], accepted_grid[:, 1], \"o\")\n ax2.plot(accepted_grid[:, 2], accepted_grid[:, 3], \"o\")\n ax1.set_title(f\"iteration {i}\")\n\n fig.tight_layout()\n fig.savefig(filename)\n\n\ndef custom_moments_plot(results, filename, i):\n fig, ax = plt.subplots()\n xvalues = np.arange(len(results.describe(\"T\", \"mean\")))\n ax.fill_between(\n xvalues,\n results.describe(\"T\", \"mean\") - results.describe(\"T\", \"std\"),\n results.describe(\"T\", \"mean\") + results.describe(\"T\", \"std\"),\n label=\"std\",\n alpha=0.2,\n )\n ax.plot(xvalues, results.describe(\"T\", \"mean\"), label=\"mean\")\n try:\n ax.plot(xvalues, results.describe(\"T\", \"1%\"), \"--\", label=\"1%\", color=\"black\")\n ax.plot(xvalues, results.describe(\"T\", \"99%\"), \"--\", label=\"99%\", color=\"black\")\n except RuntimeError:\n pass\n ax.grid(True)\n ax.set_ylabel(\"T\")\n ax.set_xlabel(r\"$\\rho$\")\n ax.set_title(\"iteration \" + str(i))\n ax.legend()\n fig.savefig(filename)\n\n\ndef first_time_setup():\n encoder = boutvecma.BOUTEncoder(\n template_input=\"../../models/conduction/data/BOUT.inp\"\n )\n # decoder = boutvecma.LogDataBOUTDecoder(variables=[\"T\"])\n decoder = boutvecma.SimpleBOUTDecoder(variables=[\"T\"])\n params = {\n \"conduction:chi\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:scale\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:gauss_width\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 0.2},\n \"T:gauss_centre\": {\n \"type\": \"float\",\n \"min\": 0.0,\n \"max\": 2 * np.pi,\n \"default\": np.pi,\n },\n }\n actions = uq.actions.local_execute(\n encoder,\n os.path.abspath(\n \"../../build/models/conduction/conduction -q -q -q -q -d . 
|& tee run.log\"\n ),\n decoder,\n root=\".\",\n )\n campaign = uq.Campaign(name=CAMPAIGN_NAME, actions=actions, params=params)\n\n vary = {\n \"conduction:chi\": chaospy.Uniform(0.2, 4.0),\n \"T:scale\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_width\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_centre\": chaospy.Uniform(0.5 * np.pi, 1.5 * np.pi),\n }\n\n sampler = uq.sampling.SCSampler(\n vary=vary,\n polynomial_order=1,\n quadrature_rule=\"C\",\n sparse=True,\n growth=True,\n midpoint_level1=True,\n dimension_adaptive=True,\n )\n campaign.set_sampler(sampler)\n\n print(f\"Output will be in {campaign.campaign_dir}\")\n\n sampler = campaign.get_active_sampler()\n\n print(f\"Computing {sampler.n_samples} samples\")\n\n time_start = time.time()\n campaign.execute().collate(progress_bar=True)\n\n # Create an analysis class and run the analysis.\n analysis = create_analysis(campaign)\n campaign.apply_analysis(analysis)\n analysis.save_state(f\"{campaign.campaign_dir}/analysis.state\")\n plot_grid_2D(campaign, analysis, 0, f\"{campaign.campaign_dir}/grid0.png\")\n\n for i in np.arange(1, 10):\n refine_once(campaign, analysis, i)\n time_end = time.time()\n\n print(f\"Finished, took {time_end - time_start}\")\n\n return campaign\n\n\ndef create_analysis(campaign):\n return uq.analysis.SCAnalysis(sampler=campaign.get_active_sampler(), qoi_cols=[\"T\"])\n\n\ndef refine_once(campaign, analysis, iteration):\n refine_sampling_plan(campaign, analysis, 1)\n campaign.apply_analysis(analysis)\n analysis.save_state(f\"{campaign.campaign_dir}/analysis.state\")\n\n results = campaign.last_analysis\n plot_grid_2D(\n campaign,\n analysis,\n iteration,\n f\"{campaign.campaign_dir}/grid{iteration:02}.png\",\n )\n moment_plot_filename = os.path.join(\n f\"{campaign.campaign_dir}\", f\"moments{iteration:02}.png\"\n )\n sobols_plot_filename = os.path.join(\n f\"{campaign.campaign_dir}\", f\"sobols_first{iteration:02}.png\"\n )\n results.plot_sobols_first(\n \"T\",\n ylabel=f\"iteration{iteration}\",\n xlabel=r\"$\\rho$\",\n filename=sobols_plot_filename,\n )\n plt.ylim(0, 1)\n plt.savefig(f\"{campaign.campaign_dir}/sobols{iteration:02}.png\")\n\n custom_moments_plot(results, moment_plot_filename, iteration)\n\n with open(f\"{campaign.campaign_dir}/last_iteration\", \"w\") as f:\n f.write(f\"{iteration}\")\n\n\ndef plot_results(campaign, moment_plot_filename, sobols_plot_filename):\n results = campaign.get_last_analysis()\n\n results.plot_sobols_first(\"T\", xlabel=r\"$\\rho$\", filename=sobols_plot_filename)\n\n fig, ax = plt.subplots()\n xvalues = np.arange(len(results.describe(\"T\", \"mean\")))\n ax.fill_between(\n xvalues,\n results.describe(\"T\", \"mean\") - results.describe(\"T\", \"std\"),\n results.describe(\"T\", \"mean\") + results.describe(\"T\", \"std\"),\n label=\"std\",\n alpha=0.2,\n )\n ax.plot(xvalues, results.describe(\"T\", \"mean\"), label=\"mean\")\n try:\n ax.plot(xvalues, results.describe(\"T\", \"1%\"), \"--\", label=\"1%\", color=\"black\")\n ax.plot(xvalues, results.describe(\"T\", \"99%\"), \"--\", label=\"99%\", color=\"black\")\n except RuntimeError:\n pass\n ax.grid(True)\n ax.set_ylabel(\"T\")\n ax.set_xlabel(r\"$\\rho$\")\n ax.legend()\n fig.savefig(moment_plot_filename)\n\n print(f\"Results are in:\\n\\t{moment_plot_filename}\\n\\t{sobols_plot_filename}\")\n\n\ndef reload_campaign(directory):\n \"\"\"Reload a campaign from a directory\n\n Returns the campaign, analysis, and last iteration number\n \"\"\"\n\n campaign = uq.Campaign(\n name=CAMPAIGN_NAME,\n 
db_location=f\"sqlite:///{os.path.abspath(directory)}/campaign.db\",\n )\n analysis = create_analysis(campaign)\n analysis.load_state(f\"{campaign.campaign_dir}/analysis.state\")\n\n with open(f\"{campaign.campaign_dir}/last_iteration\", \"r\") as f:\n iteration = int(f.read())\n\n return campaign, analysis, iteration\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n \"conduction_sc\",\n description=\"Adaptive dimension refinement for 1D conduction model\",\n )\n parser.add_argument(\n \"--restart\", type=str, help=\"Restart previous campaign\", default=None\n )\n parser.add_argument(\n \"-n\", \"--refinement-num\", type=int, default=1, help=\"Number of refinements\"\n )\n\n args = parser.parse_args()\n\n if args.restart is None:\n first_time_setup()\n else:\n campaign, analysis, last_iteration = reload_campaign(args.restart)\n for iteration in range(\n last_iteration + 1, last_iteration + args.refinement_num + 1\n ):\n refine_once(campaign, analysis, iteration)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.ylim"
]
] |
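The restartable campaign above persists only the last completed iteration number to a plain `last_iteration` file, which is what lets `--restart` resume the refinement loop. A self-contained sketch of that bookkeeping, with a hypothetical `refine_once` stand-in so it runs without EasyVVUQ:

```python
import os
import tempfile

def save_iteration(directory, iteration):
    with open(os.path.join(directory, "last_iteration"), "w") as f:
        f.write(str(iteration))

def load_iteration(directory):
    with open(os.path.join(directory, "last_iteration")) as f:
        return int(f.read())

def refine_once(iteration):            # stand-in for the real refinement step
    print(f"refining, iteration {iteration}")

campaign_dir = tempfile.mkdtemp()
save_iteration(campaign_dir, 3)        # written at the end of a first run

last = load_iteration(campaign_dir)    # read on restart
for i in range(last + 1, last + 3):    # e.g. --refinement-num 2
    refine_once(i)
    save_iteration(campaign_dir, i)
```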
mchelem/cref2 | [
"3324c34892dfaba2c99a0a564ede9f0c40ad65a5"
] | [
"cref/structure/plot.py"
] | [
"import os\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport pandas\n\n\n_ramachandran_densities = pandas.read_csv(\n 'data/rama500-general.data',\n skiprows=6,\n delimiter=' ',\n names=['phi', 'psi', 'value']\n)\n\n\"\"\"\nDSSP output:\n H = α-helix\n B = residue in isolated β-bridge\n E = extended strand, participates in β ladder\n G = 3-helix (310 helix)\n I = 5 helix (π-helix)\n T = hydrogen bonded turn\n S = bend\n\nColors extracted from rcsb.org.\n\"\"\"\n\nDSSP_to_color = {\n 'H': '#ED6161',\n 'B': '#CCA200',\n 'E': '#FFFB00',\n 'G': '#FFC2C2',\n 'I': '#900000',\n 'T': '#990099',\n 'S': '#0000FF',\n '-': 'black',\n}\n\n\ndef ramachandran_surface():\n \"\"\"\n Plot density surface for generic ramachandran\n \"\"\"\n fontsize = 18\n ticks = [-180, -90, 0, 90, 180]\n plt.contourf(\n list(OrderedDict.fromkeys(_ramachandran_densities['phi'])),\n list(OrderedDict.fromkeys(_ramachandran_densities['psi'])),\n _ramachandran_densities['value'].values.reshape(180, 180).T,\n levels=[0, 0.0005, 0.02, 1],\n colors=['#FFFFFF', '#B3E8FF', '#7FD9FF']\n )\n plt.xlabel('$\\phi$', fontsize=fontsize)\n plt.ylabel('$\\psi$', fontsize=fontsize)\n plt.xticks(ticks)\n plt.yticks(ticks)\n plt.tick_params(direction=\"out\")\n plt.margins(0.05)\n ax = plt.axes()\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.spines['left'].set_smart_bounds(True)\n ax.spines['bottom'].set_smart_bounds(True)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n\ndef ramachandran(torsion_angles, fragment, target_pdb=None,\n output_writer=None, output_dir=None):\n \"\"\"\n Plot ramachandran of a set of torsion angles for a given fragment\n\n :param torsion_angles: Dictionary with torsion angles phi and psi\n :param fragment: Fragment identifier, used for displaying purposes\n \"\"\"\n target_pdb = None\n plt.figure()\n ramachandran_surface()\n plt.title('Ramachandran plot for ' + fragment)\n plt.scatter(\n x=torsion_angles['phi'],\n y=torsion_angles['psi'],\n s=[1.05 ** x for x in torsion_angles['identity']],\n c=[DSSP_to_color[ss] for ss in torsion_angles['central_ss']],\n marker='o',\n alpha=0.5,\n )\n if target_pdb and (target_pdb in list(torsion_angles['pdb'])):\n i = list(torsion_angles['pdb']).index(target_pdb)\n plt.scatter(\n x=torsion_angles['phi'][i],\n y=torsion_angles['psi'][i],\n marker='D',\n c='red',\n s=50\n )\n if output_writer:\n output_writer.savefig(dpi=150)\n if output_dir:\n plt.savefig(\n os.path.join(output_dir, 'ramachandran', fragment + '.svg'),\n format='svg', dpi=300\n )\n plt.close()\n"
] | [
[
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
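The plotting module above layers a `contourf` density surface under a scatter of torsion angles colored via the `DSSP_to_color` map. A minimal self-contained sketch of the same layering with synthetic angles and a made-up Gaussian density (the real code reads densities from `rama500-general.data`):

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")                  # headless backend for scripts/CI
import matplotlib.pyplot as plt

phi_grid, psi_grid = np.meshgrid(np.linspace(-180, 180, 90),
                                 np.linspace(-180, 180, 90))
density = np.exp(-((phi_grid + 60) ** 2 + (psi_grid + 45) ** 2) / 5000.0)

plt.contourf(phi_grid, psi_grid, density,
             levels=[0, 0.0005, 0.02, 1],
             colors=['#FFFFFF', '#B3E8FF', '#7FD9FF'])
dssp_to_color = {'H': '#ED6161', 'E': '#FFFB00', '-': 'black'}
ss = ['H', 'H', 'E', '-']
plt.scatter([-63, -58, -120, 60], [-43, -47, 130, 40],
            c=[dssp_to_color[s] for s in ss], alpha=0.5)
plt.xlabel(r'$\phi$')
plt.ylabel(r'$\psi$')
plt.savefig('ramachandran_sketch.svg', format='svg')
```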
buctlab/NIO | [
"094e688dd1cd3def7f31cd16ff927d4324651422"
] | [
"visualizer/plot_mf_param_opt/plot_time_cost_bar.py"
] | [
"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom numpy import arange, array\nimport os\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger('PlotTimeCost')\nlogger.setLevel('INFO')\n\n\nclass PlotTimeCostBar:\n\n def __init__(self, data, path, show=False):\n self.data = data\n self.path = path\n self.show_flag = show\n (filepath, tempfilename) = os.path.split(path)\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n (filename, extension) = os.path.splitext(tempfilename)\n self.format = extension[1:]\n\n def plot(self):\n data = array([0, 0, 0])\n data[1:] = self.data['Time Cost'].values\n\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n width = 0.5\n\n xticks = self.data.index\n n = data.shape[0]\n ind = arange(n)\n data = data / 3600\n colors = ['black', 'tab:blue', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown']\n plt.bar(x=ind, height=data, width=width, color=colors)\n\n ax.set_xticks(ind[1:])\n ax.set_xticklabels(xticks)\n\n # ax.set_xlabel('Multi-fidelity control strategy', fontsize=16)\n ax.tick_params(labelsize=12)\n ax.set_ylabel('Time Cost (h)', fontsize=16)\n\n if self.show_flag:\n plt.show()\n fig.savefig(self.path, format=self.format, dpi=80, bbox_inches='tight')\n"
] | [
[
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.bar"
]
] |
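`PlotTimeCostBar.__init__` above derives the save format from the output path's extension and creates missing parent directories before plotting. That idiom in isolation (path is illustrative):

```python
import os

def prepare_output(path):
    directory, filename = os.path.split(path)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    _, extension = os.path.splitext(filename)
    return extension[1:]               # e.g. 'png', passed to savefig(format=...)

print(prepare_output("out/figs/time_cost.png"))  # creates out/figs, prints 'png'
```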
bernardolemos/Automatic_Face_Blurt | [
"7f9127763b391dacc0f89b62a05fe149f02a065b"
] | [
"blur_faces.py"
] | [
"import os\nimport cv2\nimport time\nimport argparse\nimport numpy as np\nfrom mtcnn import detect_face\nimport tensorflow as tf\nfrom PIL import Image, ImageDraw\n\n## MTCNN face localizer\ndef mtcnn_localize_faces(image, pnet, rnet, onet, minsize=20, threshold=[0.7, 0.8, 0.85], factor=0.75):\n \"\"\"\n Localize faces & its landmarks in image using MTCNN\n \n Params\n :image\n :minsize - min. face size\n :threshold - a list/array with 3 values. The thresholds for pnet, rnet & onet, respectively \n :factor - sclaing factor for image octave\n\n Return\n :bbs - list of bounding boxes\n :lds - list of face landmarks\n \"\"\"\n \n\n image = image[:, :, 0:3]\n bounding_boxes, landmarks = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)\n nrof_faces = bounding_boxes.shape[0]\n\n bbs = list()\n lds = list()\n if nrof_faces > 0:\n det = bounding_boxes[:, 0:4]\n \n bb = np.zeros((nrof_faces,4), dtype=np.int32)\n lands = np.zeros((nrof_faces,10), dtype=np.int32)\n landmarks = np.reshape(landmarks, (nrof_faces, 10))\n for i in range(nrof_faces):\n ## Convert to int32\n lands[i] = np.ravel(landmarks[i])\n bb[i] = np.ravel(det[i])\n # inner exception\n if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][2] >= len(image[0]) or bb[i][3] >= len(image):\n print('face is inner of range!')\n continue\n else:\n ## get as top, right, bottom, left\n bbs.append((bb[i][1], bb[i][2], bb[i][3], bb[i][0]))\n lds.append(lands[i])\n \n return bbs, lds\n\n\ndef load_images(images_path):\n \"\"\"\n Read images from directory\n\n Params\n :images_path - path to images\n\n Return\n :image_l - list of images as arrays\n : images_name - list of images' file names\n \"\"\"\n # list of images, as arrays\n images_l = []\n # get images\n images_name = os.listdir(images_path)\n # read images\n for i in images_name:\n image = cv2.imread(os.path.join(images_path, i))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # if image.endswith(\".png\"):\n # images_l.append(image)\n images_l.append(image)\n \n return images_l, images_name\n\ndef main(args):\n st = time.time()\n #check if input directory exists\n if not os.path.exists(args.input_directory):\n print(\"Error! No input direcotory\", args.input_directory)\n return -1\n\n # read images\n images_l, images_paths = load_images(args.input_directory)\n\n #create tensorflow session\n # init. 
tensorflow session\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, './mtcnn')\n #localize and blur faces, iterate over images\n for image, image_path in zip(images_l, images_paths):\n print(\"Processing\", image_path + \"...\")\n\n bbs, lds = mtcnn_localize_faces(image, pnet, rnet, onet, minsize=20, threshold=[0.7, 0.8, 0.85], factor=0.75)\n\n # jumpt iteration if there's no face\n if len(bbs) == 0:\n print(\"Couldn't find faces!\")\n continue\n\n #get faces\n for bb, ld in zip(bbs, lds):\n #get bounding box\n #top, righ, bottom, left\n top = bb[0]\n right = bb[1]\n bottom = bb[2]\n left = bb[3]\n # build landmarks' x, y pairs\n points = []\n for x, y in zip(ld[:5], ld[5:]):\n points.append(x)\n points.append(y)\n\n #get face thumbnail\n face_image = image[top:bottom, left:right]\n #blur face thumbnail\n if args.blur > 0:\n face_image = cv2.GaussianBlur(face_image, (105, 105), args.blur)\n #black\n else:\n face_image = np.zeros(face_image.shape)\n \n #write blured face to image\n image[top:bottom, left:right] = face_image\n\n #PIL image \n # pil_image = Image.fromarray(image)\n # pil_image_face = Image.fromarray(face_image)\n\n #eyes' landmarks: first two pairs\n # get larger rectangle\n # points[0] = points[0] * 0.9\n # points[1] = points[1] * 0.9\n # points[2] = points[2] * 1.1\n # points[3] = points[3] * 1.1\n # draw = ImageDraw.Draw(pil_image)\n #cover eyes with rectangle\n # draw.rectangle(points[:4], fill=\"black\")\n\n #create output directory if it doesn't exist\n if not os.path.exists(args.output_directory):\n os.makedirs(args.output_directory)\n\n #save image\n pil_image = Image.fromarray(image)\n pil_image.save(os.path.join(args.output_directory, image_path))\n\n print(\"Total running time:\", time.time() - st, \"sec.\")\n \n return 0\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-id', '--input_directory', type=str, nargs='?', default=\"./images\")\n parser.add_argument('-od', '--output_directory', type=str, nargs='?', default=\"./blurs\")\n parser.add_argument('-b', '--blur', type=int, nargs='?', default=46)\n args = parser.parse_args()\n\n main(args)"
] | [
[
"numpy.zeros",
"numpy.reshape",
"numpy.ravel",
"tensorflow.Graph",
"tensorflow.GPUOptions",
"tensorflow.ConfigProto"
]
] |
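The per-face redaction step in `main` above is independent of the MTCNN detector: given a `(top, right, bottom, left)` box, the crop is either Gaussian-blurred or zeroed, then written back into the frame. A sketch with a random image standing in for a detection result:

```python
import numpy as np
import cv2

def redact(image, box, blur_sigma=46):
    top, right, bottom, left = box
    face = image[top:bottom, left:right]
    if blur_sigma > 0:
        face = cv2.GaussianBlur(face, (105, 105), blur_sigma)  # same kernel as above
    else:
        face = np.zeros_like(face)     # blackout instead of blur
    image[top:bottom, left:right] = face
    return image

frame = np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)
redact(frame, (50, 200, 150, 100))     # top, right, bottom, left
```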
mcuntz/pyjams | [
"1393c68a9e21a1e7b88291229120641fdaddc998"
] | [
"tests/test_gridcellarea.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nThis is the unittest for gridcellarea module.\n\npython -m unittest -v tests/test_gridcellarea.py\npython -m pytest --cov=pyjams --cov-report term-missing -v tests/test_gridcellarea.py\n\n\"\"\"\nimport unittest\n\n\ndef _flatten(itr):\n import numpy as np\n fitr = np.array(itr).flatten()\n if len(fitr) == 0:\n return list(fitr)\n else:\n if isinstance(fitr[0], str):\n return [ i for i in fitr ]\n else:\n return [ i if np.isfinite(i) else np.finfo(float).max\n for i in fitr ]\n\n\nclass TestGridcellarea(unittest.TestCase):\n \"\"\"\n Tests for gridcellarea.py\n \"\"\"\n\n def test_gridcellarea(self):\n import numpy as np\n from pyjams import gridcellarea\n\n lat = [0., 2.5, 5.0]\n lon = [0., 3.75, 7.5]\n\n rearth = 6371009.\n fsoll = [[1.15906555e+11, 1.15906555e+11, 1.15906555e+11],\n [1.15796237e+11, 1.15796237e+11, 1.15796237e+11],\n [1.15465495e+11, 1.15465495e+11, 1.15465495e+11]]\n\n rearth1 = 6371000.\n fsoll1 = [[1.15906227e+11, 1.15906227e+11, 1.15906227e+11],\n [1.15795910e+11, 1.15795910e+11, 1.15795910e+11],\n [1.15465169e+11, 1.15465169e+11, 1.15465169e+11]]\n\n # descending latitudes\n dlat = [0., -2.5, -5.0]\n\n # meridian within longitudes\n lon360 = [360., 3.75, 7.5]\n # dateline within longitudes\n lon180 = [180., -180.+3.75, -180.+7.5]\n\n # list\n fout = gridcellarea(lat, lon)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # tuple, list\n fout = gridcellarea(tuple(lat), lon)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # 2 tuple\n fout = gridcellarea(tuple(lat), tuple(lon))\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # array, list\n fout = gridcellarea(np.array(lat), lon)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # 2 array\n fout = gridcellarea(np.array(lat), np.array(lon))\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # rearth\n fout = gridcellarea(lat, lon, rearth=rearth)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # rearth classic\n fout = gridcellarea(lat, lon, rearth=rearth1)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll1))\n\n # globe\n fout = gridcellarea(lat, lon, globe=True)\n fsoll2 = [[3.79774834e+12, 3.79774834e+12, 3.79774834e+12],\n [1.15796240e+11, 1.15796240e+11, 1.15796240e+11],\n [3.61823239e+12, 3.61823239e+12, 3.61823239e+12]]\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -4)), _flatten(fsoll2))\n\n # descending lats\n fout = gridcellarea(dlat, lon, globe=True)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -4)), _flatten(fsoll2))\n\n # meridian in lon\n fout = gridcellarea(lat, lon360)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # date line in lon\n fout = gridcellarea(lat, lon180)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # errors\n # lat > 90\n lat1 = [0., 2.5, 95.0]\n self.assertRaises(AssertionError, gridcellarea, lat1, lon)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.around",
"numpy.finfo",
"numpy.isfinite"
]
] |
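The expected values (`fsoll`) in the test above follow from the standard spherical grid-cell formula: the area between latitudes lat1..lat2 over a longitude span dlon is R² · dlon · (sin lat2 − sin lat1). A small illustration (not pyjams' implementation) reproducing the first column of `fsoll` to rounding:

```python
import numpy as np

def cell_area(lat_centre, dlat, dlon, rearth=6371009.0):
    lat1 = np.deg2rad(lat_centre - dlat / 2.0)
    lat2 = np.deg2rad(lat_centre + dlat / 2.0)
    return rearth ** 2 * np.deg2rad(dlon) * (np.sin(lat2) - np.sin(lat1))

print(cell_area(0.0, 2.5, 3.75))   # ~1.159066e+11, cf. fsoll[0][0]
print(cell_area(2.5, 2.5, 3.75))   # ~1.157962e+11, cf. fsoll[1][0]
```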
sudheernaidu53/other_utils | [
"8e7f32ff0a3ded3910a957d821d6f4eb15bae3d8"
] | [
"loan_estimator/loan_estimator.py"
] | [
"# This file is to get a rough estimation of how much you need to pay or how many months you need to pay for a loan\n\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import display\n\ndef group(number):\n \"\"\"show money in laks and crores (indian way of presenting money)\"\"\"\n s = '%d' % number\n groups = []\n groups.append(s[-3:])\n s = s[:-3]\n while s and s[-1].isdigit():\n groups.append(s[-2:])\n s = s[:-2]\n return s + ','.join(reversed(groups))\n\n\nclass loan:\n def __init__(self, R=8.1, principal=30, years=5):\n \"\"\"R is yearly interest\n principal is principal amount in lakhs\n years = number of years\n \"\"\"\n self.R = R * 0.01\n self.r = R * 0.01 * (1 / 12)\n self.principal = principal * 100000\n self.years = years\n self.num_months = self.years * 12\n self.months = {\"Jan\": 31, \"Feb\": 28, \"Mar\": 31, \"Apr\": 30, \"May\": 31, \"June\": 30, \"Jul\": 31, \"Aug\": 31,\n \"Sep\": 30, \"Oct\": 31, \"Nov\": 30, \"Dec\": 31}\n\n def find_monthly_emi_flat(self, print_=True):\n \"\"\" find how much emi need to be paid given some principal, interest, and number of months when the interest scheme is flat\"\"\"\n\n total = self.principal * (1 + self.R * (self.num_months / 12))\n if print_:\n print(\"------------- flat interest -------------------\")\n print(\"total amount you are paying over full period:\", total)\n print(\"monthly installment/emi : {}\".format(total / self.num_months))\n return total, total / self.num_months\n\n def num_months_emi_diminishing(self, emi, principal=0, interest=0, print_=True):\n \"\"\"find the number of months you need to pay for, if you are paying emi every month\"\"\"\n \"\"\"emi is in rupees, principal is in lakhs, interest is yearly interest\"\"\"\n \"\"\"n = np.log((E/r)/(E/r -P))/np.log(1+r) \"\"\"\n\n if not principal:\n principal = self.principal\n if not interest:\n interest = self.r\n num_months = np.log((emi / interest) / (emi / interest - principal)) / np.log(1 + interest)\n if print_:\n print(\"------------- diminishing interest -------------------\")\n print(\"you need to pay {} monthly, for {} months\".format(emi, num_months))\n return num_months\n\n def find_monthly_emi_diminishing(self, num_months=0, principal=0, print_=True):\n \"\"\" find how much emi need to be paid given some principal, interest, and number of months when the interest scheme is flat\"\"\"\n \"\"\"P*r*(1 + 1/(np.power(1+r,60)-1))\"\"\"\n\n if not num_months:\n num_months = self.num_months\n if not principal:\n principal = self.principal\n else:\n principal *= 100000\n monthly_emi = principal * self.r * (1 + 1 / (np.power(1 + self.r, num_months) - 1))\n if print_:\n print(\"------------- diminishing interest -------------------\")\n print(\" you need to pay {} monthly, for {} months\".format(monthly_emi, num_months))\n print(\"total amount you will pay over full period is roughly {}\".format(monthly_emi * num_months))\n return monthly_emi\n\n def confirm_diminishing(self, emi, print_=False):\n \"\"\" function to confirm if the interest scheme is dimishing\"\"\"\n principal = self.principal\n i = 1\n while principal > 0:\n principal += ((self.r) * principal - emi)\n if print_:\n print(i, principal)\n i += 1\n if abs(principal / self.principal) < 0.001:\n print(\"final net amount is {} after {} months\".format(principal, i - 1))\n return principal, i\n\n\n## Usage\nR = 10.5 #10.5 % monthly interest rate\nprincipal = 30 # principal is 30 lakhs\nyears = 4.5 # loan term period is 4.5 years\nloan1 = loan(R,principal,years) # initialize a loan 
instance\n\nloan1.find_monthly_emi_flat()\nloan1.num_months_emi_diminishing(35000)\nloan1.find_monthly_emi_diminishing()\n\n#-----------output-----------------------\n# ------------- flat interest -------------------\n# total amount you are paying over full period: 4417500.0\n# monthly installment/emi : 81805.55555555556\n# ------------- diminishing interest -------------------\n# you need to pay 35000 monthly, for 159.1257820098328 months\n# ------------- diminishing interest -------------------\n# you need to pay 69948.58010333449 monthly, for 54.0 months\n# total amount you will pay over full period is roughly 3777223.3255800623\n\ndef get_df():\n # make a table to find how much emi to be paid for different principals over different tenure/periods\n\n loan1 = loan(10.5,principal = 30, years =5)\n # print(loan1.find_monthly_emi_diminishing())\n\n years = [2,3,4,5]\n amounts = [15,20,25]\n yearss = [str(x)+'y' for x in years]\n df = pd.DataFrame(columns=yearss)\n total = pd.DataFrame(columns = yearss)\n for amount in amounts:\n arr=[]\n arr1 = []\n for year in years:\n temp = loan1.find_monthly_emi_diminishing(num_months=year*12, principal=amount,print_ = False)\n arr.append(group(round(int(temp),-2))) # rounding to closest hundred\n arr1.append(group(round(int(temp*year*12),-2)))\n df.loc[str(amount)+'Lks']=arr\n total.loc[str(amount)+'Lks']=arr1\n\n print(\"--------------------- emi ------------------\")\n display(df)\n\n print(\"---------------------- total ---------------------\")\n display(total)\n\n# get_df()"
] | [
[
"numpy.log",
"pandas.DataFrame",
"numpy.power"
]
] |
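The diminishing-balance EMI used by `find_monthly_emi_diminishing` above is EMI = P·r·(1 + 1/((1+r)ⁿ − 1)). A standalone check with the same numbers as the usage example (10.5% yearly, 30 lakh principal, 4.5 years):

```python
r = 0.105 / 12                       # monthly rate
P = 30 * 100000                      # principal in rupees
n = int(4.5 * 12)                    # 54 monthly instalments
emi = P * r * (1 + 1 / ((1 + r) ** n - 1))
print(round(emi, 2))                 # ~69948.58, matching the printed output above
```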
hmhuy2000/Reinforcement-Learning-SuttonBartoI | [
"97ca9dc11c4cb4fda74b144e658c3eac756131ff"
] | [
"chap 5/5_5.py"
] | [
"import numpy as np \nimport matplotlib.pyplot as plt\nfrom tqdm import trange\nimport seaborn as sns\nimport random\n\n# ========================== CFG =======================\n\nclass CFG:\n HIT = 1\n STOP = 0\n actions = [STOP, HIT]\n WIN = 1\n DRAW = 0\n LOSE = -1\n\n\n# ======================== function ======================\n\ndef random_card():\n card = np.random.randint(13) + 1\n card = min(card, 10)\n return card\n\ndef value_card(card):\n if (card == 1):\n return 11\n else:\n return card\n\ndef random_play(policy_player, policy_dealer, init_state = None, debug = False):\n player_ace = 0\n player_ace_1 = 0\n dealer_ace = 0\n dealer_ace_1 = 0\n player_sum = 0\n dealer_sum = 0\n dealer_show = 0\n his = []\n if (init_state):\n (player_ace, dealer_show, player_sum, action) = init_state\n if (debug):\n print(f'player init {player_sum} dealer show {dealer_show} action {action}')\n\n if (dealer_show == 1):\n dealer_ace += 1\n dealer_sum += value_card(dealer_show)\n\n card = random_card()\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n if (dealer_sum > 21):\n dealer_sum -= 10\n dealer_ace_1 += 1\n\n his.append((player_ace > player_ace_1, player_sum, dealer_show, action))\n if (action == CFG.HIT):\n card = random_card()\n if (debug):\n print(f'player {player_sum} {card}')\n if (card == 1):\n player_ace += 1\n player_sum += value_card(card)\n if (player_sum > 21 and player_ace > player_ace_1):\n player_sum -= 10\n player_ace_1 += 1\n \n\n else:\n while(player_sum <12):\n card = random_card()\n if (card == 1):\n player_ace += 1\n player_sum += value_card(card)\n if (player_sum > 21):\n player_sum -= 10\n player_ace_1 += 1\n \n if (True):\n card = random_card()\n dealer_show = card\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n\n card = random_card()\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n if (dealer_sum > 21):\n dealer_sum -= 10\n dealer_ace_1 += 1\n\n while(True):\n if (player_sum > 21):\n if (debug):\n print(f'quát {player_sum}')\n return his, -1\n action = policy_player[int(player_ace > player_ace_1), player_sum, dealer_show]\n his.append((player_ace > player_ace_1, player_sum, dealer_show, action))\n if (action == CFG.STOP):\n break\n card = random_card()\n if (debug):\n print(f'player {player_sum} {card}')\n if (card == 1):\n player_ace += 1\n player_sum += value_card(card)\n if (player_sum > 21 and player_ace > player_ace_1):\n player_sum -= 10\n player_ace_1 += 1\n \n while(True):\n if (dealer_sum == 21):\n if(debug):\n print(f'player {player_sum} dealer {dealer_sum}')\n if (player_sum == 21):\n return his, 0\n else:\n return his, -1\n if (dealer_sum > 21):\n return his, 1\n action = policy_dealer[dealer_sum]\n if (action == CFG.STOP):\n break\n card = random_card()\n if(debug):\n print(f'dealer {dealer_sum} {card}')\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n if(dealer_sum > 21 and dealer_ace > dealer_ace_1):\n dealer_sum -= 10\n dealer_ace_1 += 1\n \n if(debug):\n print(f'player sum {player_sum} dealer sum {dealer_sum}')\n if (player_sum < dealer_sum):\n return his, -1\n if (player_sum == dealer_sum):\n return his, 0\n if (player_sum > dealer_sum):\n return his, 1\n \n\ndef MonteCarloPrediction(Num_iter, debug = False):\n\n # ========================== init =======================\n\n policy_dealer = np.zeros((22))\n policy_dealer[:17] = CFG.HIT\n policy_dealer[17:] = CFG.STOP\n\n policy_player = np.zeros((2, 22, 11), dtype = int)\n for i in range(2):\n for j in range(22):\n for 
k in range(11):\n policy_player[i,j,k] = random.choice(CFG.actions)\n\n \n\n value_action = np.zeros((2, 10, 10, 2))\n cnt = np.ones((2, 10, 10, 2))\n\n for iter in trange(Num_iter):\n if (debug):\n print(f'---------------- {iter} -------------------------')\n check = set()\n init_usable = random.choice(range(2))\n init_show = random_card()\n init_player_sum = random.choice(range(12,22))\n init_action = random.choice(CFG.actions)\n\n his, reward = random_play(policy_player, policy_dealer,\n (init_usable, init_show, init_player_sum, init_action), debug)\n if (debug):\n print(his, reward)\n for (usable, player_sum, dealer_show, action) in his:\n if ((usable, player_sum, dealer_show, action) in check):\n continue\n check.add((usable, player_sum, dealer_show, action))\n\n value_action[int(usable), player_sum - 12, dealer_show - 1, action] += reward\n cnt[int(usable), player_sum - 12, dealer_show - 1, action] += 1\n Q = np.zeros((2))\n Q[0] = value_action[int(usable), player_sum - 12, dealer_show - 1, 0]/cnt[int(usable), player_sum - 12, dealer_show - 1, 0]\n Q[1] = value_action[int(usable), player_sum - 12, dealer_show - 1, 1]/cnt[int(usable), player_sum - 12, dealer_show - 1, 1]\n policy_player[int(usable), player_sum, dealer_show] = np.argmax(Q)\n arr = value_action/cnt\n return policy_player[0, 12:,1:], policy_player[1, 12:,1:], arr\n\n# ======================== main ==========================\n\nNoUsable500k, Usable500k, arr = MonteCarloPrediction(10000000)\n\nvalue = np.zeros((2,10,10))\n\nfor i in range(2):\n for j in range(10):\n for k in range(10):\n value[i,j,k] = np.max(arr[i,j,k,:])\n\n\nax = sns.heatmap(value[0,...], cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_value_NoUsable.png')\nplt.close()\n\nax = sns.heatmap(value[1,...], cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_value_Usable.png')\nplt.close()\n\nax = sns.heatmap(NoUsable500k, cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_policy_NoUsable.png')\nplt.close()\n\n\nax = sns.heatmap(Usable500k, cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_policy_Usable.png')\nplt.close()"
] | [
[
"numpy.ones",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.random.randint"
]
] |
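Stripped of the blackjack mechanics, `MonteCarloPrediction` above is the tabular Monte Carlo ES pattern: accumulate returns per (state, action), average them into Q, and make the policy greedy with respect to Q. A tiny sketch with integer states for brevity:

```python
import numpy as np

n_states, n_actions = 4, 2
returns_sum = np.zeros((n_states, n_actions))
counts = np.ones((n_states, n_actions))   # start at 1, as the code above does
policy = np.zeros(n_states, dtype=int)

episode = [(0, 1), (2, 0), (3, 1)]        # (state, action) pairs
reward = 1.0
for s, a in set(episode):                 # first-visit: each pair updated once
    returns_sum[s, a] += reward
    counts[s, a] += 1
    policy[s] = np.argmax(returns_sum[s] / counts[s])

print(policy)
```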
SebastianMM-96/regex-wordToken | [
"1e707f03638ebe9365974bcced8ab8b0d42c1295"
] | [
"fake-news/training-testing-classification-model/fakeNewsModel-CountVectorizer.py"
] | [
"# Import the necessary modules\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\n\n# Instantiate a Multinomial Naive Bayes classifier: nb_classifier\nnb_classifier = MultinomialNB()\n\n# Fit the classifier to the training data\nnb_classifier.fit(count_train, y_train)\n\n# Create the predicted tags: pred\npred = nb_classifier.predict(count_test)\n\n# Calculate the accuracy score: score\nscore = metrics.accuracy_score(y_test, pred)\nprint(score)\n\n# Calculate the confusion matrix: cm\ncm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])\nprint(cm)"
] | [
[
"sklearn.naive_bayes.MultinomialNB",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
]
] |
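The snippet above assumes `count_train`, `count_test`, `y_train`, and `y_test` already exist; the vectorization step it depends on is a `CountVectorizer` fit on the training split only. A self-contained version of the same pipeline, with a toy corpus made up purely for illustration:

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics

X_train = ["aliens built the pyramids", "senate passes budget bill",
           "miracle cure doctors hate", "court rules on appeal"]
y_train = ["FAKE", "REAL", "FAKE", "REAL"]
X_test = ["budget bill vote", "miracle aliens cure"]
y_test = ["REAL", "FAKE"]

vectorizer = CountVectorizer()
count_train = vectorizer.fit_transform(X_train)   # fit vocabulary on train only
count_test = vectorizer.transform(X_test)         # reuse it on test

nb_classifier = MultinomialNB()
nb_classifier.fit(count_train, y_train)
pred = nb_classifier.predict(count_test)
print(metrics.accuracy_score(y_test, pred))
print(metrics.confusion_matrix(y_test, pred, labels=["FAKE", "REAL"]))
```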
MagiaSN/pytorch | [
"7513455c743d3d644b45a804902c1a0d14b69f45",
"7513455c743d3d644b45a804902c1a0d14b69f45",
"7513455c743d3d644b45a804902c1a0d14b69f45"
] | [
"torch/nn/quantized/modules/__init__.py",
"torch/nn/modules/lazy.py",
"torch/utils/tensorboard/_pytorch_graph.py"
] | [
"import torch\nfrom torch.nn.modules.pooling import MaxPool2d\n\nfrom .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid\nfrom .batchnorm import BatchNorm2d, BatchNorm3d\nfrom .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \\\n InstanceNorm2d, InstanceNorm3d\nfrom .conv import _ConvNd, Conv1d, Conv2d, Conv3d\nfrom .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d\nfrom .linear import Linear\nfrom .embedding_ops import Embedding, EmbeddingBag\n\nfrom .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional\n\n\nclass Quantize(torch.nn.Module):\n r\"\"\"Quantizes an incoming tensor\n\n Args:\n `scale`: scale of the output Quantized Tensor\n `zero_point`: zero_point of output Quantized Tensor\n `dtype`: data type of output Quantized Tensor\n\n Attributes:\n `scale`, `zero_point`, `dtype`\n\n Examples::\n >>> t = torch.tensor([[1., -1.], [1., -1.]])\n >>> scale, zero_point, dtype = 1.0, 2, torch.qint8\n >>> qm = Quantize(scale, zero_point, dtype)\n >>> qt = qm(t)\n >>> print(qt)\n tensor([[ 1., -1.],\n [ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)\n \"\"\"\n\n scale: torch.Tensor\n zero_point: torch.Tensor\n\n def __init__(self, scale, zero_point, dtype):\n super(Quantize, self).__init__()\n self.register_buffer('scale', torch.tensor([scale]))\n self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.long))\n self.dtype = dtype\n\n def forward(self, X):\n return torch.quantize_per_tensor(X, float(self.scale),\n int(self.zero_point), self.dtype)\n\n @staticmethod\n def from_float(mod):\n assert hasattr(mod, 'activation_post_process')\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)\n\n def extra_repr(self):\n return 'scale={}, zero_point={}, dtype={}'.format(self.scale, self.zero_point, self.dtype)\n\n\nclass DeQuantize(torch.nn.Module):\n r\"\"\"Dequantizes an incoming tensor\n\n Examples::\n >>> input = torch.tensor([[1., -1.], [1., -1.]])\n >>> scale, zero_point, dtype = 1.0, 2, torch.qint8\n >>> qm = Quantize(scale, zero_point, dtype)\n >>> quantized_input = qm(input)\n >>> dqm = DeQuantize()\n >>> dequantized = dqm(quantized_input)\n >>> print(dequantized)\n tensor([[ 1., -1.],\n [ 1., -1.]], dtype=torch.float32)\n \"\"\"\n\n def __init__(self):\n super(DeQuantize, self).__init__()\n\n def forward(self, Xq):\n return Xq.dequantize()\n\n @staticmethod\n def from_float(mod):\n return DeQuantize()\n\n__all__ = [\n 'BatchNorm2d',\n 'BatchNorm3d',\n '_ConvNd',\n 'Conv1d',\n 'Conv2d',\n 'Conv3d',\n 'ConvTranspose1d',\n 'ConvTranspose2d',\n 'ConvTranspose3d',\n 'DeQuantize',\n 'ELU',\n 'Embedding',\n 'EmbeddingBag',\n 'GroupNorm',\n 'Hardswish',\n 'InstanceNorm1d',\n 'InstanceNorm2d',\n 'InstanceNorm3d',\n 'LayerNorm',\n 'LeakyReLU',\n 'Linear',\n 'MaxPool2d',\n 'Quantize',\n 'ReLU6',\n 'Sigmoid',\n # Wrapper modules\n 'FloatFunctional',\n 'FXFloatFunctional',\n 'QFunctional',\n]\n",
"import itertools\nfrom typing_extensions import Protocol\nimport warnings\n\nimport torch\nfrom ..parameter import is_lazy\n\n\nclass _LazyProtocol(Protocol):\n \"\"\"This is to avoid errors with mypy checks for\n The attributes in a mixin:\n https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes\n \"\"\"\n def _register_load_state_dict_pre_hook(self, hook):\n ...\n\n def register_forward_pre_hook(self, hook):\n ...\n\n def _lazy_load_hook(\n self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n ...\n\n def _get_name(self):\n ...\n\n def _infer_parameters(self, module, input):\n ...\n\n @property\n def _parameters(self):\n ...\n\n @property\n def _buffers(self):\n ...\n\n @property\n def _non_persistent_buffers_set(self):\n ...\n\n @property\n def _load_hook(self):\n ...\n\n @property\n def _initialize_hook(self):\n ...\n\n\nclass LazyModuleMixin:\n r\"\"\"A mixin for modules that lazily initialize parameters, also known as \"lazy modules.\"\n\n .. warning:\n Lazy modules are an experimental new feature under active development,\n and their API is likely to change.\n\n Modules that lazily initialize parameters, or \"lazy modules\",\n derive the shapes of their parameters from the first input(s)\n to their forward method. Until that first forward they contain\n :class:`torch.nn.UninitializedParameter` s that should not be accessed\n or used, and afterward they contain regular :class:`torch.nn.Parameter` s.\n Lazy modules are convenient since they don't require computing some\n module arguments, like the :attr:`in_features` argument of a\n typical :class:`torch.nn.Linear`.\n\n After construction, networks with lazy modules should first\n be converted to the desired dtype and placed on the expected device.\n This is because lazy modules only perform shape inference so the usual dtype\n and device placement behavior applies.\n The lazy modules should then perform \"dry runs\" to initialize all the components in the module.\n These \"dry runs\" send inputs of the correct size, dtype, and device through\n the network and to each one of its lazy modules. After this the network can be used as usual.\n\n >>> class LazyMLP(torch.nn.Module):\n ... def __init__(self):\n ... super().__init__()\n ... self.fc1 = torch.nn.LazyLinear(10)\n ... self.relu1 = torch.nn.ReLU()\n ... self.fc2 = torch.nn.LazyLinear(1)\n ... self.relu2 = torch.nn.ReLU()\n ...\n ... def forward(self, input):\n ... x = self.relu1(self.fc1(input))\n ... y = self.relu2(self.fc2(x))\n ... 
return y\n >>> # constructs a network with lazy modules\n >>> lazy_mlp = LazyMLP()\n >>> # transforms the network's device and dtype\n >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'\n >>> lazy_mlp = mlp.cuda().double()\n >>> lazy_mlp\n LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True)\n (relu1): ReLU()\n (fc2): LazyLinear(in_features=0, out_features=1, bias=True)\n (relu2): ReLU()\n )\n >>> # performs a dry run to initialize the network's lazy modules\n >>> lazy_mlp(torch.ones(10,10).cuda())\n >>> # after initialization, LazyLinear modules become regular Linear modules\n >>> lazy_mlp\n LazyMLP(\n (fc1): Linear(in_features=10, out_features=10, bias=True)\n (relu1): ReLU()\n (fc2): Linear(in_features=10, out_features=1, bias=True)\n (relu2): ReLU()\n )\n >>> # attaches an optimizer, since parameters can now be used as usual\n >>> optim = torch.optim.SGD(mlp.parameters(), lr=0.01)\n\n A final caveat when using lazy modules is that the order of initialization of a network's\n parameters may change, since the lazy modules are always initialized after other modules.\n For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module\n first and then a regular :class:`torch.nn.Linear` second, the second module would be\n initialized on construction and the first module would be initialized during the first dry run.\n This can cause the parameters of a network using lazy modules to be initialized differently\n than the parameters of a network without lazy modules as the order of parameter initializations,\n which often depends on a stateful random number generator, is different.\n Check :doc:`/notes/randomness` for more details.\n\n Lazy modules can be serialized with a state dict like other modules. For example:\n\n >>> lazy_mlp = LazyMLP()\n >>> # The state dict shows the uninitialized parameters\n >>> lazy_mlp.state_dict()\n OrderedDict([('fc1.weight', Uninitialized parameter),\n ('fc1.bias',\n tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,\n 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),\n ('fc2.weight', Uninitialized parameter),\n ('fc2.bias', tensor([0.0019]))])\n\n\n Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize\n initialized LazyModules and they will remain initialized)\n\n\n >>> full_mlp = LazyMLP()\n >>> # Dry run to initialize another module\n >>> full_mlp.forward(torch.ones(10, 1))\n >>> # Load an initialized state into a lazy module\n >>> lazy_mlp.load_state_dict(full_mlp.state_dict())\n >>> # The state dict now holds valid values\n >>> lazy_mlp.state_dict()\n OrderedDict([('fc1.weight',\n tensor([[-0.3837],\n [ 0.0907],\n [ 0.6708],\n [-0.5223],\n [-0.9028],\n [ 0.2851],\n [-0.4537],\n [ 0.6813],\n [ 0.5766],\n [-0.8678]])),\n ('fc1.bias',\n tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,\n 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),\n ('fc2.weight',\n tensor([[ 0.1320, 0.2938, 0.0679, 0.2793, 0.1088, -0.1795, -0.2301, 0.2807,\n 0.2479, 0.1091]])),\n ('fc2.bias', tensor([0.0019]))])\n\n Note, however, that the loaded parameters will not be replaced when doing a \"dry run\" if they are initialized\n when the state is loaded. 
This prevents using initialized modules in different contexts.\n \"\"\"\n\n # modules inheriting from this will change their __class__ to the specified\n # one after they are fully initialized\n cls_to_become = None\n\n def __init__(self: _LazyProtocol, *args, **kwargs):\n # Mypy doesnt like this super call in a mixin\n super().__init__(*args, **kwargs) # type: ignore\n self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)\n self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters)\n warnings.warn('Lazy modules are a new feature under heavy development '\n 'so changes to the API or functionality can happen at any moment.')\n\n def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):\n # This should be ideally implemented as a hook,\n # but we should override `detach` in the UninitializedParameter to return itself\n # which is not clean\n for name, param in self._parameters.items():\n if param is not None:\n if not (is_lazy(param) or keep_vars):\n param = param.detach()\n destination[prefix + name] = param\n for name, buf in self._buffers.items():\n if buf is not None and name not in self._non_persistent_buffers_set:\n if not (is_lazy(buf) or keep_vars):\n buf = buf.detach()\n destination[prefix + name] = buf\n\n def _lazy_load_hook(\n self: _LazyProtocol, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"load_state_dict pre-hook function for lazy buffers and parameters.\n\n The purpose of this hook is to adjust the current state and/or\n ``state_dict`` being loaded so that a module instance serialized in\n both un/initialized state can be deserialized onto both un/initialized\n module instance.\n See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``\n for the details of the hook specification.\n \"\"\"\n for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):\n key = prefix + name\n if key in state_dict and param is not None:\n input_param = state_dict[key]\n if is_lazy(param):\n # The current parameter is not initialized but the one being loaded one is\n # create a new parameter based on the uninitialized one\n if not is_lazy(input_param):\n with torch.no_grad():\n param.materialize(input_param.shape)\n\n def initialize_parameters(self: _LazyProtocol, *args, **kwargs):\n r\"\"\"Initialize parameters according to the input batch properties.\n This adds an interface to isolate parameter initialization from the\n forward pass when doing parameter shape inference.\n \"\"\"\n raise NotImplementedError('initialize_parameters is not implemented for {}'.format(self.__class__.__name__))\n\n def has_uninitialized_params(self: _LazyProtocol):\n r\"\"\"Check if a module has parameters that are not initialized\n \"\"\"\n # This is to avoid the JIT to track this parameter and force\n # custom modules __setstate__ to add it\n params = self._parameters.values()\n buffers = self._buffers.values()\n for param in itertools.chain(params, buffers):\n if is_lazy(param):\n return True\n return False\n\n def _infer_parameters(self: _LazyProtocol, module, input):\n r\"\"\"Infers the size and initializes the parameters according to the\n provided input batch.\n Given a module that contains parameters that were declared inferrable\n using :class:`torch.nn.parameter.ParameterMode.Infer`, runs a forward pass\n in the complete module using the provided input to initialize all the parameters\n as needed.\n The module is set into evaluation mode before running 
the forward pass in order\n to avoid saving statistics or calculating gradients\n \"\"\"\n module.initialize_parameters(*input)\n if module.has_uninitialized_params():\n raise RuntimeError('module {} has not been fully initialized'.format(self._get_name()))\n module._initialize_hook.remove()\n module._load_hook.remove()\n delattr(module, '_initialize_hook')\n delattr(module, '_load_hook')\n if module.cls_to_become is not None:\n module.__class__ = module.cls_to_become\n\n\n def _replicate_for_data_parallel(self: _LazyProtocol):\n raise RuntimeError('Modules with uninitialized parameters can\\'t be used with `DataParallel`. '\n 'Run a dummy forward pass to correctly initialize the modules')\n",
"from collections import OrderedDict\nfrom typing import Dict, Any\n\nfrom tensorboard.compat.proto.config_pb2 import RunMetadata\nfrom tensorboard.compat.proto.graph_pb2 import GraphDef\nfrom tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats\nfrom tensorboard.compat.proto.versions_pb2 import VersionDef\n\nimport torch\nfrom ._proto_graph import node_proto\n\nmethods_OP = ['attributeNames', 'hasMultipleOutputs', 'hasUses', 'inputs',\n 'kind', 'outputs', 'outputsSize', 'scopeName']\n# Some additional methods to explure for methods_IO are\n#\n# 'unique' (type int)\n# 'type' (type <Tensor<class 'torch._C.Type'>>)\n#\n# But the below are sufficient for now.\nmethods_IO = ['node', 'offset', 'debugName']\n\nGETATTR_KIND = 'prim::GetAttr'\nCLASSTYPE_KIND = 'ClassType'\n\nclass NodeBase(object):\n def __init__(self, debugName=None, inputs=None, scope=None, tensor_size=None, op_type='UnSpecified', attributes=''):\n # TODO; Specify a __slots__ for this class or potentially\n # used namedtuple instead\n self.debugName = debugName\n self.inputs = inputs\n self.tensor_size = tensor_size\n self.kind = op_type\n self.attributes = attributes\n self.scope = scope\n\n def __repr__(self):\n repr = []\n repr.append(str(type(self)))\n for m in dir(self):\n if '__' not in m:\n repr.append(m + ': ' + str(getattr(self, m)) + str(type(getattr(self, m))))\n return '\\n'.join(repr) + '\\n\\n'\n\n\nclass NodePy(NodeBase):\n def __init__(self, node_cpp, valid_methods):\n super(NodePy, self).__init__(node_cpp)\n valid_methods = valid_methods[:]\n self.inputs = []\n\n for m in valid_methods:\n if m == 'inputs' or m == 'outputs':\n list_of_node = list(getattr(node_cpp, m)())\n io_unique_names = []\n io_tensor_sizes = []\n for n in list_of_node:\n io_unique_names.append(n.debugName())\n if n.isCompleteTensor():\n io_tensor_sizes.append(n.type().sizes())\n else:\n io_tensor_sizes.append(None)\n\n setattr(self, m, io_unique_names)\n setattr(self, m + 'tensor_size', io_tensor_sizes)\n\n else:\n setattr(self, m, getattr(node_cpp, m)())\n\n\nclass NodePyIO(NodePy):\n def __init__(self, node_cpp, input_or_output=None):\n super(NodePyIO, self).__init__(node_cpp, methods_IO)\n try:\n tensor_size = node_cpp.type().sizes()\n except RuntimeError:\n tensor_size = [1, ] # fail when constant model is used.\n self.tensor_size = tensor_size\n # Kind attribute string is purely descriptive and will be shown\n # in detailed information for the node in TensorBoard's graph plugin.\n #\n # NodePyOP nodes get this from their kind() method.\n self.kind = 'Parameter'\n if input_or_output:\n self.input_or_output = input_or_output\n self.kind = 'IO Node'\n\n\nclass NodePyOP(NodePy):\n def __init__(self, node_cpp):\n super(NodePyOP, self).__init__(node_cpp, methods_OP)\n # Replace single quote which causes strange behavior in TensorBoard\n # TODO: See if we can remove this in the future\n self.attributes = str({k: node_cpp[k] for k in node_cpp.attributeNames()}).replace(\"'\", ' ')\n self.kind = node_cpp.kind()\n\n\nclass GraphPy(object):\n \"\"\"Helper class to convert torch.nn.Module to GraphDef proto and visualization\n with TensorBoard.\n\n GraphDef generation operates in two passes:\n\n In the first pass, all nodes are read and saved to two lists.\n One list is for input/output nodes (nodes_io), which only have inbound\n or outbound connections, but not both. Another list is for internal\n operator nodes (nodes_op). 
The first pass also saves all scope name\n appeared in the nodes in scope_name_appeared list for later processing.\n\n In the second pass, scope names are fully applied to all nodes.\n debugNameToScopedName is a mapping from a node's ID to its fully qualified\n scope name. e.g. Net1/Linear[0]/1. Unfortunately torch.jit doesn't have\n totally correct scope output, so this is nontrivial. The function\n populate_namespace_from_OP_to_IO and find_common_root are used to\n assign scope name to a node based on the connection between nodes\n in a heuristic kind of way. Bookkeeping is done with shallowest_scope_name\n and scope_name_appeared.\n \"\"\"\n def __init__(self):\n self.nodes_op = []\n self.nodes_io = OrderedDict()\n self.unique_name_to_scoped_name = {}\n self.shallowest_scope_name = 'default'\n self.scope_name_appeared = []\n\n def append(self, x):\n if isinstance(x, NodePyIO):\n self.nodes_io[x.debugName] = x\n if isinstance(x, NodePyOP):\n self.nodes_op.append(x)\n\n def printall(self):\n print('all nodes')\n for node in self.nodes_op:\n print(node)\n for key in self.nodes_io:\n print(self.nodes_io[key])\n\n def find_common_root(self):\n for fullscope in self.scope_name_appeared:\n if fullscope:\n self.shallowest_scope_name = fullscope.split('/')[0]\n\n def populate_namespace_from_OP_to_IO(self):\n for node in self.nodes_op:\n for node_output, outputSize in zip(node.outputs, node.outputstensor_size):\n self.scope_name_appeared.append(node.scopeName)\n self.nodes_io[node_output] = NodeBase(node_output,\n node.inputs,\n node.scopeName,\n outputSize,\n op_type=node.kind,\n attributes=node.attributes)\n\n self.find_common_root()\n\n for node in self.nodes_op:\n for input_node_id in node.inputs:\n self.unique_name_to_scoped_name[input_node_id] = node.scopeName + '/' + input_node_id\n\n for key, node in self.nodes_io.items():\n if type(node) == NodeBase:\n self.unique_name_to_scoped_name[key] = node.scope + '/' + node.debugName\n if hasattr(node, 'input_or_output'):\n self.unique_name_to_scoped_name[key] = node.input_or_output + '/' + node.debugName\n\n if hasattr(node, 'scope') and node.scope is not None:\n self.unique_name_to_scoped_name[key] = node.scope + '/' + node.debugName\n if node.scope == '' and self.shallowest_scope_name:\n self.unique_name_to_scoped_name[node.debugName] = self.shallowest_scope_name + '/' + node.debugName\n\n # replace name\n for key, node in self.nodes_io.items():\n self.nodes_io[key].inputs = [self.unique_name_to_scoped_name[node_input_id] for node_input_id in node.inputs]\n if node.debugName in self.unique_name_to_scoped_name:\n self.nodes_io[key].debugName = self.unique_name_to_scoped_name[node.debugName]\n\n def to_proto(self):\n \"\"\"\n Converts graph representation of GraphPy object to TensorBoard\n required format.\n \"\"\"\n # TODO: compute correct memory usage and CPU time once\n # PyTorch supports it\n nodes = []\n for v in self.nodes_io.values():\n nodes.append(node_proto(v.debugName,\n input=v.inputs,\n outputsize=v.tensor_size,\n op=v.kind,\n attributes=v.attributes))\n return nodes\n\n\ndef parse(graph, trace, args=None, omit_useless_nodes=True):\n \"\"\"This method parses an optimized PyTorch model graph and produces\n a list of nodes and node stats for eventual conversion to TensorBoard\n protobuf format.\n\n Args:\n graph (PyTorch module): The model graph to be parsed.\n trace (PyTorch JIT TracedModule): The model trace to be parsed.\n args (tuple): input tensor[s] for the model.\n omit_useless_nodes (boolean): Whether to remove nodes from the 
graph.\n \"\"\"\n n_inputs = len(args)\n\n scope = {}\n nodes_py = GraphPy()\n for node in graph.inputs():\n if omit_useless_nodes:\n if len(node.uses()) == 0: # number of user of the node (= number of outputs/ fanout)\n continue\n\n if node.type().kind() != CLASSTYPE_KIND:\n nodes_py.append(NodePyIO(node, 'input'))\n\n attr_to_scope: Dict[Any, str] = dict()\n for node in graph.nodes():\n if node.kind() == GETATTR_KIND:\n attr_name = node.s('name')\n parent = node.input().node()\n if parent.kind() == GETATTR_KIND: # If the parent node is not the top-level \"self\" node\n parent_attr_name = parent.s('name')\n parent_scope = attr_to_scope[parent_attr_name]\n attr_scope = parent_scope.split('/')[-1]\n attr_to_scope[attr_name] = '{}/{}.{}'.format(parent_scope, attr_scope, attr_name)\n else:\n attr_to_scope[attr_name] = '__module.{}'.format(attr_name)\n # We don't need classtype nodes; scope will provide this information\n if node.output().type().kind() != CLASSTYPE_KIND:\n node_py = NodePyOP(node)\n node_py.scopeName = attr_to_scope[attr_name] # type: ignore\n nodes_py.append(node_py)\n else:\n nodes_py.append(NodePyOP(node))\n\n for i, node in enumerate(graph.outputs()): # Create sink nodes for output ops\n node_pyio = NodePyIO(node, 'output')\n node_pyio.debugName = \"output.{}\".format(i + 1)\n node_pyio.inputs = [node.debugName()]\n nodes_py.append(node_pyio)\n\n def parse_traced_name(module):\n if isinstance(module, torch.jit.TracedModule):\n module_name = module._name\n else:\n module_name = getattr(module, 'original_name', \"Module\")\n return module_name\n\n alias_to_name = dict()\n base_name = parse_traced_name(trace)\n for name, module in trace.named_modules(prefix='__module'):\n mod_name = parse_traced_name(module)\n attr_name = name.split('.')[-1]\n alias_to_name[name] = '{}[{}]'.format(mod_name, attr_name)\n\n for node in nodes_py.nodes_op:\n module_aliases = node.scopeName.split('/')\n replacements = [\n alias_to_name[alias]\n if alias in alias_to_name\n else alias.split('.')[-1]\n for alias in module_aliases\n ]\n node.scopeName = base_name\n if any(replacements):\n node.scopeName += '/' + '/'.join(replacements)\n\n nodes_py.populate_namespace_from_OP_to_IO()\n return nodes_py.to_proto()\n\n\ndef graph(model, args, verbose=False):\n \"\"\"\n This method processes a PyTorch model and produces a `GraphDef` proto\n that can be logged to TensorBoard.\n\n Args:\n model (PyTorch module): The model to be parsed.\n args (tuple): input tensor[s] for the model.\n verbose (bool): Whether to print out verbose information while\n processing.\n \"\"\"\n with torch.onnx.select_model_mode_for_export(model, torch.onnx.TrainingMode.EVAL): # TODO: move outside of torch.onnx?\n try:\n trace = torch.jit.trace(model, args)\n graph = trace.graph\n torch._C._jit_pass_inline(graph)\n except RuntimeError as e:\n print(e)\n print('Error occurs, No graph saved')\n raise e\n\n if verbose:\n print(graph)\n list_of_nodes = parse(graph, trace, args)\n # We are hardcoding that this was run on CPU even though it might have actually\n # run on GPU. 
Note this is what is shown in TensorBoard and has no bearing\n # on actual execution.\n # TODO: See if we can extract GPU vs CPU information from the PyTorch model\n # and pass it correctly to TensorBoard.\n #\n # Definition of StepStats and DeviceStepStats can be found at\n # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/test/graph-test.ts\n # and\n # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto\n stepstats = RunMetadata(step_stats=StepStats(dev_stats=[DeviceStepStats(device=\"/device:CPU:0\")]))\n return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats\n # The producer version has been reverse engineered from standard\n # TensorBoard logged data.\n"
] | [
[
"torch.tensor"
],
[
"torch.no_grad"
],
[
"torch.onnx.select_model_mode_for_export",
"torch._C._jit_pass_inline",
"torch.jit.trace"
]
] |
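A minimal usage sketch for the `graph()` helper in the record above, assuming the file is importable from its upstream location (`torch.utils.tensorboard._pytorch_graph`); the model and input shapes are illustrative, not taken from the source:

import torch
from torch.utils.tensorboard._pytorch_graph import graph

# A toy traceable model; any nn.Module that torch.jit.trace accepts works.
model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
inputs = (torch.randn(1, 4),)  # trace inputs, passed through to torch.jit.trace
graph_def, run_metadata = graph(model, inputs)
# graph_def is a GraphDef proto; run_metadata hardcodes "/device:CPU:0" as noted in the code.
print(len(graph_def.node), run_metadata.step_stats.dev_stats[0].device)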
hirano1412/bdpy | [
"cee6f36dcdf4f4d29fc3a6980777e1c3d7c66cbb"
] | [
"test/test_preproc.py"
] | [
"'''Tests for bdpy.preprocessor'''\n\n\nfrom unittest import TestCase, TestLoader, TextTestRunner\n\nimport numpy as np\nfrom scipy.signal import detrend\n\nfrom bdpy import preproc\n\n\nclass TestPreprocessor(TestCase):\n '''Tests of 'preprocessor' module'''\n\n @classmethod\n def test_average_sample(cls):\n '''Test for average_sample'''\n\n x = np.random.rand(10, 100)\n group = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2])\n\n exp_output_x = np.vstack((np.average(x[0:5, :], axis=0),\n np.average(x[5:10, :], axis=0)))\n exp_output_ind = np.array([0, 5])\n\n test_output_x, test_output_ind = preproc.average_sample(x, group,\n verbose=True)\n\n np.testing.assert_array_equal(test_output_x, exp_output_x)\n np.testing.assert_array_equal(test_output_ind, exp_output_ind)\n\n @classmethod\n def test_detrend_sample_default(cls):\n '''Test for detrend_sample (default)'''\n\n x = np.random.rand(20, 10)\n group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])\n\n exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear')\n + np.mean(x[0:10, :], axis=0),\n detrend(x[10:20, :], axis=0, type='linear')\n + np.mean(x[10:20, :], axis=0)))\n\n test_output = preproc.detrend_sample(x, group, verbose=True)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n @classmethod\n def test_detrend_sample_nokeepmean(cls):\n '''Test for detrend_sample (keep_mean=False)'''\n\n x = np.random.rand(20, 10)\n group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])\n\n exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear'),\n detrend(x[10:20, :], axis=0, type='linear')))\n\n test_output = preproc.detrend_sample(x, group, keep_mean=False,\n verbose=True)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n @classmethod\n def test_normalize_sample(cls):\n '''Test for normalize_sample (default)'''\n\n x = np.random.rand(20, 10)\n group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])\n\n mean_a = np.mean(x[0:10, :], axis=0)\n mean_b = np.mean(x[10:20, :], axis=0)\n\n exp_output = np.vstack((100 * (x[0:10, :] - mean_a) / mean_a,\n 100 * (x[10:20, :] - mean_b) / mean_b))\n\n test_output = preproc.normalize_sample(x, group, verbose=True)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n @classmethod\n def test_shift_sample_singlegroup(cls):\n '''Test for shift_sample (single group, shift_size=1)'''\n\n x = np.array([[1, 2, 3],\n [11, 12, 13],\n [21, 22, 23],\n [31, 32, 33],\n [41, 42, 43]])\n grp = np.array([1, 1, 1, 1, 1])\n\n exp_output_data = np.array([[11, 12, 13],\n [21, 22, 23],\n [31, 32, 33],\n [41, 42, 43]])\n exp_output_ind = [0, 1, 2, 3]\n\n # Default shift_size = 1\n test_output_data, test_output_ind = preproc.shift_sample(x, grp,\n verbose=True)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_ind, exp_output_ind)\n\n @classmethod\n def test_shift_sample_twogroup(cls):\n '''Test for shift_sample (two groups, shift_size=1)'''\n\n x = np.array([[1, 2, 3],\n [11, 12, 13],\n [21, 22, 23],\n [31, 32, 33],\n [41, 42, 43],\n [51, 52, 53]])\n grp = np.array([1, 1, 1, 2, 2, 2])\n\n exp_output_data = np.array([[11, 12, 13],\n [21, 22, 23],\n [41, 42, 43],\n [51, 52, 53]])\n exp_output_ind = [0, 1, 3, 4]\n\n # Default shift_size=1\n test_output_data, test_output_ind = preproc.shift_sample(x, grp,\n verbose=True)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_ind, 
exp_output_ind)\n\n @classmethod\n def test_select_top_default(cls):\n '''Test for select_top (default, axis=0)'''\n\n test_data = np.array([[1, 2, 3, 4, 5],\n [11, 12, 13, 14, 15],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35],\n [41, 42, 43, 44, 45]])\n test_value = np.array([15, 3, 6, 20, 0])\n test_num = 3\n\n exp_output_data = np.array([[1, 2, 3, 4, 5],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35]])\n exp_output_index = np.array([0, 2, 3])\n\n test_output_data, test_output_index = preproc.select_top(test_data,\n test_value,\n test_num)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_index, exp_output_index)\n\n @classmethod\n def test_select_top_axisone(cls):\n '''Test for select_top (axis=1)'''\n\n test_data = np.array([[1, 2, 3, 4, 5],\n [11, 12, 13, 14, 15],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35],\n [41, 42, 43, 44, 45]])\n test_value = np.array([15, 3, 6, 20, 0])\n test_num = 3\n\n exp_output_data = np.array([[1, 3, 4],\n [11, 13, 14],\n [21, 23, 24],\n [31, 33, 34],\n [41, 43, 44]])\n exp_output_index = np.array([0, 2, 3])\n\n test_output_data, test_output_index = preproc.select_top(test_data,\n test_value,\n test_num,\n axis=1)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_index, exp_output_index)\n\n\nif __name__ == '__main__':\n test_suite = TestLoader().loadTestsFromTestCase(TestPreprocessor)\n TextTestRunner(verbosity=2).run(test_suite)\n"
] | [
[
"numpy.vstack",
"scipy.signal.detrend",
"numpy.testing.assert_array_equal",
"numpy.random.rand",
"numpy.array",
"numpy.average",
"numpy.mean"
]
] |
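The expectations encoded in these tests can be read back as a usage sketch; assuming `bdpy` is installed, `average_sample` averages rows within each group label and returns the first row index of each group:

import numpy as np
from bdpy import preproc

x = np.random.rand(10, 100)
group = np.array([1] * 5 + [2] * 5)  # two groups of five samples each
x_avg, ind = preproc.average_sample(x, group, verbose=True)
# One averaged row per group, indexed by each group's first sample.
assert x_avg.shape == (2, 100)
assert list(ind) == [0, 5]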
ogarokpeter/gene_network_sirius_2019 | [
"419cc430dbde4332acf5cd6eb5cfa669270c53af"
] | [
"RankAggregation/SimpleRankAggregation.py"
] | [
"# RUN WITH /usr/bin/python3 minet.py (python 3.6)\n\nimport sys\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc\nimport pandas as pd\n\n\ndef compute_aggregated_matrix(matrixfiles_num, matrixfiles, savematrixfile, saveresultfile, coeffs=[1, 1, 1, 1]):\n # matrixfiles_num = int(sys.argv[1])\n # matrixfiles = [sys.argv[i] for i in range(2, matrixfiles_num + 2)]\n # savematrixfile = sys.argv[matrixfiles_num + 2]\n # saveresultfile = sys.argv[matrixfiles_num + 3]\n matrices = [pd.read_csv(f, index_col=0, sep='\\t') for f in matrixfiles]\n genes = matrices[0].index\n # print(genes)\n\n # print(matrices)\n sz = len(matrices[0])\n for matrix in matrices:\n assert len(matrix) == sz\n\n for matrix in matrices:\n for column in matrix:\n temp = matrix[column].argsort()\n ranks = np.empty_like(temp)\n ranks[temp] = np.arange(len(matrix[column]))\n matrix[column] = ranks\n \n res = np.zeros(shape=(sz, sz))\n for s in range(sz):\n for i, matrix in enumerate(matrices):\n res[s] += matrix.iloc[:, s].values * coeffs[i]\n res[s] /= len(matrices)\n\n for row in res:\n row /= row.sum()\n\n result_df = pd.DataFrame(res, columns=genes, index=genes)\n \n result_df.to_csv(saveresultfile, index=True, header=True, sep='\\t')\n # print(result_df)\n return result_df\n\n\nmatricesdirname = \"/home/user/Sirius/gene_network_sirius_2019/Matrices_1\"\nsavematricesdirname = \"/home/user/Sirius/gene_network_sirius_2019/Matrices_6\"\npredictedfilename = matricesdirname + \"/{1}_{0}_predicted.txt\"\ntruefilename = matricesdirname + \"/{1}_{0}_true.txt\"\nsavematricesfilename = savematricesdirname + \"/{0}_predicted.txt\"\n# datalist = ['exps_10', 'exps_10_2', 'exps_10_bgr', 'exps_50', 'exps_50_2', 'exps_50_bgr', 'exps_100', 'exps_100_2', 'exps_100_bgr', 'genes_200_exps_10_bgr', 'genes_400_exps_10_bgr', 'genes_600_exps_10_bgr', 'genes_700_exps_10_bgr', 'genes_1000_exps_10_bgr']\ndatalist = ['genes_200_exps_10_bgr', 'genes_200_exps_20_bgr', 'genes_200_exps_40_bgr', 'genes_400_exps_10_bgr', 'genes_400_exps_40_bgr', 'genes_400_exps_80_bgr', 'genes_500_exps_10_bgr', 'genes_500_exps_50_bgr', 'genes_500_exps_100_bgr']\nalgolist = ['aracne', 'mrnet', 'mrnetb']\nsaveresultsfile = \"/home/user/Sirius/gene_network_sirius_2019/RankAggregation/res_arrgeg_on_petr_big_data_many_exps.txt\"\ntmpfile = \"/home/user/Sirius/gene_network_sirius_2019/RankAggregation/data/tmp5.txt\"\n\n\nif __name__ == \"__main__\":\n results = np.zeros(shape=(len(datalist)))\n\n for i, dataname in enumerate(datalist):\n\n true_df = pd.read_csv(truefilename.format(dataname, algolist[1]), index_col=0, sep='\\t')\n predicted_df = compute_aggregated_matrix(len(algolist), [predictedfilename.format(dataname, algo) for algo in algolist], tmpfile, savematricesfilename.format(dataname))\n true_df.to_csv(savematricesdirname + \"/{0}_true.txt\".format(dataname), index=True, header=True, sep='\\t')\n # print(true_df)\n\n true_array = true_df.values[np.triu_indices(true_df.values.shape[0], k=1)]\n predicted_array = predicted_df.values[np.triu_indices(predicted_df.values.shape[0], k=1)]\n \n roc_auc = 0\n # try:\n # fpr, tpr, thresholds = roc_curve(true_array, predicted_array)\n # roc_auc = auc(fpr, tpr)\n # except:\n # print(\"error\", dataname, algo)\n fpr, tpr, thresholds = roc_curve(true_array, predicted_array)\n roc_auc = auc(fpr, tpr)\n results[i] = roc_auc\n\n with open(savematricesdirname + \"/{0}_auc.txt\".format(dataname), 'w') as f:\n f.write(str(roc_auc) + '\\n')\n print(\"done\", dataname, results[i])\n with open(saveresultsfile, \"a\") as f:\n 
f.write(\"done \" + dataname + str(results[i]))\n \n # print(\"done\", dataname, algo)\n\n print(results)\n\n"
] | [
[
"numpy.zeros",
"pandas.read_csv",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"pandas.DataFrame",
"numpy.triu_indices",
"numpy.empty_like"
]
] |
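A hedged call sketch for `compute_aggregated_matrix` above; the file names are placeholders for tab-separated gene-by-gene score matrices, and note that the `savematrixfile` argument is accepted but never used by the function body:

# Placeholder paths; each file is a genes-by-genes matrix with gene names
# as both the index column and the header, as written by the individual algorithms.
files = ["aracne_predicted.txt", "mrnet_predicted.txt", "mrnetb_predicted.txt"]
agg = compute_aggregated_matrix(len(files), files, "unused.txt", "aggregated.txt",
                                coeffs=[1, 1, 1])
# agg: per-column rank-transformed scores, weighted by coeffs, averaged across
# inputs, row-normalized, written to "aggregated.txt" and returned as a DataFrame.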
Yoshi-0921/MAEXP | [
"cc03fdd46db9b1838df8f7782b4bd1b2bb3f11d5"
] | [
"core/agents/models/customs/da3.py"
] | [
"\"\"\"Source code for distributed attentional actor architecture (DA3) model.\n\nAuthor: Yoshinari Motokawa <yoshinari.moto@fuji.waseda.jp>\n\"\"\"\nfrom typing import List\n\nimport torch\nfrom core.utils.logging import initialize_logging\nfrom omegaconf import DictConfig\nfrom torch import nn\n\nfrom ..hard_shrink_attention import HardShrinkBlock\nfrom ..vit import Block, PatchEmbed\n\nlogger = initialize_logging(__name__)\n\n\nclass DA3(nn.Module):\n def __init__(self, config: DictConfig, input_shape: List[int], output_size: int):\n super().__init__()\n patched_size_x = input_shape[1] // config.model.patch_size\n patched_size_y = input_shape[2] // config.model.patch_size\n self.view_method = config.observation_area_mask\n\n self.patch_embed = PatchEmbed(\n patch_size=config.model.patch_size,\n in_chans=input_shape[0],\n embed_dim=config.model.embed_dim,\n )\n\n self.saliency_vector = nn.Parameter(torch.zeros(1, 1, config.model.embed_dim))\n self.pos_embed = nn.Parameter(\n torch.zeros(1, patched_size_x * patched_size_y + 1, config.model.embed_dim)\n )\n\n block = HardShrinkBlock if config.model.attention == \"hard\" else Block\n self.blocks = nn.ModuleList(\n [\n block(\n dim=config.model.embed_dim,\n num_heads=config.model.num_heads,\n mlp_ratio=config.model.mlp_ratio,\n **{\"af_lambd\": config.model.af_lambd}\n )\n for _ in range(config.model.block_loop)\n ]\n )\n\n self.norm = nn.LayerNorm(config.model.embed_dim)\n self.head = nn.Linear(config.model.embed_dim, output_size)\n\n def forward(self, state):\n x = self.state_encoder(state)\n\n out = self.patch_embed(x)\n saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)\n out = torch.cat((saliency_vector, out), dim=1)\n out = out + self.pos_embed\n\n for blk in self.blocks:\n out = blk(out)\n\n out = self.norm(out)\n out = out[:, 0]\n\n out = self.head(out)\n\n return out\n\n def forward_attn(self, state):\n x = self.state_encoder(state)\n\n out = self.patch_embed(x)\n saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)\n out = torch.cat((saliency_vector, out), dim=1)\n out = out + self.pos_embed\n\n attns = list()\n for blk in self.blocks:\n out, attn = blk.forward_attn(out)\n attns.append(attn.detach())\n\n out = self.norm(out)\n out = out[:, 0]\n\n out = self.head(out)\n\n return out, [attns]\n\n def state_encoder(self, state):\n\n return state[self.view_method]\n"
] | [
[
"torch.zeros",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.cat"
]
] |
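A minimal instantiation sketch for the `DA3` model above; the config keys mirror exactly what the constructor reads, but the concrete values are illustrative guesses rather than the project's defaults:

import torch
from omegaconf import OmegaConf

config = OmegaConf.create({
    "observation_area_mask": "local",  # forward() reads state[<this key>]
    "model": {
        "patch_size": 4, "embed_dim": 64, "num_heads": 4, "mlp_ratio": 4,
        "attention": "hard", "af_lambd": 0.1, "block_loop": 2,
    },
})
# input_shape is (channels, x, y); x and y must be divisible by patch_size.
model = DA3(config, input_shape=[3, 16, 16], output_size=5)
out = model({"local": torch.randn(8, 3, 16, 16)})  # -> torch.Size([8, 5])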
joeferg425/ws281x_lightberries | [
"c6a5a3ffeeb3642b34e3e6e3b759af9e4725efce"
] | [
"LightBerries/LightStrings.py"
] | [
"\"\"\"Defines basic light string data and functions.\"\"\"\nimport os\nimport sys\nimport atexit\nimport inspect\nimport time\nimport logging\nfrom typing import Any, Optional, Sequence, Union, overload\nfrom nptyping import NDArray\nimport numpy as np\nfrom LightBerries.LightBerryExceptions import LightStringException\nfrom LightBerries.RpiWS281xPatch import rpi_ws281x\nfrom LightBerries.LightPixels import Pixel, PixelColors\n\nLOGGER = logging.getLogger(\"LightBerries\")\n\n\nclass LightString(Sequence[np.int_]):\n \"\"\"Defines basic LED array data and functions.\"\"\"\n\n def __init__(\n self,\n ledCount: Optional[int] = None,\n pixelStrip: rpi_ws281x.PixelStrip = None,\n simulate: bool = False,\n ) -> None:\n \"\"\"Creates a pixel array using the rpipixelStrip library and Pixels.\n\n Args:\n ledCount: the number of LEDs desired in the LightString\n pixelStrip: the ws281x object that actually controls the LED signaling\n simulate: dont use GPIO\n\n Raises:\n Warning: if something unexpected could happen\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n # cant run GPIO stuff without root, tell the user if they forgot\n # linux check is just for debugging with fake GPIO on windows\n if sys.platform == \"linux\" and not os.getuid() == 0: # pylint: disable = no-member\n raise LightStringException(\n \"GPIO functionality requires root privilege. Please run command again as root\"\n )\n\n # catch error cases first\n if ledCount is None and pixelStrip is None and simulate is False:\n raise LightStringException(\n \"Cannot create LightString object without ledCount or \" + \"pixelStrip object being specified\"\n )\n # catch error cases first\n # if ledCount is not None and pixelStrip is not None:\n # raise Warning(\n # \"ledCount is overridden when pixelStrip is and ledcount \"\n # + \"are both passed to LightString constructor\"\n # )\n\n try:\n self.simulate = simulate\n # use passed led count if it is valid\n if ledCount is not None:\n self._ledCount = ledCount\n\n # used passed pixel strip if it is not none\n if pixelStrip is not None:\n self.pixelStrip = pixelStrip\n self.pixelStrip.begin()\n self._ledCount = self.pixelStrip.numPixels()\n LOGGER.debug(\n \"%s.%s Created WS281X object\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n )\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"%s.%s Exception: %s\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n ex,\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n try:\n # validate led count\n if not isinstance(self._ledCount, int):\n raise LightStringException(\n f'Cannot create LightString object with LED count \"{self._ledCount}\"',\n )\n # if led count is good, create our pixel sequence\n self.rgbArray: NDArray[(3, Any), np.int32] = np.zeros((self._ledCount, 3))\n self.rgbArray[:] = np.array([Pixel().array for i in range(self._ledCount)])\n LOGGER.debug(\n \"%s.%s Created Numpy Light array\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n )\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"%s.%s Exception: %s\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n ex,\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n # try to force 
cleanup of underlying c objects when user exits\n atexit.register(self.__del__)\n\n def __del__(\n self,\n ) -> None:\n \"\"\"Properly disposes of the rpipixelStrip object.\n\n Prevents memory leaks (hopefully) that were happening in the rpi.PixelStrip module.\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n # check if pixel strip has been created\n if isinstance(self.pixelStrip, rpi_ws281x.PixelStrip):\n # turn off leds\n self.off()\n # cleanup c memory usage\n try:\n self.pixelStrip._cleanup()\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\"Failed to clean up WS281X object: %s\", str(ex))\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n def __len__(\n self,\n ) -> int:\n \"\"\"Return length of the light string (the number of LEDs).\n\n Returns:\n the number of LEDs in the array\n \"\"\"\n if self.rgbArray is not None:\n return len(self.rgbArray)\n else:\n return 0\n\n @overload\n def __getitem__( # noqa D105\n self,\n idx: int,\n ) -> NDArray[(3,), np.int32]:\n ... # pylint: disable=pointless-statement\n\n @overload\n def __getitem__( # noqa D105 # pylint: disable=function-redefined\n self,\n s: slice,\n ) -> NDArray[(3, Any), np.int32]:\n ... # pylint: disable=pointless-statement\n\n def __getitem__( # pylint: disable=function-redefined\n self, key: Union[int, slice]\n ) -> Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]]:\n \"\"\"Return a LED index or slice from LED array.\n\n Args:\n key: an index of a single LED, or a slice specifying a range of LEDs\n\n Returns:\n the LED value or values as requested\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n try:\n if isinstance(self.rgbArray, np.ndarray):\n return self.rgbArray[key].array\n else:\n raise LightStringException(\"Cannot index into uninitialized LightString object\")\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception('Failed to get key \"%s\" from %s: %s', key, self.rgbArray, ex)\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n def __setitem__(\n self,\n key: Union[int, slice],\n value: Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]],\n ) -> None:\n \"\"\"Set LED value(s) in the array.\n\n Args:\n key: the index or slice specifying one or more LED indices\n value: the RGB value or values to assign to the given LED indices\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n try:\n if isinstance(self.rgbArray, np.ndarray):\n if isinstance(key, slice):\n if isinstance(value, np.ndarray):\n self.rgbArray.__setitem__(key, value)\n elif isinstance(value, Sequence):\n self.rgbArray.__setitem__(key, [Pixel(v).array for v in value])\n else:\n raise LightStringException(\n \"Cannot assign multiple indices of LightString using a single value\"\n )\n else:\n if isinstance(value, np.ndarray):\n self.rgbArray.__setitem__(key, value)\n elif isinstance(value, Pixel):\n self.rgbArray.__setitem__(key, Pixel(value).array)\n else:\n raise LightStringException(\n \"Cannot assign single index of LightString using multiple values\"\n )\n else:\n raise LightStringException(\"Cannot 
index into uninitialized LightString object\")\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\"Failed to set light %s to value %s: %s\", key, value, ex)\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n def __enter__(\n self,\n ) -> \"LightString\":\n \"\"\"Get an instance of this object object.\n\n Returns:\n an instance of LightString\n \"\"\"\n return self\n\n def __exit__(\n self,\n *args,\n ) -> None:\n \"\"\"Cleanup the instance of this object.\n\n Args:\n args: ignored\n \"\"\"\n self.__del__()\n\n def off(\n self,\n ) -> None:\n \"\"\"Turn all of the LEDs in the LightString off.\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n for index in range(len(self.rgbArray)):\n try:\n self[index] = PixelColors.OFF.array\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"Failed to set pixel %s in WS281X to value %s: %s\",\n index,\n LightString(0),\n ex,\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n self.refresh()\n\n def refresh(\n self,\n ) -> None:\n \"\"\"Update the ws281x signal using the numpy array.\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n try:\n # define callback for map method (fast iterator)\n if self.simulate is False:\n\n def SetPixel(irgb):\n try:\n i = irgb[0]\n rgb = irgb[1]\n value = (int(rgb[0]) << 16) + (int(rgb[1]) << 8) + int(rgb[2])\n self.pixelStrip.setPixelColor(i, value)\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"Failed to set pixel %d in WS281X to value %d: %s\",\n i,\n value,\n str(ex),\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n # copy this class's array into the ws281x array\n if self.simulate is False:\n list(\n map(\n SetPixel,\n enumerate(self.rgbArray),\n )\n )\n # send the signal out\n self.pixelStrip.show()\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception('Function call \"show\" in WS281X object failed: %s', str(ex))\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n\nif __name__ == \"__main__\":\n LOGGER.info(\"Running LightString\")\n # the number of pixels in the light string\n PIXEL_COUNT = 100\n # GPIO pin to use for PWM signal\n GPIO_PWM_PIN = 18\n # DMA channel\n DMA_CHANNEL = 5\n # frequency to run the PWM signal at\n PWM_FREQUENCY = 800000\n GAMMA = None\n LED_STRIP_TYPE = None\n INVERT = False\n PWM_CHANNEL = 0\n with LightString(\n pixelStrip=rpi_ws281x.PixelStrip(\n num=PIXEL_COUNT,\n pin=GPIO_PWM_PIN,\n dma=DMA_CHANNEL,\n freq_hz=PWM_FREQUENCY,\n channel=PWM_CHANNEL,\n invert=INVERT,\n gamma=GAMMA,\n strip_type=LED_STRIP_TYPE,\n ),\n ) as liteStr:\n liteStr.refresh()\n p = Pixel((255, 0, 0))\n liteStr[4] = PixelColors.RED\n liteStr.refresh()\n time.sleep(1)\n"
] | [
[
"numpy.zeros"
]
] |
xadupre/mlprodict | [
"f82c8a26a60104948c67849b1c4af95ca812c153"
] | [
"mlprodict/onnxrt/ops_cpu/op_solve.py"
] | [
"# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nfrom scipy.linalg import solve\nfrom ._op import OpRunBinaryNum\nfrom ._new_ops import OperatorSchema\n\n\nclass Solve(OpRunBinaryNum):\n\n atts = {'lower': False,\n 'transposed': False}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRunBinaryNum.__init__(self, onnx_node, desc=desc,\n expected_attributes=Solve.atts,\n **options)\n\n def _find_custom_operator_schema(self, op_name):\n if op_name == \"Solve\":\n return SolveSchema()\n raise RuntimeError( # pragma: no cover\n \"Unable to find a schema for operator '{}'.\".format(op_name))\n\n def _run(self, a, b): # pylint: disable=W0221\n if self.inplaces.get(1, False):\n return (solve(a, b, overwrite_b=True, lower=self.lower,\n transposed=self.transposed), )\n return (solve(a, b, lower=self.lower, transposed=self.transposed), )\n\n def _infer_shapes(self, a, b): # pylint: disable=W0221\n \"\"\"\n Returns the shapes.\n \"\"\"\n return (b, )\n\n def to_python(self, inputs):\n return ('from scipy.linalg import solve',\n \"return solve({}, {}, lower={}, transposed={})\".format(\n inputs[0], inputs[1], self.lower, self.transposed))\n\n\nclass SolveSchema(OperatorSchema):\n \"\"\"\n Defines a schema for operators added in this package\n such as @see cl TreeEnsembleClassifierDouble.\n \"\"\"\n\n def __init__(self):\n OperatorSchema.__init__(self, 'Solve')\n self.attributes = Solve.atts\n"
] | [
[
"scipy.linalg.solve"
]
] |
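What the `Solve` operator computes at runtime (mirroring `_run` and `to_python` above) is a plain `scipy.linalg.solve`; a self-contained check:

import numpy as np
from scipy.linalg import solve

a = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
# lower/transposed correspond to the operator's attributes (both default False).
x = solve(a, b, lower=False, transposed=False)
np.testing.assert_allclose(a @ x, b)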
smellslikeml/rikai | [
"179526dfe98b21059371d83f7540e3d43aa1200f"
] | [
"python/rikai/types/vision.py"
] | [
"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Vision Related User-defined Types:\n\n- :py:class:`Image`\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom io import IOBase\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom typing import Union\nfrom urllib.parse import urlparse\n\n# Third-party libraries\nimport numpy as np\nfrom PIL import Image as PILImage\n\n# Rikai\nfrom rikai.internal.uri_utils import normalize_uri\nfrom rikai.io import copy\nfrom rikai.mixin import Asset, Displayable, ToNumpy, ToPIL\nfrom rikai.spark.types import ImageType\n\n__all__ = [\"Image\"]\n\n\nclass Image(ToNumpy, ToPIL, Asset, Displayable):\n \"\"\"An external Image Asset.\n\n It contains a reference URI to an image stored on the remote system.\n\n Parameters\n ----------\n image : bytes, file-like object, str or :py:class:`~pathlib.Path`\n It can be the content of image, or a URI / Path of an image.\n \"\"\"\n\n __UDT__ = ImageType()\n\n def __init__(\n self,\n image: Union[bytes, bytearray, IOBase, str, Path],\n ):\n data, uri = None, None\n if isinstance(image, IOBase):\n data = image.read()\n elif isinstance(image, (bytes, bytearray)):\n data = image\n else:\n uri = image\n super().__init__(data=data, uri=uri)\n\n @classmethod\n def from_array(\n cls,\n array: np.ndarray,\n uri: Union[str, Path],\n mode: str = None,\n format: str = None,\n **kwargs,\n ) -> Image:\n \"\"\"Create an image in memory from numpy array.\n\n Parameters\n ----------\n array : np.ndarray\n Array data\n uri : str or Path\n The external URI to store the data.\n mode : str, optional\n The mode which PIL used to create image. See supported\n `modes on PIL document <https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes>`_.\n format : str, optional\n The image format to save as. See\n `supported formats <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_ for details.\n kwargs : dict, optional\n Optional arguments to pass to `PIL.Image.save <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_.\n\n See Also\n --------\n :py:class:`PIL.Image.fromarray`\n :py:func:`~rikai.spark.functions.vision.numpy_to_image`\n\n \"\"\" # noqa: E501\n\n assert array is not None\n img = PILImage.fromarray(array, mode=mode)\n return cls.from_pil(img, uri, format=format, **kwargs)\n\n @staticmethod\n def from_pil(\n img: PILImage, uri: Union[str, Path], format: str = None, **kwargs\n ) -> Image:\n \"\"\"Create an image in memory from a :py:class:`PIL.Image`.\n\n Parameters\n ----------\n img : :py:class:`PIL.Image`\n An PIL Image instance\n uri : str or Path\n The URI to store the image externally.\n format : str, optional\n The image format to save as. 
See\n `supported formats <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_ for details.\n kwargs : dict, optional\n Optional arguments to pass to `PIL.Image.save <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_.\n \"\"\" # noqa: E501\n parsed = urlparse(normalize_uri(uri))\n if parsed.scheme == \"file\":\n img.save(uri, format=format, **kwargs)\n else:\n with NamedTemporaryFile() as fobj:\n img.save(fobj, format=format, **kwargs)\n fobj.flush()\n copy(fobj.name, uri)\n return Image(uri)\n\n def display(self, **kwargs):\n \"\"\"\n Custom visualizer for this image in jupyter notebook\n\n Parameters\n ----------\n kwargs: dict\n Optional display arguments\n\n Returns\n -------\n img: IPython.display.Image\n \"\"\"\n from IPython.display import Image\n\n with self.open() as fobj:\n return Image(fobj.read(), **kwargs)\n\n def __repr__(self) -> str:\n return f\"Image(uri={self.uri})\"\n\n def _repr_html_(self):\n \"\"\"Default visualizer for remote ref (or local ref under cwd)\"\"\"\n return self.display()._repr_html_()\n\n def _repr_mimebundle_(self, include=None, exclude=None):\n \"\"\"default visualizer for embedded mime bundle\"\"\"\n return self.display()._repr_mimebundle_(\n include=include, exclude=exclude\n )\n\n def _repr_jpeg_(self):\n \"\"\"default visualizer for embedded jpeg\"\"\"\n return self.display()._repr_jpeg_()\n\n def _repr_png_(self):\n \"\"\"default visualizer for embedded png\"\"\"\n return self.display()._repr_png_()\n\n def __eq__(self, other) -> bool:\n return isinstance(other, Image) and super().__eq__(other)\n\n def to_pil(self) -> PILImage:\n \"\"\"Return an PIL image.\n\n Note\n ----\n The caller should close the image.\n https://pillow.readthedocs.io/en/stable/reference/open_files.html#image-lifecycle\n \"\"\"\n return PILImage.open(self.open())\n\n def to_numpy(self) -> np.ndarray:\n \"\"\"Convert this image into an :py:class:`numpy.ndarray`.\"\"\"\n with self.to_pil() as pil_img:\n return np.asarray(pil_img)\n"
] | [
[
"numpy.asarray"
]
] |
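A short usage sketch of `Image.from_array` above; the URI is a placeholder local path (a `file` scheme, so PIL saves directly rather than going through the temp-file copy branch):

import numpy as np

arr = (np.random.rand(32, 32, 3) * 255).astype("uint8")
img = Image.from_array(arr, "/tmp/example.png", format="PNG")
print(img)  # Image(uri=/tmp/example.png)
assert img.to_numpy().shape == (32, 32, 3)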
JamesTheZ/BladeDISC | [
"e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34"
] | [
"pytorch_blade/tests/tensorrt/test_support_info.py"
] | [
"# Copyright 2022 The BladeDISC Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport unittest\nimport torch\nfrom torch.nn import functional as F\nfrom torch_blade import tensorrt\nfrom torch_blade import utils\nfrom torch_blade import tools\nfrom torch_blade import Config\nfrom torch_blade.logging import logger\nfrom torch_blade.testing.common_utils import Feedforward, TestCase\nfrom tests.tensorrt import skipIfNoTensorRT\nfrom torch_blade.onnx_backends.backend_testbed import OnnxBackendChecker\n\n\n@skipIfNoTensorRT()\nclass TestTensorRTSupportInfo(TestCase):\n def test_support_info(self):\n input = torch.ones([10, 10]).cuda()\n net = Feedforward(10, 10)\n net.eval().cuda()\n module = torch.jit.trace(net, input)\n module = tools.freeze_module(module._c, disableShapePeephole=False)\n graph = module.forward.graph\n\n unsupported = tensorrt.get_unsupported_nodes(graph)\n self.assertEqual(len(unsupported), 0)\n\n def test_empty_onnx_export(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 4)\n self.dropout = torch.nn.Dropout(p=0.8)\n\n def forward(self, x):\n x = self.linear(x)\n x = self.dropout(x)\n return x.contiguous().detach()\n\n model = Model().cuda().eval()\n module = torch.jit.trace(model, torch.ones([2, 3]).cuda())\n module = tools.freeze_module(module._c, disableShapePeephole=False)\n graph = module.forward.graph\n\n unsupported = tensorrt.get_unsupported_nodes(graph)\n self.assertEqual(len(unsupported), 0)\n\n def test_inplace_safety(self):\n class BasicBlock(torch.nn.Module):\n def __init__(self):\n super(BasicBlock, self).__init__()\n self.conv1 = torch.nn.Conv2d(3, 10, kernel_size=3, padding=1)\n self.conv2 = torch.nn.Conv2d(10, 3, kernel_size=3, padding=1)\n self.conv3 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)\n self.bnorm = torch.nn.BatchNorm2d(3)\n\n def forward_inplace(self, x):\n out = self.conv1(x)\n # this inplace bias is supported\n out += 1\n # this inplace relu_ is supported\n out = F.relu_(out)\n out = self.conv2(out)\n # this inplace relu_ is supported\n out = F.relu_(out)\n shortcut = out\n # this inplace add_ is supported\n out += shortcut\n shortcut = out\n out = self.conv3(out)\n out = self.bnorm(out)\n # this inplace add_ is supported\n out += shortcut\n out1 = out[:, :1, :, :]\n out2 = out[:, 1:, :, :]\n out1 = F.relu_(out1)\n out2 = F.relu_(out2)\n out[:, :1, :, :] = out1\n out[:, 1:, :, :] = out2\n return out\n\n def forward_no_inplace(self, x):\n out = self.conv1(x)\n out = out + 1\n out = F.relu(out)\n out = self.conv2(out)\n out = F.relu(out)\n shortcut = out\n out = out + shortcut\n shortcut = out\n out = self.conv3(out)\n out = self.bnorm(out)\n out = out + shortcut\n out = F.relu(out)\n return out\n\n class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.block1 = BasicBlock()\n self.block2 = BasicBlock()\n\n def forward(self, x):\n out1 = self.block1.forward_inplace(x)\n out1 = 
self.block2.forward_inplace(out1)\n out2 = self.block1.forward_no_inplace(x)\n out2 = self.block2.forward_no_inplace(out2)\n return out1, out2\n\n model = Model()\n model.eval()\n model.cuda()\n\n batch = torch.ones([1, 3, 224, 224])\n batch = batch.cuda()\n out1, out2 = model(batch)\n self.assertEqual(out1, out2)\n traced_model = torch.jit.trace(model, batch)\n frozen_module = tools.freeze_module(traced_model._c, disableShapePeephole=False)\n graph = frozen_module.forward.graph\n ops_counter = utils.list_ops_count(graph)\n unspt_counter = collections.Counter()\n unsupported = tensorrt.get_unsupported_nodes(graph)\n for node in unsupported:\n unspt_counter[node.kind()] += 1\n self.assertEqual(ops_counter[\"aten::slice\"], unspt_counter[\"aten::slice\"])\n self.assertEqual(ops_counter[\"aten::view\"], unspt_counter[\"aten::view\"])\n self.assertEqual(ops_counter[\"aten::copy_\"], unspt_counter[\"aten::copy_\"])\n self.assertEqual(ops_counter[\"aten::expand\"], unspt_counter[\"aten::expand\"])\n self.assertEqual(unspt_counter[\"aten::relu_\"], 4)\n logger.info(ops_counter)\n logger.info(unspt_counter)\n self.assertEqual(unspt_counter[\"aten::add_\"], 0)\n\n def test_inplace_safety_another(self):\n def op(x):\n return x + 1\n\n def op_(x):\n x -= 1\n return x\n\n def _count_unsupported(unspt):\n unspt_counter = collections.Counter()\n for node in unspt:\n unspt_counter[node.kind()] += 1\n return unspt_counter\n\n def _count_graph(graph):\n unsupported = tensorrt.get_unsupported_nodes(graph, ignore_device=True)\n return _count_unsupported(unsupported)\n\n def _count_model(model):\n model.eval().cuda()\n input = torch.zeros([4]).cuda()\n output = model(input)\n traced_module = torch.jit.trace(model, (input,))\n graph = traced_module.graph\n return _count_graph(graph)\n\n class Model1(torch.nn.Module):\n \"\"\"\n Within this model, torch.jit.trace will produce graph like:\n %2 : Float = aten::add(%1, some_constant)\n %3 : Float = aten::sub_(%2, some_constant)\n %4 : Float = aten::add(%3, some_constant)\n\n The input of the third node is %3 instead of %2 which is not consistent with the definition of the\n corresponding nn.Module. So the inplace node aten::sub_ is the last consumer of its inputs which make it\n inplace-safe, and therefore all the nodes in this graph is inplace-safe.\n\n The same phenomenon occurs in model2. So we manually add two graphs that have 'correct' topology structures\n with corresponding nn.Module (i.e. 
Model1 and Model2) and use them as UTs.\n \"\"\"\n\n def forward(self, x):\n x1 = op(x)\n x2 = op_(x1)\n x3 = op(x1)\n return x3\n\n class Model2(torch.nn.Module):\n def forward(self, x):\n x1 = op(x)\n x2 = op_(x1) # support\n x3 = op_(x2) # support\n x4 = op(x3)\n x5 = op_(x3) # not support\n x6 = op_(x5) # not support\n x7 = op(x3)\n return x7\n\n unspt_counter = _count_model(Model1())\n self.assertEqual(unspt_counter[\"aten::sub_\"], 0)\n unspt_counter = _count_model(Model2())\n self.assertEqual(unspt_counter[\"aten::sub_\"], 0)\n\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph1 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4) = aten::add(%2, %5, %5)\n return (%6)\n \"\"\"\n )\n\n graph2 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4) = aten::sub_(%4, %5, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : Float(4) = aten::add(%6, %7, %7)\n %9 : int = prim::Constant[value=1]()\n %10 : Float(4) = aten::sub_(%6, %9, %9)\n %11 : int = prim::Constant[value=1]()\n %12 : Float(4) = aten::sub_(%10, %11, %11)\n %13 : int = prim::Constant[value=1]()\n %14 : Float(4) = aten::add(%6, %13, %13)\n return (%14)\n \"\"\"\n )\n else:\n graph1 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4:1)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4:1) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4:1) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4:1) = aten::add(%2, %5, %5)\n return (%6)\n \"\"\"\n )\n\n graph2 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4:1)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4:1) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4:1) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4:1) = aten::sub_(%4, %5, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : Float(4:1) = aten::add(%6, %7, %7)\n %9 : int = prim::Constant[value=1]()\n %10 : Float(4:1) = aten::sub_(%6, %9, %9)\n %11 : int = prim::Constant[value=1]()\n %12 : Float(4:1) = aten::sub_(%10, %11, %11)\n %13 : int = prim::Constant[value=1]()\n %14 : Float(4:1) = aten::add(%6, %13, %13)\n return (%14)\n \"\"\"\n )\n unspt_counter = _count_graph(graph1)\n self.assertEqual(unspt_counter[\"aten::sub_\"], 1)\n unspt_counter = _count_graph(graph2)\n self.assertEqual(unspt_counter[\"aten::sub_\"], 2)\n\n def test_graph_input_inplace_safe(self):\n class Model(torch.nn.Module):\n def forward(self, x):\n return F.relu_(x)\n\n batch = torch.Tensor([1, -1, 1, -1])\n batch = batch.cuda()\n model = Model().eval().cuda()\n traced_model = torch.jit.trace(model, batch)\n self.assertEqual(batch, torch.Tensor([1, 0, 1, 0]))\n\n frozen_module = torch._C._freeze_module(traced_model._c)\n graph = frozen_module.forward.graph\n unspt_counter = collections.Counter()\n unsupported = tensorrt.get_unsupported_nodes(graph)\n for node in unsupported:\n unspt_counter[node.kind()] += 1\n self.assertEqual(unspt_counter[\"aten::relu_\"], 1)\n\n def test_view_kinds_0(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = 
torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1, 1, 1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1, 1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1) = aten::add(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1:1, 1:1, 1:1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1:1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1:1) = aten::add(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n unsupported = tensorrt.get_unsupported_nodes(graph, True)\n self.assertEqual(len(unsupported), 0)\n\n def test_view_kinds_1(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1, 1, 1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1, 1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1) = aten::add_(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1:1, 1:1, 1:1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1:1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1:1) = aten::add_(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n unsupported = tensorrt.get_unsupported_nodes(graph, True)\n self.assertEqual(len(unsupported), 3)\n\n def test_view_kinds_2(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1, 1, 1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1, 1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1, 1) = aten::add_(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1) = aten::select(%3, %7, %8)\n return (%9)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1:1, 1:1, 1:1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1:1, 1:1) = aten::add_(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1:1) = aten::select(%3, %7, %8)\n return (%9)\n \"\"\"\n )\n unsupported = tensorrt.get_unsupported_nodes(graph, True)\n self.assertEqual(len(unsupported), 3)\n\n # NOTE: this unsupported set length should be 3 (two aten::select and one aten::add_)\n # However, due to a flaw of the inplace safety check algorithm, aten::add_ is excluded\n # in the set.\n # todo: fix this error.\n 
# graph = torch.parse_ir(\n # '''\n # graph( %x.1 : Float(1:1, 1:1, 1:1)):\n # %1 : int = prim::Constant[value=0]()\n # %2 : int = prim::Constant[value=1]()\n # %3 : Float(1:1, 1:1, 1:1) = aten::add(%x.1, %1, %2)\n # %4 : int = prim::Constant[value=0]()\n # %5 : int = prim::Constant[value=1]()\n # %6 : Float(1:1, 1:1) = aten::select(%3, %4, %5)\n # %7 : Float(1:1, 1:1) = aten::add_(%3, %4, %5)\n # %8 : int = prim::Constant[value=1]()\n # %9 : int = prim::Constant[value=1]()\n # %10 : Float(1:1) = aten::select(%6, %8, %9)\n # return (%9)\n # '''\n # )\n # unsupported = tensorrt.get_unsupported_nodes(graph, True)\n # self.assertEqual(len(unsupported), 2)\n\n@skipIfNoTensorRT()\nclass TestManRules(TestCase):\n def _make_check(self, graph, target):\n checker = OnnxBackendChecker(graph, tensorrt.is_onnx2trt_supported, \"TensorRT\")\n is_supported = checker()\n self.assertEqual(is_supported, target)\n\n def test_aten_mul(self):\n graph = torch.parse_ir(\n \"\"\"\n graph(%0 : int[]):\n %1 : int = prim::Constant[value=1]()\n %3 : int = aten::mul(%0, %1)\n return (%3)\n \"\"\"\n )\n self._make_check(graph, False)\n\n def test_aten_add(self):\n graph = torch.parse_ir(\n \"\"\"\n graph(%0 : int[], %1 : int[]):\n %2 : int[] = aten::add(%0, %1)\n return (%2)\n \"\"\"\n )\n self._make_check(graph, False)\n\n def test_aten_eq(self):\n graph = torch.parse_ir(\n \"\"\"\n graph(%0 : int[]):\n %1 : int = prim::Constant[value=1]()\n %2 : int[] = prim::ListConstruct(%1)\n %3 : bool = aten::eq(%0, %2)\n return (%3)\n \"\"\"\n )\n self._make_check(graph, False)\n\n def test_const_fold_before_export(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph(%input0.2 : Float(1, 512, 18, 18, requires_grad=0, device=cuda:0)):\n %1 : None = prim::Constant() # :0:0\n %2 : bool = prim::Constant[value=1]()\n %3 : float[] = prim::Constant[value=[2., 2.]]()\n %x1.3 : Float(1, 512, 36, 36, requires_grad=0, device=cuda:0) = aten::upsample_bilinear2d(%input0.2, %1, %2, %3)\n return (%x1.3)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph(%input0.2 : Float(1:165888, 512:324, 18:18, 18:1, requires_grad=0, device=cuda:0)):\n %1 : None = prim::Constant() # :0:0\n %2 : bool = prim::Constant[value=1]()\n %3 : float[] = prim::Constant[value=[2., 2.]]()\n %x1.3 : Float(1:663552, 512:1296, 36:36, 36:1, requires_grad=0, device=cuda:0) = aten::upsample_bilinear2d(%input0.2, %1, %2, %3)\n return (%x1.3)\n \"\"\"\n )\n cfg = Config.get_current_context_or_new().clone()\n cfg.customize_onnx_opset_version = 11\n with cfg:\n self._make_check(graph, True)\n\n def test_scalar_input_on_graph(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph(%x.3 : Float(1, 64, 1, 1, requires_grad=0, device=cuda:0),\n %1 : int):\n %2 : int = prim::Constant[value=-1]()\n %3 : int[] = prim::ListConstruct(%1, %2)\n %input.14 : Float(1, 64, requires_grad=0, device=cuda:0) = aten::view(%x.3, %3)\n return (%input.14)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph(%x.3 : Float(1:64, 64:1, 1:1, 1:1, requires_grad=0, device=cuda:0),\n %1 : int):\n %2 : int = prim::Constant[value=-1]()\n %3 : int[] = prim::ListConstruct(%1, %2)\n %input.14 : Float(1:64, 64:1, requires_grad=0, device=cuda:0) = aten::view(%x.3, %3)\n return (%input.14)\n \"\"\"\n )\n self._make_check(graph, True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.parse_ir",
"torch.nn.functional.relu",
"torch.nn.functional.relu_",
"torch.nn.Conv2d",
"torch.zeros",
"torch._C._freeze_module",
"torch.Tensor",
"torch.jit.trace"
]
] |
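The core pattern these tests exercise, reduced to a sketch (requires CUDA and a TensorRT-enabled `torch_blade` build):

import torch
from torch_blade import tensorrt, tools

model = torch.nn.Linear(4, 4).eval().cuda()
traced = torch.jit.trace(model, torch.randn(1, 4).cuda())
frozen = tools.freeze_module(traced._c, disableShapePeephole=False)
# An empty result means every node in the frozen graph converts to TensorRT.
unsupported = tensorrt.get_unsupported_nodes(frozen.forward.graph)
print(len(unsupported))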
Gattocrucco/sipmfilter | [
"74215d6c53b998808fc6c677b46030234d996bdf"
] | [
"figthesis/figlaserpos.py"
] | [
"from matplotlib import pyplot as plt\n\nimport figlatex\nimport afterpulse_tile21\nimport textbox\nimport colormap\n\nvov = 5.5\n\n################\n\nap21 = afterpulse_tile21.AfterPulseTile21(vov)\n\nfig = plt.figure(num='figlaserpos-0', clear=True, figsize=[4.5, 3])\n\nap21.sim.hist('mainpos-offset', 'mainnpe==1', fig=fig, selection=False)\nax, = fig.get_axes()\ntextbox.textbox(ax, f'{vov} VoV', fontsize='medium', loc='lower center')\nax.set_xlabel('Laser peak position [ns]')\n\nfigs = [fig]\n\nfig = plt.figure(num='figlaserpos-1', clear=True, figsize=[4.5, 3])\n\nap21.sim.hist2d('mainpos-offset', 'mainampl', '(mainnpe==1)&(length==128)', fig=fig, cmap=colormap.uniform(), selection=False)\nax, _ = fig.get_axes()\ntextbox.textbox(ax, f'{vov} VoV', fontsize='medium', loc='lower center')\nax.set_xlabel('Laser peak position [ns]')\nax.set_ylabel('Peak height')\n\nfigs.append(fig)\n\nfor fig in figs:\n fig.tight_layout()\n fig.show()\n\nfiglatex.save([figs])\n"
] | [
[
"matplotlib.pyplot.figure"
]
] |
egpbos/amuse | [
"64b3bc5b7fef9496012b023578c4d71cecef92b7",
"64b3bc5b7fef9496012b023578c4d71cecef92b7"
] | [
"examples/simple/salpeter.py",
"examples/simple/unstable_binary.py"
] | [
"\"\"\"\nGenerates a cluster using a plummer model with a salpeter Initial Mass Function.\nCompares the generated IMF against the expected line.\n\"\"\"\n\nimport numpy \nfrom matplotlib import pyplot\nfrom amuse.units import units\nfrom amuse.units import nbody_system\nfrom amuse.ic.plummer import new_plummer_model\nfrom amuse.ic.salpeter import new_salpeter_mass_distribution\n\ndef new_cluster(number_of_stars = 1000):\n masses = new_salpeter_mass_distribution(\n number_of_stars, \n mass_min = 0.1 | units.MSun,\n mass_max = 125.0 | units.MSun, \n alpha = -2.35\n )\n nbody_converter = nbody_system.nbody_to_si(masses.sum(), 1 | units.parsec)\n particles = new_plummer_model(number_of_stars, nbody_converter)\n particles.mass = masses\n particles.move_to_center()\n return particles\n\ndef plot_particles_and_mass_distribution(particles):\n figure = pyplot.figure(figsize= (12,6))\n \n subplot = figure.add_subplot(1, 2, 1)\n \n subplot.scatter(\n particles.x.value_in(units.parsec),\n particles.y.value_in(units.parsec),\n s = particles.mass.value_in(units.MSun),# * len(particles),\n edgecolors = 'red',\n facecolors = 'red'\n )\n \n subplot.set_xlim(-4,4)\n subplot.set_ylim(-4,4)\n subplot.set_xlabel('x (parsec)')\n subplot.set_ylabel('y (parsec)')\n \n subplot = figure.add_subplot(1, 2, 2)\n \n masses = particles.mass.value_in(units.MSun)\n \n bins = 10**numpy.linspace(-1, 2, 100)\n number_of_particles, bin_edges= numpy.histogram(masses, bins = bins)\n \n bin_sizes = bin_edges[1:] - bin_edges[:-1]\n \n y = number_of_particles / bin_sizes\n x = (bin_edges[1:] + bin_edges[:-1]) / 2.0\n \n y = y[number_of_particles > 10.0] \n x = x[number_of_particles > 10.0]\n subplot.scatter(x, y)\n \n c = ((0.1**-1.35) - (125.0**-1.35)) / 1.35\n subplot.plot(x, len(particles)/ c * (x**-2.35))\n \n subplot.set_xscale('log')\n subplot.set_yscale('log')\n \n subplot.set_xlabel(u'M [M\\u2299]')\n subplot.set_ylabel('N')\n \n pyplot.show()\n \nif __name__ == \"__main__\":\n particles = new_cluster(20000)\n plot_particles_and_mass_distribution(particles)\n",
"\"\"\"\nEvolves two stars dynamically (hermit, nbody code) each star will \nlose mass during the evolution (evtwin, stellar evolution code)\n\nWe start with two stars, one 10.0 and one 1.0 solar mass star. These\nstars start orbiting with a stable kepler orbit. \nAfter 2 orbital periods the stars will begin to lose mass and the binary\nwill become unstable.\n\"\"\"\n\nfrom amuse.plot import scatter, xlabel, ylabel, plot\nfrom matplotlib import pyplot\nfrom math import pi\nfrom amuse.units.optparse import OptionParser\n\nfrom amuse.units import units\nfrom amuse.units import constants\nfrom amuse.units.nbody_system import nbody_to_si\nfrom amuse.community.evtwin.interface import EVtwin\nfrom amuse.community.sse.interface import SSE\nfrom amuse.community.hermite0.interface import Hermite\n\nfrom amuse.datamodel import Particles\n\n\ndef set_up_initial_conditions(orbital_period, kinetic_to_potential_ratio):\n print(\"Setting up initial conditions\")\n stars = Particles(2)\n stars.mass = [10.0, 1.0] | units.MSun\n stars.radius = 0 | units.RSun\n stars.position = [0.0, 0.0, 0.0] | units.AU\n stars.velocity = [0.0, 0.0, 0.0] | units.km / units.s\n \n print(\"Binary with masses: \"+str(stars.mass)+\", and orbital period: \", orbital_period)\n semimajor_axis = ((constants.G * stars.total_mass() * (orbital_period / (2 * pi))**2.0)**(1.0/3.0))\n separation = 2 * semimajor_axis * (1 - kinetic_to_potential_ratio)\n print(\"Initial separation:\", separation.as_quantity_in(units.AU))\n relative_velocity = ( (kinetic_to_potential_ratio / (1.0 - kinetic_to_potential_ratio)) * \n constants.G * stars.total_mass() / semimajor_axis).sqrt()\n print(\"Initial relative velocity:\", relative_velocity.as_quantity_in(units.km / units.s))\n \n stars[0].x = separation\n stars[0].vy = relative_velocity\n stars.move_to_center()\n return stars\n\ndef set_up_stellar_evolution_code(stars):\n stellar_evolution = EVtwin()\n stellar_evolution.initialize_code()\n # if you run with mesa, you can play with the wind efficiency\n # stellar_evolution.parameters.RGB_wind_scheme = 1\n # stellar_evolution.parameters.reimers_wind_efficiency = 1.0e6 # ridiculous, but instructive\n stellar_evolution.particles.add_particles(stars)\n return stellar_evolution\n \ndef set_up_gravitational_dynamics_code(stars):\n convert_nbody = nbody_to_si(11.0 | units.MSun, 10.0 | units.AU)\n gravitational_dynamics = Hermite(convert_nbody)\n gravitational_dynamics.parameters.epsilon_squared = 0.0 | units.AU ** 2\n view_on_the_primary = gravitational_dynamics.particles.add_particle(stars[0])\n gravitational_dynamics.particles.add_particle(stars[1])\n return gravitational_dynamics, view_on_the_primary\n \n\ndef simulate_binary_evolution(binary, orbital_period, t_offset_stars, t_end):\n distance = [] | units.AU\n mass = [] | units.MSun\n time = [] | units.yr\n \n stellar_evolution = set_up_stellar_evolution_code(binary)\n gravitational_dynamics, primary = set_up_gravitational_dynamics_code(binary)\n from_se_to_gd = stellar_evolution.particles.new_channel_to(gravitational_dynamics.particles)\n \n current_time = 0.0 * t_end\n \n \n print(\"Evolving with stellar wind\")\n while current_time < t_end:\n current_time += orbital_period / 10\n gravitational_dynamics.evolve_model(current_time)\n stellar_evolution.evolve_model(current_time + t_offset_stars)\n from_se_to_gd.copy_attributes(['mass'])\n separation = (gravitational_dynamics.particles[0].position - gravitational_dynamics.particles[1].position).length()\n distance.append(separation)\n 
mass.append(primary.mass)\n time.append(current_time)\n print(\"System evolved to time: \", current_time, \", primary mass:\", primary.mass.as_quantity_in(units.MSun), \", separation:\", separation.as_quantity_in(units.AU))\n \n print(\"Evolution done\")\n return distance, mass, time\n\ndef orbit_plot(distance, mass, time):\n figure = pyplot.figure(figsize = (6, 10), dpi = 100)\n subplot = figure.add_subplot(2, 1, 1)\n plot(time, distance)\n xlabel('t')\n ylabel('separation')\n pyplot.margins(0.05)\n subplot = figure.add_subplot(2, 1, 2)\n plot(time, ((mass - mass[0]) / mass[0]) * 100.0)\n xlabel('t')\n ylabel('mass')\n pyplot.margins(0.05)\n pyplot.show()\n\ndef main(\n orbital_period = 1000.0 | units.yr, \n kinetic_to_potential_ratio = 0.8, \n periods = 10,\n age = 10 | units.Myr\n ):\n \n t_offset_stars = age\n t_end = periods * orbital_period\n \n binary = set_up_initial_conditions(orbital_period, kinetic_to_potential_ratio)\n distance, mass, time = simulate_binary_evolution(binary, orbital_period, t_offset_stars, t_end)\n orbit_plot(distance, mass, time)\n \ndef new_option_parser():\n result = OptionParser()\n result.add_option(\n \"-o\", \"--orbitalperiod\", \n default = 1000 | units.yr,\n dest=\"orbital_period\",\n help=\"initial orbital period of the binary (in years)\",\n type=\"float\",\n unit=units.yr\n )\n \n result.add_option(\n \"-k\", \"--kpratio\", \n default = 0.8,\n dest=\"kinetic_to_potential_ratio\",\n help=\"kinetic to potential energy ratio, values less than 1.0 correspond to bound systems\",\n type=\"float\"\n )\n result.add_option(\n \"--periods\", \n default = 10,\n dest=\"periods\",\n help=\"number of orbital periods to evolve the binary\",\n type=\"int\"\n )\n result.add_option(\n \"--age\", \n default = 10 | units.Myr,\n dest=\"age\",\n help=\"initial age of the stars to start the simulation with\",\n type=\"float\",\n unit=units.Myr\n )\n \n \n return result\n \n \nif __name__ == \"__plot__\":\n main(1000 | units.yr, 0.8, 10, 10 | units.Myr)\n \nif __name__ == \"__main__\":\n options, args = new_option_parser().parse_args()\n main(**options.__dict__)\n"
] | [
[
"numpy.histogram",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.linspace"
],
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.margins"
]
] |
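The IMF check in the first snippet above leans on AMUSE's `new_salpeter_mass_distribution`. As a minimal NumPy-only sketch under the script's own defaults (alpha = -2.35, masses in 0.1–125 MSun; the seed is arbitrary), the same power-law masses can be drawn by inverse-transform sampling and compared against the analytic dN/dM line used in the plot:

```python
import numpy

rng = numpy.random.default_rng(42)  # arbitrary seed
alpha, m_min, m_max, n = -2.35, 0.1, 125.0, 20000

# inverse CDF of p(M) ~ M**alpha on [m_min, m_max]
a1 = alpha + 1.0
u = rng.random(n)
masses = (u * (m_max**a1 - m_min**a1) + m_min**a1) ** (1.0 / a1)

bins = 10 ** numpy.linspace(-1, 2, 100)
counts, edges = numpy.histogram(masses, bins=bins)
dn_dm = counts / (edges[1:] - edges[:-1])
centers = (edges[1:] + edges[:-1]) / 2.0

# same normalisation constant c as in the plotting routine above
c = ((m_min**-1.35) - (m_max**-1.35)) / 1.35
expected = n / c * centers**alpha

mask = counts > 10
print(numpy.median(dn_dm[mask] / expected[mask]))  # should be close to 1.0
```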
suyukun666/UFO | [
"ba481b39b80d78c98e11cc22444d69de9e010439"
] | [
"Intra_MLP.py"
] | [
"import torch\nimport numpy\n\n# codes of this function are borrowed from https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/models/pointnet2_utils.py\ndef index_points(device, points, idx):\n \"\"\"\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n # batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n batch_indices = torch.arange(B, dtype=torch.long).cuda().view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\ndef knn_l2(device, net, k, u):\n '''\n Input:\n k: int32, number of k in k-nn search\n net: (batch_size, npoint, c) float32 array, points\n u: int32, block size\n Output:\n idx: (batch_size, npoint, k) int32 array, indices to input points\n '''\n INF = 1e8\n batch_size = net.size(0)\n npoint = net.size(1)\n n_channel = net.size(2)\n\n square = torch.pow(torch.norm(net, dim=2,keepdim=True),2)\n\n def u_block(batch_size, npoint, u):\n block = numpy.zeros([batch_size, npoint, npoint])\n n = npoint // u\n for i in range(n):\n block[:, (i*u):(i*u+u), (i*u):(i*u+u)] = numpy.ones([batch_size, u, u]) * (-INF)\n return block\n\n # minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).to(device)\n minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).cuda()\n _, indices = torch.topk(minus_distance, k, largest=True, sorted=False)\n \n return indices\n\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"torch.norm",
"torch.topk",
"torch.arange"
]
] |
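`knn_l2` above hard-codes `.cuda()` (the commented-out lines show the device-aware variant the author considered). A small CPU-only sketch of the same masked k-NN logic, assuming only that PyTorch is installed, makes the block masking easy to verify without a GPU:

```python
import torch

def knn_l2_cpu(net, k, u):
    # pairwise -||xi - xj||^2 via 2*x.y - |x|^2 - |y|^2, with an -INF
    # block mask so points never match neighbours inside their own block
    INF = 1e8
    b, n, _ = net.shape
    square = torch.norm(net, dim=2, keepdim=True) ** 2
    block = torch.zeros(b, n, n)
    for i in range(n // u):
        block[:, i*u:(i+1)*u, i*u:(i+1)*u] = -INF
    minus_dist = 2 * net @ net.transpose(2, 1) - square - square.transpose(2, 1) + block
    return torch.topk(minus_dist, k, largest=True, sorted=False).indices

idx = knn_l2_cpu(torch.randn(2, 8, 16), k=3, u=4)
print(idx.shape)  # torch.Size([2, 8, 3])
```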
triplet02/KoSpeech | [
"74d267b76ec72cf8bc916982af9a58df2dc1ee4e"
] | [
"kospeech/data/audio/parser.py"
] | [
"import numpy as np\nfrom torch import Tensor, FloatTensor\nfrom kospeech.data.audio.core import load_audio\nfrom kospeech.data.audio.augment import NoiseInjector, SpecAugment\nfrom kospeech.data.audio.feature import MelSpectrogram, MFCC, Spectrogram, FilterBank\n\n\nclass AudioParser(object):\n \"\"\"\n Provides inteface of audio parser.\n\n Note:\n Do not use this class directly, use one of the sub classes.\n\n Method:\n - **parse_audio()**: abstract method. you have to override this method.\n - **parse_transcript()**: abstract method. you have to override this method.\n \"\"\"\n def __init__(self, dataset_path, noiseset_size, sample_rate=16000, noise_level=0.7, noise_augment=False):\n if noise_augment:\n self.noise_injector = NoiseInjector(dataset_path, noiseset_size, sample_rate, noise_level)\n\n def parse_audio(self, *args, **kwargs):\n raise NotImplementedError\n\n def parse_transcript(self, *args, **kwargs):\n raise NotImplementedError\n\n\nclass SpectrogramParser(AudioParser):\n \"\"\"\n Parses audio file into (spectrogram / mel spectrogram / mfcc) with various options.\n\n Args:\n transform_method (str): which feature to use (default: mel)\n sample_rate (int): Sample rate of audio signal. (Default: 16000)\n n_mels (int): Number of mfc coefficients to retain. (Default: 40)\n frame_length (int): frame length for spectrogram (ms) (Default : 20)\n frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)\n feature_extract_by (str): which library to use for feature extraction(default: librosa)\n del_silence (bool): flag indication whether to delete silence or not (default: True)\n input_reverse (bool): flag indication whether to reverse input or not (default: True)\n normalize (bool): flag indication whether to normalize spectrum or not (default:True)\n time_mask_para (int): Hyper Parameter for Time Masking to limit time masking length\n freq_mask_para (int): Hyper Parameter for Freq Masking to limit freq masking length\n time_mask_num (int): how many time-masked area to make\n freq_mask_num (int): how many freq-masked area to make\n sos_id (int): start of sentence token`s identification\n eos_id (int): end of sentence token`s identification\n target_dict (dict): dictionary of filename and labels\n \"\"\"\n VANILLA = 0 # Not apply augmentation\n SPEC_AUGMENT = 1 # SpecAugment\n NOISE_INJECTION = 2 # Noise Injection\n HYBRID_AUGMENT = 3 # Noise Injection & SpecAugment\n\n def __init__(self, feature_extract_by: str = 'librosa', sample_rate: int = 16000,\n n_mels: int = 80, frame_length: int = 20, frame_shift: int = 10,\n del_silence: bool = False, input_reverse: bool = True,\n normalize: bool = False, transform_method: str = 'mel',\n time_mask_para: int = 70, freq_mask_para: int = 12, time_mask_num: int = 2, freq_mask_num: int = 2,\n sos_id: int = 1, eos_id: int = 2, target_dict: dict = None, noise_augment: bool = False,\n dataset_path: str = None, noiseset_size: int = 0, noise_level: float = 0.7) -> None:\n super(SpectrogramParser, self).__init__(dataset_path, noiseset_size, sample_rate, noise_level, noise_augment)\n self.del_silence = del_silence\n self.input_reverse = input_reverse\n self.normalize = normalize\n self.sos_id = sos_id\n self.eos_id = eos_id\n self.target_dict = target_dict\n self.spec_augment = SpecAugment(time_mask_para, freq_mask_para, time_mask_num, freq_mask_num)\n\n if transform_method.lower() == 'mel':\n self.transforms = MelSpectrogram(sample_rate, n_mels, frame_length, frame_shift, feature_extract_by)\n\n elif transform_method.lower() == 
'mfcc':\n self.transforms = MFCC(sample_rate, n_mels, frame_length, frame_shift, feature_extract_by)\n\n elif transform_method.lower() == 'spect':\n self.transforms = Spectrogram(sample_rate, frame_length, frame_shift, feature_extract_by)\n\n elif transform_method.lower() == 'fbank':\n self.transforms = FilterBank(sample_rate, n_mels, frame_length, frame_shift)\n\n else:\n raise ValueError(\"Unsupported feature : {0}\".format(transform_method))\n\n def parse_audio(self, audio_path: str, augment_method: int) -> Tensor:\n \"\"\"\n Parses audio.\n\n Args:\n audio_path (str): path of audio file\n augment_method (int): flag indication which augmentation method to use.\n\n Returns: feature_vector\n - **feature_vector** (torch.FloatTensor): feature from audio file.\n \"\"\"\n signal = load_audio(audio_path, self.del_silence)\n\n if signal is None:\n return None\n\n if augment_method == SpectrogramParser.NOISE_INJECTION or augment_method == SpectrogramParser.HYBRID_AUGMENT:\n signal = self.noise_injector(signal)\n\n feature_vector = self.transforms(signal)\n\n if self.normalize:\n feature_vector -= feature_vector.mean()\n\n if self.input_reverse: # Refer to \"Sequence to Sequence Learning with Neural Network\" paper\n feature_vector = feature_vector[:, ::-1]\n feature_vector = FloatTensor(np.ascontiguousarray(np.swapaxes(feature_vector, 0, 1)))\n else:\n feature_vector = FloatTensor(feature_vector).transpose(0, 1)\n\n if augment_method == SpectrogramParser.SPEC_AUGMENT or augment_method == SpectrogramParser.HYBRID_AUGMENT:\n feature_vector = self.spec_augment(feature_vector)\n\n return feature_vector\n\n def parse_transcript(self, *args, **kwargs):\n raise NotImplementedError\n"
] | [
[
"numpy.swapaxes",
"torch.FloatTensor"
]
] |
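The tail of `parse_audio` above (mean normalisation, optional time reversal per the "Sequence to Sequence Learning with Neural Networks" trick, then the (freq, time) → (time, freq) swap) can be exercised without KoSpeech's feature extractors. A rough sketch on a random (n_mels, frames) array, assuming only NumPy and PyTorch:

```python
import numpy as np
from torch import FloatTensor

feature = np.random.rand(80, 120).astype(np.float32)  # (n_mels, frames)

feature -= feature.mean()            # normalize=True branch
reversed_feature = feature[:, ::-1]  # input_reverse=True branch
# negative strides require ascontiguousarray before building the tensor
tensor = FloatTensor(np.ascontiguousarray(np.swapaxes(reversed_feature, 0, 1)))
print(tensor.shape)  # torch.Size([120, 80])
```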
ZhangJianAI-CV/Awesome-project | [
"b07c8c270bd511246133541c4aee28c2472c633f"
] | [
"PaddleDetection/deploy/pptracking/python/mot/tracker/jde_tracker.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py\n\"\"\"\n\nimport numpy as np\nfrom collections import defaultdict\n\nfrom ..matching import jde_matching as matching\nfrom ..motion import KalmanFilter\nfrom .base_jde_tracker import TrackState, STrack\nfrom .base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks\n\n__all__ = ['JDETracker']\n\n\nclass JDETracker(object):\n __shared__ = ['num_classes']\n \"\"\"\n JDE tracker, support single class and multi classes\n\n Args:\n num_classes (int): the number of classes\n det_thresh (float): threshold of detection score\n track_buffer (int): buffer for tracker\n min_box_area (int): min box area to filter out low quality boxes\n vertical_ratio (float): w/h, the vertical ratio of the bbox to filter\n bad results. If set <0 means no need to filter bboxes,usually set\n 1.6 for pedestrian tracking.\n tracked_thresh (float): linear assignment threshold of tracked \n stracks and detections\n r_tracked_thresh (float): linear assignment threshold of \n tracked stracks and unmatched detections\n unconfirmed_thresh (float): linear assignment threshold of \n unconfirmed stracks and unmatched detections\n motion (str): motion model, KalmanFilter as default\n conf_thres (float): confidence threshold for tracking\n metric_type (str): either \"euclidean\" or \"cosine\", the distance metric \n used for measurement to track association.\n \"\"\"\n\n def __init__(self,\n use_byte=False,\n num_classes=1,\n det_thresh=0.3,\n track_buffer=30,\n min_box_area=200,\n vertical_ratio=1.6,\n tracked_thresh=0.7,\n r_tracked_thresh=0.5,\n unconfirmed_thresh=0.7,\n conf_thres=0,\n match_thres=0.8,\n low_conf_thres=0.2,\n motion='KalmanFilter',\n metric_type='euclidean'):\n self.use_byte = use_byte\n self.num_classes = num_classes\n self.det_thresh = det_thresh if not use_byte else conf_thres + 0.1\n self.track_buffer = track_buffer\n self.min_box_area = min_box_area\n self.vertical_ratio = vertical_ratio\n\n self.tracked_thresh = tracked_thresh\n self.r_tracked_thresh = r_tracked_thresh\n self.unconfirmed_thresh = unconfirmed_thresh\n self.conf_thres = conf_thres\n self.match_thres = match_thres\n self.low_conf_thres = low_conf_thres\n\n if motion == 'KalmanFilter':\n self.motion = KalmanFilter()\n self.metric_type = metric_type\n\n self.frame_id = 0\n self.tracked_tracks_dict = defaultdict(list) # dict(list[STrack])\n self.lost_tracks_dict = defaultdict(list) # dict(list[STrack])\n self.removed_tracks_dict = defaultdict(list) # dict(list[STrack])\n\n self.max_time_lost = 0\n # max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)\n\n def update(self, pred_dets, pred_embs=None):\n \"\"\"\n Processes the image frame and finds bounding box(detections).\n Associates the detection with corresponding tracklets and also handles\n lost, removed, refound and active 
tracklets.\n\n Args:\n pred_dets (np.array): Detection results of the image, the shape is\n [N, 6], means 'cls_id, score, x0, y0, x1, y1'.\n pred_embs (np.array): Embedding results of the image, the shape is\n [N, 128] or [N, 512].\n\n Return:\n output_tracks_dict (dict(list)): The list contains information\n regarding the online_tracklets for the received image tensor.\n \"\"\"\n self.frame_id += 1\n if self.frame_id == 1:\n STrack.init_count(self.num_classes)\n activated_tracks_dict = defaultdict(list)\n refined_tracks_dict = defaultdict(list)\n lost_tracks_dict = defaultdict(list)\n removed_tracks_dict = defaultdict(list)\n output_tracks_dict = defaultdict(list)\n\n pred_dets_dict = defaultdict(list)\n pred_embs_dict = defaultdict(list)\n\n # unify single and multi classes detection and embedding results\n for cls_id in range(self.num_classes):\n cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)\n pred_dets_dict[cls_id] = pred_dets[cls_idx]\n if pred_embs is not None:\n pred_embs_dict[cls_id] = pred_embs[cls_idx]\n else:\n pred_embs_dict[cls_id] = None\n\n for cls_id in range(self.num_classes):\n \"\"\" Step 1: Get detections by class\"\"\"\n pred_dets_cls = pred_dets_dict[cls_id]\n pred_embs_cls = pred_embs_dict[cls_id]\n remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)\n if remain_inds.sum() > 0:\n pred_dets_cls = pred_dets_cls[remain_inds]\n if self.use_byte:\n detections = [\n STrack(\n STrack.tlbr_to_tlwh(tlbrs[2:6]),\n tlbrs[1],\n cls_id,\n 30,\n temp_feat=None) for tlbrs in pred_dets_cls\n ]\n else:\n pred_embs_cls = pred_embs_cls[remain_inds]\n detections = [\n STrack(\n STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,\n 30, temp_feat)\n for (tlbrs, temp_feat\n ) in zip(pred_dets_cls, pred_embs_cls)\n ]\n else:\n detections = []\n ''' Add newly detected tracklets to tracked_stracks'''\n unconfirmed_dict = defaultdict(list)\n tracked_tracks_dict = defaultdict(list)\n for track in self.tracked_tracks_dict[cls_id]:\n if not track.is_activated:\n # previous tracks which are not active in the current frame are added in unconfirmed list\n unconfirmed_dict[cls_id].append(track)\n else:\n # Active tracks are added to the local list 'tracked_stracks'\n tracked_tracks_dict[cls_id].append(track)\n \"\"\" Step 2: First association, with embedding\"\"\"\n # building tracking pool for the current frame\n track_pool_dict = defaultdict(list)\n track_pool_dict[cls_id] = joint_stracks(\n tracked_tracks_dict[cls_id], self.lost_tracks_dict[cls_id])\n\n # Predict the current location with KalmanFilter\n STrack.multi_predict(track_pool_dict[cls_id], self.motion)\n\n if self.use_byte:\n dists = matching.iou_distance(track_pool_dict[cls_id],\n detections)\n matches, u_track, u_detection = matching.linear_assignment(\n dists, thresh=self.match_thres) # not self.tracked_thresh\n else:\n dists = matching.embedding_distance(\n track_pool_dict[cls_id],\n detections,\n metric=self.metric_type)\n dists = matching.fuse_motion(\n self.motion, dists, track_pool_dict[cls_id], detections)\n matches, u_track, u_detection = matching.linear_assignment(\n dists, thresh=self.tracked_thresh)\n\n for i_tracked, idet in matches:\n # i_tracked is the id of the track and idet is the detection\n track = track_pool_dict[cls_id][i_tracked]\n det = detections[idet]\n if track.state == TrackState.Tracked:\n # If the track is active, add the detection to the track\n track.update(detections[idet], self.frame_id)\n activated_tracks_dict[cls_id].append(track)\n else:\n # We have obtained a detection from a 
track which is not active,\n # hence put the track in refind_stracks list\n track.re_activate(det, self.frame_id, new_id=False)\n refined_tracks_dict[cls_id].append(track)\n\n # None of the steps below happen if there are no undetected tracks.\n \"\"\" Step 3: Second association, with IOU\"\"\"\n if self.use_byte:\n inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres\n inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres\n inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)\n pred_dets_cls_second = pred_dets_dict[cls_id][inds_second]\n\n # association the untrack to the low score detections\n if len(pred_dets_cls_second) > 0:\n detections_second = [\n STrack(\n STrack.tlbr_to_tlwh(tlbrs[:4]),\n tlbrs[4],\n cls_id,\n 30,\n temp_feat=None)\n for tlbrs in pred_dets_cls_second[:, :5]\n ]\n else:\n detections_second = []\n r_tracked_stracks = [\n track_pool_dict[cls_id][i] for i in u_track\n if track_pool_dict[cls_id][i].state == TrackState.Tracked\n ]\n dists = matching.iou_distance(r_tracked_stracks,\n detections_second)\n matches, u_track, u_detection_second = matching.linear_assignment(\n dists, thresh=0.4) # not r_tracked_thresh\n else:\n detections = [detections[i] for i in u_detection]\n r_tracked_stracks = []\n for i in u_track:\n if track_pool_dict[cls_id][i].state == TrackState.Tracked:\n r_tracked_stracks.append(track_pool_dict[cls_id][i])\n dists = matching.iou_distance(r_tracked_stracks, detections)\n\n matches, u_track, u_detection = matching.linear_assignment(\n dists, thresh=self.r_tracked_thresh)\n\n for i_tracked, idet in matches:\n track = r_tracked_stracks[i_tracked]\n det = detections[\n idet] if not self.use_byte else detections_second[idet]\n if track.state == TrackState.Tracked:\n track.update(det, self.frame_id)\n activated_tracks_dict[cls_id].append(track)\n else:\n track.re_activate(det, self.frame_id, new_id=False)\n refined_tracks_dict[cls_id].append(track)\n\n for it in u_track:\n track = r_tracked_stracks[it]\n if not track.state == TrackState.Lost:\n track.mark_lost()\n lost_tracks_dict[cls_id].append(track)\n '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''\n detections = [detections[i] for i in u_detection]\n dists = matching.iou_distance(unconfirmed_dict[cls_id], detections)\n matches, u_unconfirmed, u_detection = matching.linear_assignment(\n dists, thresh=self.unconfirmed_thresh)\n for i_tracked, idet in matches:\n unconfirmed_dict[cls_id][i_tracked].update(detections[idet],\n self.frame_id)\n activated_tracks_dict[cls_id].append(unconfirmed_dict[cls_id][\n i_tracked])\n for it in u_unconfirmed:\n track = unconfirmed_dict[cls_id][it]\n track.mark_removed()\n removed_tracks_dict[cls_id].append(track)\n \"\"\" Step 4: Init new stracks\"\"\"\n for inew in u_detection:\n track = detections[inew]\n if track.score < self.det_thresh:\n continue\n track.activate(self.motion, self.frame_id)\n activated_tracks_dict[cls_id].append(track)\n \"\"\" Step 5: Update state\"\"\"\n for track in self.lost_tracks_dict[cls_id]:\n if self.frame_id - track.end_frame > self.max_time_lost:\n track.mark_removed()\n removed_tracks_dict[cls_id].append(track)\n\n self.tracked_tracks_dict[cls_id] = [\n t for t in self.tracked_tracks_dict[cls_id]\n if t.state == TrackState.Tracked\n ]\n self.tracked_tracks_dict[cls_id] = joint_stracks(\n self.tracked_tracks_dict[cls_id], activated_tracks_dict[cls_id])\n self.tracked_tracks_dict[cls_id] = joint_stracks(\n self.tracked_tracks_dict[cls_id], refined_tracks_dict[cls_id])\n 
self.lost_tracks_dict[cls_id] = sub_stracks(\n self.lost_tracks_dict[cls_id], self.tracked_tracks_dict[cls_id])\n self.lost_tracks_dict[cls_id].extend(lost_tracks_dict[cls_id])\n self.lost_tracks_dict[cls_id] = sub_stracks(\n self.lost_tracks_dict[cls_id], self.removed_tracks_dict[cls_id])\n self.removed_tracks_dict[cls_id].extend(removed_tracks_dict[cls_id])\n self.tracked_tracks_dict[cls_id], self.lost_tracks_dict[\n cls_id] = remove_duplicate_stracks(\n self.tracked_tracks_dict[cls_id],\n self.lost_tracks_dict[cls_id])\n\n # get scores of lost tracks\n output_tracks_dict[cls_id] = [\n track for track in self.tracked_tracks_dict[cls_id]\n if track.is_activated\n ]\n\n return output_tracks_dict\n"
] | [
[
"numpy.logical_and"
]
] |
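In the ByteTrack-style branch above (`use_byte=True`), `update()` splits detections into a high-score set for the first association and a low-score set for the IoU-only second association. A tiny NumPy sketch of that partition, with illustrative thresholds standing in for `conf_thres` / `low_conf_thres` (rows follow the documented 'cls_id, score, x0, y0, x1, y1' layout):

```python
import numpy as np

pred_dets = np.array([
    [0, 0.90, 10, 10, 50, 50],    # confident -> first association
    [0, 0.15, 12, 12, 52, 52],    # below low threshold -> discarded
    [0, 0.45, 80, 80, 120, 120],  # in-between -> second, IoU-only association
], dtype=np.float32)

conf_thres, low_conf_thres = 0.5, 0.2  # illustrative values

first = pred_dets[pred_dets[:, 1] > conf_thres]
second = pred_dets[(pred_dets[:, 1] > low_conf_thres)
                   & (pred_dets[:, 1] < conf_thres)]
print(len(first), len(second))  # 1 1
```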
tkhe/simple-mtcnn | [
"f39b66ec958efc745e1af8a4e0c65a63e0d4a6d8"
] | [
"tools/train_net.py"
] | [
"import argparse\nimport pprint\nimport sys\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom mtcnn.config import cfg\nfrom mtcnn.datasets.iteration_based_batch_sampler import build_batch_sampler\nfrom mtcnn.datasets.roidb import get_roidb\nfrom mtcnn.engine.trainer import do_train\nfrom mtcnn.modeling.model_builder import build_model\nfrom mtcnn.utils.logger import setup_logging\nfrom mtcnn.utils.lr_scheduler import make_optimizer\nfrom mtcnn.utils.lr_scheduler import make_scheduler\n\nlogger = setup_logging(__name__)\n\n\ndef train():\n model = build_model(cfg.MODEL.TYPE)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_scheduler(cfg, optimizer)\n transform = transforms.ToTensor()\n\n roidb = get_roidb(transform=transform)\n batch_sampler = build_batch_sampler(\n roidb,\n cfg.TRAIN.BATCH_SIZE,\n shuffle=True\n )\n data_loader = DataLoader(roidb, batch_sampler=batch_sampler)\n\n do_train(model, data_loader, optimizer, scheduler, device)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--cfg',\n dest='cfg_file',\n default=None,\n type=str\n )\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n logger.info('Called with args:')\n logger.info(pprint.pformat(args))\n if args.cfg_file:\n cfg.merge_from_file(args.cfg_file)\n logger.info('Using configs:')\n logger.info(pprint.pformat(cfg))\n\n train()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.device"
]
] |
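`cfg.merge_from_file(args.cfg_file)` in `main()` above follows the yacs `CfgNode` API; assuming `mtcnn.config.cfg` is indeed a yacs-style node (not confirmed by this file alone), the override flow looks like the sketch below, with `merge_from_list` standing in for a config file and the keys purely illustrative:

```python
from yacs.config import CfgNode as CN

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.TYPE = 'pnet'    # illustrative defaults
cfg.MODEL.DEVICE = 'cpu'

# same idea as cfg.merge_from_file('some_config.yaml')
cfg.merge_from_list(['MODEL.TYPE', 'rnet'])
print(cfg.MODEL.TYPE)  # rnet
```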
NegriLuca/pigasus | [
"d5057b771f81cfa05bb08ea4b0fd99088150cd7a"
] | [
"python/fem/norm.py"
] | [
"# -*- coding: UTF-8 -*-\n#! /usr/bin/python\n\n# To change this template, choose Tools | Templates\n# and open the template in the editor.\n\n__author__=\"ARA\"\n__all__ = ['norm']\n__date__ =\"$Feb 14, 2012 11:40:06 AM$\"\n\nfrom . import common_obj as _com\nfrom . import constants as _cst\nimport numpy as _np\nfrom .pigasusObject import *\n\nclass norm(pigasusObject):\n def __init__ ( self, field = None, type = None, func = None, paramevalfunc = False, exact = None ):\n pigasusObject.__init__(self)\n\n self.id = self.com.nnorms\n self.nparam = 0\n self.paramevalfunc = paramevalfunc\n\n if field is not None:\n self.field = field\n self.space = field.space\n self.loc_id = self.space.grids.add_norm_id(self)\n else:\n raise(\"You must give a field for the current norm\")\n\n if type is not None:\n self.type = type\n else:\n self.type = _cst.NORM_L2\n\n self._set_nparam()\n\n from .utils import function\n if func is not None:\n self.func = function(func, space=self.space)\n else:\n self.defaultFuncParam()\n\n if exact is not None:\n self.exact = function(exact, space=self.space)\n else:\n self.defaultFuncExact()\n\n # this must be the last thing to do\n self.com.nnorms += 1\n self.com.norms.append(self)\n\n def setInfoData(self):\n \"\"\"\n prints informations about the current norm\n \"\"\"\n self.infoData['id'] = str(self.id)\n self.infoData['field'] = str(self.field.id)\n self.infoData['space'] = str(self.space.id)\n self.infoData['loc_id'] = str(self.loc_id)\n self.infoData['nparam'] = str(self.nparam)\n self.infoData['paramevalfunc'] = str(self.paramevalfunc)\n self.infoData['type'] = str(self.type)\n\n def _getGlobalNorm(self):\n return self.com.pyfem.getglobalnorm ( self.id )\n\n def _getPatchNorm(self):\n li_npatchs = self.space.grids.npatchs\n return self.com.pyfem._getPatchNorm ( self.id, li_npatchs )\n\n def _getElementNorm(self, ai_patch):\n\n li_nel = self.space.grids.list_grid[ai_patch].nel\n return self.com.pyfem._getElementNorm ( self.id, ai_patch, li_nel)\n\n def get(self, type=0, ai_patch=None):\n \"\"\"\n returns values for a given type of norm\n type = 0 : for a global computation\n type = 1 : for a patch computation\n type = 2 : for an element computation\n \"\"\"\n if (type == 0) :\n return self._getGlobalNorm()\n if (type == 1) :\n return self._getPatchNorm()\n if (type == 2) and (ai_patch is not None):\n return self._getElementNorm(ai_patch)\n\n def setEvalNorm(self, ai_patch=0, fields=[], funcs=[]):\n \"\"\"\n fields is a list of fields\n funcs is a list of functions\n \"\"\"\n lpr_pts = self.space.get_points(ai_patch)\n list_pts = []\n for i in range(0, self.space.dim):\n list_pts.append(lpr_pts[i,0,:])\n lpr_pts = list_pts\n\n li_dim = self.space.dim\n if li_dim not in [2]:\n print(\"setEvalNorm: Not yet implemetend for the desired dimension\")\n\n lpi_shape = lpr_pts.shape[0:-1]\n lpr_val = _np.zeros((1,lpi_shape[0],lpi_shape[1]))\n for F in fields:\n lpr_f = F.eval(ai_patch, elts)[ai_patch,:,:]\n lpr_val[0,:,:] += lpr_f[:,:]\n for func in funcs:\n lpr_f = _np.zeros(lpr_pts.shape[0:-1])\n for (i,list_p) in enumerate(lpr_pts):\n for (j,p) in enumerate(list_p):\n lpr_f[i,j] =func (p[0], p[1])[0]\n lpr_val[0,:,:] += lpr_f[:,:]\n self.com.pyfem.set_field_on_grids(self.field.id, ai_patch, lpr_val)\n\n def _set_nparam(self):\n\n if ( self.type in [ _cst.NORM_L2 ] ):\n self.nparam = 1\n return\n if ( self.type in [ _cst.NORM_H1 ] ):\n li_dim = self.space.dim\n self.nparam = li_dim**2\n return\n else :\n print(\"NORM-_set_nparam : type not implemented yet\")\n import sys; 
sys.exit(1)\n\n def evalfunc(self, ai_patch, apr_points, elts=None, type=\"param\"):\n \"\"\"\n Evaluation of the param-function over a given list of points\n \"\"\"\n if not self.paramevalfunc :\n lpr_val = self._evalfunc_std(ai_patch, apr_points, elts, type)\n else:\n lpr_parampts = self.space.get_parametricPoints(ai_patch_id=ai_patch)\n lpr_val = self._evalfunc_std(ai_patch, lpr_parampts, elts, type)\n return lpr_val\n\n def _evalfunc_std(self, ai_patch, apr_points, elts, type):\n \"\"\"\n sequential version of the evaluation\n \"\"\"\n if type == \"param\":\n# print \"==== param evaluation\"\n return self.func(apr_points)\n if type == \"exact\":\n# print \"==== exact evaluation\"\n return self.exact(apr_points)\n\n def defaultFuncParam(self):\n li_dim = self.space.dim\n\n if ( self.type in [ _cst.NORM_L2 ] ):\n if li_dim == 1:\n func = lambda x : [1.0]\n if li_dim == 2:\n func = lambda x,y : [1.0]\n if li_dim == 3:\n func = lambda x,y,z : [1.0]\n elif ( self.type in [ _cst.NORM_H1 ] ):\n if li_dim == 1:\n func = lambda x : [1.0]\n if li_dim == 2:\n func = lambda x,y : [1.0, 0.0, 0.0, 1.0]\n if li_dim == 3:\n func = lambda x,y,z : [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n else :\n print(\"NORM-defaultFuncParam : type not implemented yet\")\n import sys; sys.exit(1)\n\n from .utils import function\n self.func = function(func, space=self.space)\n\n def defaultFuncExact(self):\n li_dim = self.space.dim\n\n if li_dim == 1:\n func = lambda x : [0.0] * self.field.ndof\n elif li_dim == 2:\n func = lambda x,y : [0.0] * self.field.ndof\n elif li_dim == 3:\n func = lambda x,y,z : [0.0] * self.field.ndof\n else :\n raise NotImplementedError(\"type not implemented yet\")\n\n from .utils import function\n # default exact solution: the zero function built just above\n self.exact = function(func, space=self.space)\n\n\n def set_func(self, exact):\n \"\"\"\n this sets the exact-solution function of the current norm\n \"\"\"\n from .utils import function\n self.exact = function(exact, space=self.space)\n"
] | [
[
"numpy.zeros"
]
] |
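The default parameter functions built in `defaultFuncParam` above reduce to a scalar weight 1 for the L2 norm and a flattened dim×dim identity matrix for the H1 norm. A standalone sketch of just that logic (the names are illustrative, not pigasus API):

```python
import numpy as np

def default_norm_param(dim, norm_type):
    # L2: constant scalar weight; H1: flattened identity weight matrix
    if norm_type == 'L2':
        return lambda *x: [1.0]
    if norm_type == 'H1':
        eye = np.eye(dim).ravel().tolist()
        return lambda *x: eye
    raise NotImplementedError('type not implemented yet')

print(default_norm_param(2, 'H1')(0.5, 0.5))  # [1.0, 0.0, 0.0, 1.0]
```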
iacolippo/octconv-pytorch | [
"032641413f1e8ece2893118e13cd1815d71ce0a9"
] | [
"octconv.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass OctConv(nn.Module):\n def __init__(self, ch_in, ch_out, kernel_size, stride=1, alphas=(0.5, 0.5)):\n super(OctConv, self).__init__()\n self.alpha_in, self.alpha_out = alphas\n assert 0 <= self.alpha_in <= 1 and 0 <= self.alpha_in <= 1, \"Alphas must be in interval [0, 1]\"\n\n # CH IN\n self.ch_in_hf = int((1 - self.alpha_in) * ch_in)\n self.ch_in_lf = ch_in - self.ch_in_hf\n\n # CH OUT\n self.ch_out_hf = int((1 - self.alpha_out) * ch_out)\n self.ch_out_lf = ch_out - self.ch_out_hf\n\n # FILTERS\n self.wHtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_hf, kernel_size, kernel_size))\n self.wHtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_hf, kernel_size, kernel_size))\n self.wLtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_lf, kernel_size, kernel_size))\n self.wLtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_lf, kernel_size, kernel_size))\n\n # PADDING: (H - F + 2P)/S + 1 = 2 * [(0.5 H - F + 2P)/S +1] -> P = (F-S)/2\n self.padding = (kernel_size - stride) // 2\n\n def forward(self, input):\n # logic to handle input tensors:\n # if alpha_in = 0., we assume to be at the first layer, with only high freq repr\n if self.alpha_in == 0:\n hf_input = input\n lf_input = torch.Tensor([]).reshape(1, 0)\n else:\n fmap_size = input.shape[-1]\n hf_input = input[:, :self.ch_in_hf * 4, ...].reshape(-1, self.ch_in_hf, fmap_size * 2, fmap_size * 2)\n lf_input = input[:, self.ch_in_hf * 4:, ...]\n\n HtoH = HtoL = LtoL = LtoH = 0.\n if self.alpha_in < 1:\n # if alpha < 1 there is high freq component\n if self.ch_out_hf > 0:\n HtoH = F.conv2d(hf_input, self.wHtoH, padding=self.padding)\n if self.ch_out_lf > 0:\n HtoL = F.conv2d(F.avg_pool2d(hf_input, 2), self.wHtoL, padding=self.padding)\n if self.alpha_in > 0:\n # if alpha > 0 there is low freq component\n if self.ch_out_hf > 0:\n LtoH = F.interpolate(F.conv2d(lf_input, self.wLtoH, padding=self.padding),\n scale_factor=2, mode='nearest')\n if self.ch_out_lf > 0:\n LtoL = F.conv2d(lf_input, self.wLtoL, padding=self.padding)\n\n hf_output = HtoH + LtoH\n lf_output = LtoL + HtoL\n if 0 < self.alpha_out < 1:\n # if alpha in (0, 1)\n fmap_size = hf_output.shape[-1] // 2\n hf_output = hf_output.reshape(-1, 4 * self.ch_out_hf, fmap_size, fmap_size)\n output = torch.cat([hf_output, lf_output], dim=1) # cat over channel dim\n elif np.isclose(self.alpha_out, 1., atol=1e-8):\n # if only low req (alpha_out = 1.)\n output = lf_output\n elif np.isclose(self.alpha_out, 0., atol=1e-8):\n # if only high freq (alpha_out = 0.)\n output = hf_output\n return output\n\n\noc = OctConv(ch_in=3, ch_out=3, kernel_size=3, alphas=(0., 0.5))\noc1 = OctConv(ch_in=3, ch_out=10, kernel_size=7, alphas=(0.5, 0.8))\noc2 = OctConv(ch_in=10, ch_out=1, kernel_size=3, alphas=(0.8, 0.))\nout = oc2(oc1(oc(torch.randn(2, 3, 32, 32))))\nprint(out.shape)\n"
] | [
[
"torch.nn.functional.avg_pool2d",
"torch.randn",
"torch.nn.functional.conv2d",
"numpy.isclose",
"torch.cat",
"torch.Tensor"
]
] |
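The channel bookkeeping above follows `ch_hf = int((1 - alpha) * ch)` with the low-frequency branch taking the remainder, so the split always sums back to `ch`. A quick standalone check of that arithmetic:

```python
def split_channels(ch, alpha):
    # high-frequency channels get the (1 - alpha) share; low-frequency the rest
    ch_hf = int((1 - alpha) * ch)
    return ch_hf, ch - ch_hf

for ch, alpha in [(64, 0.5), (10, 0.8), (3, 0.0)]:
    print(ch, alpha, split_channels(ch, alpha))
# 64 0.5 (32, 32) / 10 0.8 (2, 8) / 3 0.0 (3, 0)
```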
you74674/pytorch | [
"06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb"
] | [
"test/fx2trt/converters/acc_op/test_reshape.py"
] | [
"# Owner(s): [\"oncall: fx\"]\n\nimport torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nfrom torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec\nfrom parameterized import parameterized\nfrom torch.testing._internal.common_utils import run_tests\n\n\nclass TestReshapeConverter(AccTestCase):\n @parameterized.expand(\n [\n ((1, 20),),\n ((1, 10, -1),),\n ]\n )\n def test_reshape(self, target_shape):\n class TestModule(torch.nn.Module):\n def __init__(self, target_shape):\n super().__init__()\n self.target_shape = target_shape\n\n def forward(self, x):\n return torch.reshape(x, self.target_shape)\n\n inputs = [torch.randn(1, 2, 10)]\n self.run_test(TestModule(target_shape), inputs, expected_ops={acc_ops.reshape})\n\n @parameterized.expand(\n [\n ((-1, 2),),\n ((1, 2, -1),),\n ]\n )\n def test_reshape_with_dynamic_shape(self, target_shape):\n class TestModule(torch.nn.Module):\n def __init__(self, target_shape):\n super().__init__()\n self.target_shape = target_shape\n\n def forward(self, x):\n return torch.reshape(x, self.target_shape)\n\n input_specs = [\n InputTensorSpec(\n shape=(-1, -1, -1),\n dtype=torch.float32,\n shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],\n ),\n ]\n self.run_test_with_dynamic_shape(\n TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape}\n )\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.randn",
"torch.testing._internal.common_utils.run_tests",
"torch.reshape",
"torch.testing._internal.common_fx2trt.InputTensorSpec"
]
] |
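The target shapes in the test above rely on `torch.reshape`'s `-1` dimension inference; plain PyTorch shows the expected shapes without any TensorRT machinery:

```python
import torch

x = torch.randn(1, 2, 10)  # same input shape as the static-shape test
print(torch.reshape(x, (1, 20)).shape)      # torch.Size([1, 20])
print(torch.reshape(x, (1, 10, -1)).shape)  # torch.Size([1, 10, 2]); -1 is inferred
```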
joybanerjee08/imgaug | [
"e9d3515b52f2205cee1d3c9a913fcc638d15993b"
] | [
"test/augmenters/test_blur.py"
] | [
"from __future__ import print_function, division, absolute_import\n\nimport time\n\nimport matplotlib\nmatplotlib.use('Agg') # fix execution of tests involving matplotlib on travis\nimport numpy as np\nimport six.moves as sm\nimport cv2\nfrom scipy import ndimage\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\nfrom imgaug import parameters as iap\nfrom imgaug.testutils import keypoints_equal, reseed\nfrom imgaug.augmenters import meta\n\n\ndef main():\n time_start = time.time()\n\n test_GaussianBlur()\n test_AverageBlur()\n test_MedianBlur()\n # TODO BilateralBlur\n\n time_end = time.time()\n print(\"<%s> Finished without errors in %.4fs.\" % (__file__, time_end - time_start,))\n\n\ndef test_GaussianBlur():\n reseed()\n\n base_img = np.array([[0, 0, 0],\n [0, 255, 0],\n [0, 0, 0]], dtype=np.uint8)\n base_img = base_img[:, :, np.newaxis]\n\n images = np.array([base_img])\n images_list = [base_img]\n outer_pixels = ([], [])\n for i in sm.xrange(base_img.shape[0]):\n for j in sm.xrange(base_img.shape[1]):\n if i != j:\n outer_pixels[0].append(i)\n outer_pixels[1].append(j)\n\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)], shape=base_img.shape)]\n\n # no blur, shouldnt change anything\n aug = iaa.GaussianBlur(sigma=0)\n\n observed = aug.augment_images(images)\n expected = images\n assert np.array_equal(observed, expected)\n\n # weak blur of center pixel\n aug = iaa.GaussianBlur(sigma=0.5)\n aug_det = aug.to_deterministic()\n\n # images as numpy array\n observed = aug.augment_images(images)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n observed = aug_det.augment_images(images)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n # images as list\n observed = aug.augment_images(images_list)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n observed = aug_det.augment_images(images_list)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n # keypoints shouldnt be changed\n observed = aug.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n observed = aug_det.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n # varying blur sigmas\n aug = iaa.GaussianBlur(sigma=(0, 1))\n aug_det = aug.to_deterministic()\n\n last_aug = None\n last_aug_det = None\n nb_changed_aug = 0\n nb_changed_aug_det = 0\n nb_iterations = 1000\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(images)\n observed_aug_det = aug_det.augment_images(images)\n if i == 0:\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n assert nb_changed_aug >= int(nb_iterations * 0.8)\n assert nb_changed_aug_det == 0\n\n #############################\n # test other dtypes below\n # ndimage.gaussian_filter() rejects: float16\n # 
float64 implementation in gaussian_filter() was too inaccurate\n #############################\n\n # --\n # blur of various dtypes at sigma=0\n # --\n aug = iaa.GaussianBlur(sigma=0)\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == image)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n _min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = int(center_value)\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.all(image_aug == image)\n\n # float\n for dtype in [np.float16, np.float32, np.float64]:\n _min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = center_value\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.allclose(image_aug, image)\n\n # --\n # blur of various dtypes at sigma=1.0\n # and using an example value of 100 for int/uint/float and True for bool\n # --\n aug = iaa.GaussianBlur(sigma=1.0)\n\n # prototype kernel, generated via:\n # mask = np.zeros((3, 3), dtype=np.float64)\n # mask[1, 1] = 1.0\n # mask = ndimage.gaussian_filter(mask, 1.0)\n kernel = np.float64([\n [0.08767308, 0.12075024, 0.08767308],\n [0.12075024, 0.16630671, 0.12075024],\n [0.08767308, 0.12075024, 0.08767308]\n ])\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image_aug = aug.augment_image(image)\n expected = kernel > 0.5\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == expected)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100\n image_aug = aug.augment_image(image)\n expected = (kernel * 100).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) <= 2\n\n # float\n for dtype in [np.float16, np.float32, np.float64]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100.0\n image_aug = aug.augment_image(image)\n expected = (kernel * 100.0).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) < 1.0\n\n # --\n # blur of various dtypes at sigma=0.4\n # and using an example value of 100 for int/uint/float and True for bool\n # --\n aug = iaa.GaussianBlur(sigma=0.4)\n\n # prototype kernel, generated via:\n # mask = np.zeros((3, 3), dtype=np.float64)\n # mask[1, 1] = 1.0\n # kernel = ndimage.gaussian_filter(mask, 0.4)\n kernel = np.float64([\n [0.00163144, 0.03712817, 0.00163144],\n [0.03712817, 0.84496158, 0.03712817],\n [0.00163144, 0.03712817, 0.00163144]\n ])\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image_aug = aug.augment_image(image)\n expected = kernel > 0.5\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == expected)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100\n image_aug = aug.augment_image(image)\n expected = (kernel * 100).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) <= 2\n\n # float\n for dtype 
in [np.float16, np.float32, np.float64]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100.0\n image_aug = aug.augment_image(image)\n expected = (kernel * 100.0).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) < 1.0\n\n # --\n # blur of various dtypes at sigma=0.75\n # and values being half-way between center and maximum for each dtype (bool is skipped as it doesnt make any\n # sense here)\n # The goal of this test is to verify that no major loss of resolution happens for large dtypes.\n # Such inaccuracies appear for float64 if used.\n # --\n aug = iaa.GaussianBlur(sigma=0.75)\n\n # prototype kernel, generated via:\n # mask = np.zeros((3, 3), dtype=np.float64)\n # mask[1, 1] = 1.0\n # kernel = ndimage.gaussian_filter(mask, 0.75)\n kernel = np.float64([\n [0.05469418, 0.12447951, 0.05469418],\n [0.12447951, 0.28330525, 0.12447951],\n [0.05469418, 0.12447951, 0.05469418]\n ])\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n _min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)\n value = int(center_value + 0.4 * max_value)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image_aug = aug.augment_image(image)\n expected = (kernel * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)\n assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)\n\n # float\n for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image_aug = aug.augment_image(image)\n expected = (kernel * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 
8, 16, 32, 64 bit)\n assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)\n\n # assert failure on invalid dtypes\n aug = iaa.GaussianBlur(sigma=1.0)\n for dt in [np.uint64, np.int64, np.float128]:\n got_exception = False\n try:\n _ = aug.augment_image(np.zeros((1, 1), dtype=dt))\n except Exception as exc:\n assert \"forbidden dtype\" in str(exc)\n got_exception = True\n assert got_exception\n\n\ndef test_AverageBlur():\n reseed()\n\n base_img = np.zeros((11, 11, 1), dtype=np.uint8)\n base_img[5, 5, 0] = 200\n base_img[4, 5, 0] = 100\n base_img[6, 5, 0] = 100\n base_img[5, 4, 0] = 100\n base_img[5, 6, 0] = 100\n\n blur3x3 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],\n [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],\n [0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],\n [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],\n [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]\n\n blur4x4 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],\n [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],\n [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],\n [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],\n [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],\n [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]\n\n blur5x5 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],\n [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],\n [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],\n [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],\n [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],\n [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],\n [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]\n\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)], shape=base_img.shape)]\n\n # no blur, shouldnt change anything\n aug = iaa.AverageBlur(k=0)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, base_img)\n\n # k=3\n aug = iaa.AverageBlur(k=3)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur3x3)\n\n # k=5\n aug = iaa.AverageBlur(k=5)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur5x5)\n\n # k as (3, 4)\n aug = iaa.AverageBlur(k=(3, 4))\n nb_iterations = 100\n nb_seen = [0, 0]\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n nb_seen[0] += 1\n elif np.array_equal(observed, blur4x4):\n nb_seen[1] += 1\n else:\n raise Exception(\"Unexpected result in AverageBlur@1\")\n p_seen = [v/nb_iterations for v in nb_seen]\n assert 0.4 <= p_seen[0] <= 0.6\n assert 0.4 <= p_seen[1] <= 0.6\n\n # k as (3, 5)\n aug = iaa.AverageBlur(k=(3, 5))\n nb_iterations = 100\n nb_seen = [0, 0, 0]\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n nb_seen[0] += 1\n elif np.array_equal(observed, blur4x4):\n nb_seen[1] += 1\n elif np.array_equal(observed, blur5x5):\n nb_seen[2] += 1\n else:\n raise Exception(\"Unexpected result in 
AverageBlur@2\")\n p_seen = [v/nb_iterations for v in nb_seen]\n assert 0.23 <= p_seen[0] <= 0.43\n assert 0.23 <= p_seen[1] <= 0.43\n assert 0.23 <= p_seen[2] <= 0.43\n\n # k as stochastic parameter\n aug = iaa.AverageBlur(k=iap.Choice([3, 5]))\n nb_iterations = 100\n nb_seen = [0, 0]\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n nb_seen[0] += 1\n elif np.array_equal(observed, blur5x5):\n nb_seen[1] += 1\n else:\n raise Exception(\"Unexpected result in AverageBlur@3\")\n p_seen = [v/nb_iterations for v in nb_seen]\n assert 0.4 <= p_seen[0] <= 0.6\n assert 0.4 <= p_seen[1] <= 0.6\n\n # k as ((3, 5), (3, 5))\n aug = iaa.AverageBlur(k=((3, 5), (3, 5)))\n\n possible = dict()\n for kh in [3, 4, 5]:\n for kw in [3, 4, 5]:\n key = (kh, kw)\n if kh == 0 or kw == 0:\n possible[key] = np.copy(base_img)\n else:\n possible[key] = cv2.blur(base_img, (kh, kw))[..., np.newaxis]\n\n nb_iterations = 250\n nb_seen = dict([(key, 0) for key, val in possible.items()])\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n for key, img_aug in possible.items():\n if np.array_equal(observed, img_aug):\n nb_seen[key] += 1\n # dont check sum here, because 0xX and Xx0 are all the same, i.e. much\n # higher sum than nb_iterations\n assert all([v > 0 for v in nb_seen.values()])\n\n # keypoints shouldnt be changed\n aug = iaa.AverageBlur(k=3)\n aug_det = aug.to_deterministic()\n observed = aug.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n observed = aug_det.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n #############################\n # test other dtypes below\n #############################\n\n # --\n # blur of various dtypes at k=0\n # --\n aug = iaa.AverageBlur(k=0)\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image[2, 2] = True\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == image)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.int8, np.int16]:\n _min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = int(center_value + 0.4 * max_value)\n image[2, 2] = int(center_value + 0.4 * max_value)\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.all(image_aug == image)\n\n # float\n for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image[2, 2] = value\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.allclose(image_aug, image)\n\n # --\n # blur of various dtypes at k=3\n # and using an example value of 100 for int/uint/float and True for bool\n # --\n aug = iaa.AverageBlur(k=3)\n\n # prototype mask\n # we place values in a 3x3 grid at positions (row=1, col=1) and (row=2, col=2) (beginning with 0)\n # AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its default padding mode,\n # see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html\n # the matrix below shows the 3x3 grid and the padded row/col values around it\n # [1, 0, 1, 0, 1]\n # [0, 0, 0, 0, 0]\n # [1, 0, 1, 0, 1]\n # [0, 0, 0, 1, 0]\n # [1, 0, 1, 0, 1]\n mask = np.float64([\n [4/9, 2/9, 4/9],\n [2/9, 2/9, 3/9],\n [4/9, 3/9, 5/9]\n ])\n\n # bool\n image = np.zeros((3, 3), 
dtype=bool)\n image[1, 1] = True\n image[2, 2] = True\n image_aug = aug.augment_image(image)\n expected = mask > 0.5\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == expected)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.int8, np.int16]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100\n image[2, 2] = 100\n image_aug = aug.augment_image(image)\n expected = np.round(mask * 100).astype(dtype) # cv2.blur() applies rounding for int/uint dtypes\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) <= 2\n\n # float\n for dtype in [np.float16, np.float32, np.float64]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100.0\n image[2, 2] = 100.0\n image_aug = aug.augment_image(image)\n expected = (mask * 100.0).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) < 1.0\n\n # --\n # blur of various dtypes at k=3\n # and values being half-way between center and maximum for each dtype (bool is skipped as it doesnt make any\n # sense here)\n # The goal of this test is to verify that no major loss of resolution happens for large dtypes.\n # --\n aug = iaa.AverageBlur(k=3)\n\n # prototype mask (see above)\n mask = np.float64([\n [4/9, 2/9, 4/9],\n [2/9, 2/9, 3/9],\n [4/9, 3/9, 5/9]\n ])\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.int8, np.int16]:\n _min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)\n value = int(center_value + 0.4 * max_value)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image[2, 2] = value\n image_aug = aug.augment_image(image)\n expected = (mask * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)\n assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)\n\n # float\n for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image[2, 2] = value\n image_aug = aug.augment_image(image)\n expected = (mask * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 
8, 16, 32, 64 bit)\n assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)\n\n # assert failure on invalid dtypes\n aug = iaa.AverageBlur(k=3)\n for dt in [np.uint32, np.uint64, np.int32, np.int64]:\n got_exception = False\n try:\n _ = aug.augment_image(np.zeros((1, 1), dtype=dt))\n except Exception as exc:\n assert \"forbidden dtype\" in str(exc)\n got_exception = True\n assert got_exception\n\n\ndef test_MedianBlur():\n reseed()\n\n base_img = np.zeros((11, 11, 1), dtype=np.uint8)\n base_img[3:8, 3:8, 0] = 1\n base_img[4:7, 4:7, 0] = 2\n base_img[5:6, 5:6, 0] = 3\n\n blur3x3 = np.zeros_like(base_img)\n blur3x3[3:8, 3:8, 0] = 1\n blur3x3[4:7, 4:7, 0] = 2\n blur3x3[4, 4, 0] = 1\n blur3x3[4, 6, 0] = 1\n blur3x3[6, 4, 0] = 1\n blur3x3[6, 6, 0] = 1\n blur3x3[3, 3, 0] = 0\n blur3x3[3, 7, 0] = 0\n blur3x3[7, 3, 0] = 0\n blur3x3[7, 7, 0] = 0\n\n blur5x5 = np.copy(blur3x3)\n blur5x5[4, 3, 0] = 0\n blur5x5[3, 4, 0] = 0\n blur5x5[6, 3, 0] = 0\n blur5x5[7, 4, 0] = 0\n blur5x5[4, 7, 0] = 0\n blur5x5[3, 6, 0] = 0\n blur5x5[6, 7, 0] = 0\n blur5x5[7, 6, 0] = 0\n blur5x5[blur5x5 > 1] = 1\n\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)], shape=base_img.shape)]\n\n # no blur, shouldnt change anything\n aug = iaa.MedianBlur(k=1)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, base_img)\n\n # k=3\n aug = iaa.MedianBlur(k=3)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur3x3)\n\n # k=5\n aug = iaa.MedianBlur(k=5)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur5x5)\n\n # k as (3, 5)\n aug = iaa.MedianBlur(k=(3, 5))\n seen = [False, False]\n for i in sm.xrange(100):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n seen[0] = True\n elif np.array_equal(observed, blur5x5):\n seen[1] = True\n else:\n raise Exception(\"Unexpected result in MedianBlur@1\")\n if all(seen):\n break\n assert all(seen)\n\n # k as stochastic parameter\n aug = iaa.MedianBlur(k=iap.Choice([3, 5]))\n seen = [False, False]\n for i in sm.xrange(100):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n seen[0] += True\n elif np.array_equal(observed, blur5x5):\n seen[1] += True\n else:\n raise Exception(\"Unexpected result in MedianBlur@2\")\n if all(seen):\n break\n assert all(seen)\n\n # keypoints shouldnt be changed\n aug = iaa.MedianBlur(k=3)\n aug_det = aug.to_deterministic()\n observed = aug.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n observed = aug_det.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n\ndef test_MotionBlur():\n reseed()\n\n # simple scenario\n aug = iaa.MotionBlur(k=3, angle=0, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 1.0/3, 0],\n [0, 1.0/3, 0],\n [0, 1.0/3, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected)\n\n # 90deg angle\n aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0, 0],\n [1.0/3, 1.0/3, 1.0/3],\n [0, 0, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert 
np.allclose(matrix_channel, expected)\n\n # 45deg angle\n aug = iaa.MotionBlur(k=3, angle=45, direction=0.0, order=0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0, 1.0/3],\n [0, 1.0/3, 0],\n [1.0/3, 0, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected)\n\n # random angle\n aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]\n expected1 = np.float32([\n [0, 1.0/3, 0],\n [0, 1.0/3, 0],\n [0, 1.0/3, 0]\n ])\n expected2 = np.float32([\n [0, 0, 0],\n [1.0/3, 1.0/3, 1.0/3],\n [0, 0, 0],\n ])\n nb_seen = [0, 0]\n for matrices_image in matrices:\n assert np.allclose(matrices_image[0], matrices_image[1])\n assert np.allclose(matrices_image[1], matrices_image[2])\n for matrix_channel in matrices_image:\n if np.allclose(matrix_channel, expected1):\n nb_seen[0] += 1\n elif np.allclose(matrix_channel, expected2):\n nb_seen[1] += 1\n assert nb_seen[0] > 0\n assert nb_seen[1] > 0\n\n # 5x5\n aug = iaa.MotionBlur(k=5, angle=90, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected)\n\n # random k\n aug = iaa.MotionBlur(k=[3, 5], angle=90, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]\n expected1 = np.float32([\n [0, 0, 0],\n [1.0/3, 1.0/3, 1.0/3],\n [0, 0, 0],\n ])\n expected2 = np.float32([\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n ])\n nb_seen = [0, 0]\n for matrices_image in matrices:\n assert np.allclose(matrices_image[0], matrices_image[1])\n assert np.allclose(matrices_image[1], matrices_image[2])\n for matrix_channel in matrices_image:\n if matrix_channel.shape == expected1.shape and np.allclose(matrix_channel, expected1):\n nb_seen[0] += 1\n elif matrix_channel.shape == expected2.shape and np.allclose(matrix_channel, expected2):\n nb_seen[1] += 1\n assert nb_seen[0] > 0\n assert nb_seen[1] > 0\n\n # k with choice [a, b, c, ...] 
must error in case of non-discrete values\n got_exception = False\n try:\n _ = iaa.MotionBlur(k=[3, 3.5, 4])\n except Exception as exc:\n assert \"to only contain integer\" in str(exc)\n got_exception = True\n assert got_exception\n\n # no error in case of (a, b), checks for #215\n aug = iaa.MotionBlur(k=(3, 7))\n for _ in range(10):\n _ = aug.augment_image(np.zeros((11, 11, 3), dtype=np.uint8))\n\n # direction 1.0\n aug = iaa.MotionBlur(k=3, angle=0, direction=1.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 1.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 0.0/1.5, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)\n\n # direction -1.0\n aug = iaa.MotionBlur(k=3, angle=0, direction=-1.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 1.0/1.5, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)\n\n # random direction\n aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=[-1.0, 1.0])\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]\n expected1 = np.float32([\n [0, 1.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 0.0/1.5, 0]\n ])\n expected2 = np.float32([\n [0, 0.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 1.0/1.5, 0]\n ])\n nb_seen = [0, 0]\n for matrices_image in matrices:\n assert np.allclose(matrices_image[0], matrices_image[1])\n assert np.allclose(matrices_image[1], matrices_image[2])\n for matrix_channel in matrices_image:\n if np.allclose(matrix_channel, expected1, rtol=0, atol=1e-2):\n nb_seen[0] += 1\n elif np.allclose(matrix_channel, expected2, rtol=0, atol=1e-2):\n nb_seen[1] += 1\n assert nb_seen[0] > 0\n assert nb_seen[1] > 0\n\n # test of actual augmenter\n img = np.zeros((7, 7, 3), dtype=np.uint8)\n img[3-1:3+2, 3-1:3+2, :] = 255\n aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)\n img_aug = aug.augment_image(img)\n v1 = (255*(1/3))\n v2 = (255*(1/3)) * 2\n v3 = (255*(1/3)) * 3\n expected = np.float32([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, v1, v2, v3, v2, v1, 0],\n [0, v1, v2, v3, v2, v1, 0],\n [0, v1, v2, v3, v2, v1, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]\n ]).astype(np.uint8)\n expected = np.tile(expected[..., np.newaxis], (1, 1, 3))\n assert np.allclose(img_aug, expected)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.zeros_like",
"numpy.tile",
"numpy.allclose",
"numpy.zeros",
"numpy.round",
"numpy.dtype",
"numpy.float32",
"numpy.copy",
"numpy.all",
"numpy.max",
"numpy.array_equal",
"matplotlib.use",
"numpy.array",
"numpy.float64"
]
] |
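The MotionBlur assertions in the test file above compare each per-channel kernel against a hand-built expectation. A minimal numpy-only sketch of how those expectations arise (`line_kernel` is a hypothetical helper for this illustration, not an imgaug API): a k x k motion kernel for angle 0 is a normalized vertical line of weights 1/k, and angle 90 rotates it onto the centre row.

```python
import numpy as np

def line_kernel(k, angle):
    """Hypothetical helper: k x k motion-blur kernel for angle 0 or 90."""
    kernel = np.zeros((k, k), dtype=np.float32)
    if angle == 0:
        kernel[:, k // 2] = 1.0 / k   # vertical line, as in the angle=0 expectation
    elif angle == 90:
        kernel[k // 2, :] = 1.0 / k   # horizontal line, as in the angle=90 expectation
    else:
        raise ValueError("this sketch only covers 0 and 90 degrees")
    return kernel

expected_0 = np.float32([[0, 1/3, 0], [0, 1/3, 0], [0, 1/3, 0]])
assert np.allclose(line_kernel(3, 0), expected_0)
assert np.isclose(line_kernel(5, 90).sum(), 1.0)  # weights always sum to 1
```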
mdodici/trojan-WD-pollution | [
"ec79a96f0d9517a53df4c82ca1be0d5d38f3346b"
] | [
"3-Trojan_Results/Scripts/1kB_Evals.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ntarget = '1kB'\nradeg = np.pi/180\n\ndef cart_to_pol(x,y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y,x)\n return r, phi\n\ndef pol_to_cart(r,phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y\n\ndef L45(msun,mjup):\n u2 = mjup/(msun+mjup)\n \n x_L4 = 0.5 - u2\n x_L5 = x_L4\n \n y_L4 = np.sqrt(3)/2\n y_L5 = -y_L4\n \n return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])\n\ndef L45_nonnorm(xjup,yjup,xsun,ysun):\n phi_jup = np.arctan2(yjup,xjup)\n \n phi_L4 = phi_jup + np.pi/3\n phi_L5 = phi_jup - np.pi/3\n \n xsep = (xsun - xjup)\n ysep = (ysun - yjup)\n \n r_jupsol = np.sqrt(xsep**2 + ysep**2)\n \n x_L4 = r_jupsol*np.cos(phi_L4)\n x_L5 = r_jupsol*np.cos(phi_L5)\n y_L4 = r_jupsol*np.sin(phi_L4)\n y_L5 = r_jupsol*np.sin(phi_L5)\n \n return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])\n\ndef hill(a,e,m,M):\n return a*(1-e)*np.power(m/(3*M),1/3)\n\n\ndef r_pol(r,psi,M1,M2,a):\n q = M2/M1\n z = np.zeros((len(psi),len(r)))\n for i, phi in enumerate(psi):\n x_ = r*np.cos(phi)\n y_ = r*np.sin(phi)\n x = x_/a\n y = y_/a\n s1 = np.sqrt(x**2 + y**2)\n s2 = np.sqrt((x-1)**2 + y**2)\n \n term1 = 2/(s1*(1+q))\n term2 = 2*q/(s2*(1+q))\n term3 = (x - q/(1+q))**2\n term4 = y**2\n z[i] = term1 + term2 + term3 + term4\n return z\n\nast_d = np.load('{0}_Trojandata.npy'.format(target))\nnum_asts = len(ast_d[0,:,0])\nprint(ast_d.shape)\n\njup_d = np.load('{0}_Planetdata.npy'.format(target))\nsol_d = np.load('{0}_Stardata.npy'.format(target))\ntimes = np.load('{0}_Timesteps.npy'.format(target))\n\nast_a = ast_d[0]; ast_e = ast_d[1]; ast_i = ast_d[2] \nast_o = ast_d[3]; ast_p = ast_d[4]; ast_l = ast_d[5]\nast_x = ast_d[6]; ast_y = ast_d[7]; ast_z = ast_d[8]\nast_meda = np.median(ast_a,axis=0)\n\njup_a = jup_d[0]; jup_e = jup_d[1]; jup_i = jup_d[2]; jup_p = jup_d[3]\njup_l = jup_d[4]; jup_x = jup_d[5]; jup_y = jup_d[6]; jup_z = jup_d[7]\nsol_m = sol_d[0]; sol_l = sol_d[1]; sol_x = sol_d[2]; sol_y = sol_d[3]; sol_z = sol_d[4]\njhill = hill(jup_a,jup_e,9.546e-4,sol_m)\ndst_jall = np.sqrt((ast_x - jup_x)**2 + (ast_y - jup_y)**2)\n\nL45x, L45y = L45_nonnorm(jup_x,jup_y,sol_x,sol_y)\nL4_xs = L45x[0]; L4_ys = L45y[0]\nL5_xs = L45x[1]; L5_ys = L45y[1]\n\ni_dif = np.zeros_like(ast_i)\ni_int = ast_i[:,0]\nfor i in range(len(ast_a[0,:])):\n i_dif[:,i] = ast_i[:,i] - i_int\n \nphi_vals = np.linspace(-np.pi,np.pi,500)\nZ = r_pol(jup_a,phi_vals,sol_m,9.546e-4,jup_a)\nPot = np.flip(Z,1)\n\nast_r, ast_h = cart_to_pol(ast_x,ast_y)\njup_r, jup_h = cart_to_pol(jup_x,jup_y)\nphdif = np.zeros_like(ast_h)\nfor i in range(len(jup_h)):\n phdif[:,i] = ast_h[:,i] - jup_h[i]\n \nid4 = []\nid5 = []\nfor i in range(num_asts):\n for it in range(len(jup_h)):\n if phdif[i,it] < -np.pi:\n phdif[i,it] = phdif[i,it] + 2*np.pi\n if phdif[i,it] > np.pi:\n phdif[i,it] = phdif[i,it] - 2*np.pi\n if phdif[i,0] > 0:\n id4.append(i)\n if phdif[i,0] < 0:\n id5.append(i)\n \nprint('Percentage at L4: %2.1f' %(len(id4)*100/num_asts))\n\nliba = np.zeros((num_asts,200))\nlibp = np.zeros((num_asts,200))\nfor i in range(num_asts):\n for n in range(200):\n high = int(500*(n+1))\n loww = int(500*n)\n pmax = np.amax(phdif[i,loww:high])\n pmin = np.amin(phdif[i,loww:high])\n amax = np.amax(ast_a[i,loww:high])\n amin = np.amin(ast_a[i,loww:high])\n amid = np.median(jup_a[loww:high])\n \n if pmax > 0:\n mid = np.pi/3\n if pmax < 0:\n mid = -np.pi/3\n \n lip = ((pmax - mid) + (pmin - mid)) / 2\n lia 
= ((amax - amid)+(amin - amid)) / 2\n libp[i,n] = abs(lip)\n liba[i,n] = abs(lia)\n \nindices = []\nhillers = []\nfor i in range(num_asts):\n it = 0\n while it < len(ast_meda):\n a_focus = ast_a[i,it]\n a_media = ast_meda[it]\n if a_focus > a_media + 2:\n indices.append(i)\n break\n elif a_focus < a_media - 2:\n indices.append(i)\n break\n else:\n it += 1\n it = 0\n while it < len(jhill):\n d = dst_jall[i,it]\n h = jhill[it]\n if d <= h + 0.1:\n hillers.append(i)\n break\n else:\n it += 1\n\nidx = np.array(indices)\nhdx = np.array(hillers)\n\nhill_not_sma = np.array(list(set(hillers) - set(indices)))\nndx = np.array(list(set(range(num_asts)) - set(indices)))\n\nprint(\"Number of escapers: \", len(indices))\nprint(\"Number of hill crossers: \", len(hillers))\npct = len(indices)/num_asts\nprint('Pct escaped / Total Asts: %0.2f' %pct)\n\nnrm_a = ast_a[ndx]; nrm_e = ast_e[ndx]; nrm_i = ast_i[ndx]; ndifi = i_dif[ndx]; nrmla = liba[ndx]\nnrm_p = ast_p[ndx]; nrm_l = ast_l[ndx]; nrm_x = ast_x[ndx]; nrm_y = ast_y[ndx]; nrmlp = libp[ndx]\n\n\nodd_a = ast_a[idx]; odd_e = ast_e[idx]; odd_i = ast_i[idx]; odifi = i_dif[idx]; oddla = liba[idx]\nodd_p = ast_p[idx]; odd_l = ast_l[idx]; odd_x = ast_x[idx]; odd_y = ast_y[idx]; oddlp = libp[idx]\n\nnrm_r, nrmph = cart_to_pol(nrm_x,nrm_y); odd_r, oddph = cart_to_pol(odd_x,odd_y)\njup_r, jupph = cart_to_pol(jup_x,jup_y); sol_r, solph = cart_to_pol(sol_x,sol_y)\nL4_rs, L4phs = cart_to_pol(L4_xs,L4_ys); L5_rs, L5phs = cart_to_pol(L5_xs,L5_ys)\n\ndistj = np.sqrt((odd_x - jup_x)**2 + (odd_y - jup_y)**2)\ndisth = np.sqrt((ast_x[hdx] - jup_x)**2 + (ast_y[hdx] - jup_y)**2)\ndists = np.sqrt((odd_x - sol_x)**2 + (odd_y - sol_y)**2)\njdist = np.sqrt((jup_x - sol_x)**2 + (jup_y - sol_y)**2)\n\nearlies = []\nlaties = []\nhill_cross = np.zeros(len(hdx))\n\nfor i in range(len(odd_a)):\n it = 0\n while it < 100000:\n a_focus = odd_a[i,it]\n a_media = ast_meda[it]\n if a_focus > a_media + 2:\n if it < 33333:\n earlies.append(i)\n break\n elif it > 70000:\n laties.append(i)\n break\n else:\n break\n elif a_focus < a_media - 2:\n if it < 33333:\n earlies.append(i)\n break\n elif it > 70000:\n laties.append(i)\n break\n else:\n break\n else:\n it += 1\n \nfor i in range(len(hdx)):\n it = 0\n while it < 100000:\n d = disth[i,it]\n h = jhill[it]\n if d <= h:\n hill_cross[i] = it\n break\n else:\n it += 1\n \nhorses = []\nfor number,n in enumerate(idx):\n i = 0\n while i < 5000:\n val = phdif[n,i]\n if 170*radeg <= val:\n horses.append(n)\n break\n elif val <= -170*radeg:\n horses.append(n)\n break\n elif -5*radeg <= val <= 5*radeg:\n horses.append(n)\n break\n i += 1\n \nhrs = np.array(horses)\ntrs = np.array( list( set(idx) - set(horses) ) )\n \nedx = np.array(earlies)\nldx = np.array(laties)\n\nprint(\"Number of early escapees: \", len(earlies), \" (escaped before .67 Myr)\")\nprint(\"Number of late escapees: \", len(laties), \" (escaped after %1.2f Myr)\" %(times[70000]/1e6))\npct_e = len(earlies)/len(indices)\npct_l = len(laties)/len(indices)\nprint('Number early / Total escapees: %0.2f' %pct_e)\nprint('Number late / Total escapees: %0.2f' %pct_l)\npcT_e = len(earlies)/num_asts\npcT_l = len(laties)/num_asts\nprint('Number early / Total Asts.: %0.2f' %pcT_e)\nprint('Number late / Total Asts.: %0.2f' %pcT_l)\n\n\nx_axis = np.linspace(0,times[33333]/1e6)\nx_axi2 = np.linspace(times[70000]/1e6,times[-1]/1e6)\n\nfig, ax = plt.subplots(3,figsize=(14,13),sharex=True,gridspec_kw={'height_ratios': [3, 1, 
.75]})\nplt.subplots_adjust(hspace=0)\n\nax[0].plot(times/1e6,ast_meda,'k',lw=3)\nax[0].vlines([times[33333]/1e6,times[70000]/1e6],5,9.5,'b',alpha=0.8,zorder=0)\n\nax[0].fill_between(x_axis,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[0].fill_between(x_axi2,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[0].plot(times/1e6,jup_a,'gold',lw=3)\nax[0].legend(['Median Ast.','Planet'],fontsize=16,frameon=False,loc='upper left')\nax[0].set_ylabel('Semimajor Axis / AU',fontsize=16)\nax[0].set_ylim(5,9.5)\nax[0].set_xlim(0,2)\nax[0].text(0.18,7.25,\"%1.i escaped\" %len(earlies),fontsize=25)\nax[0].text(0.8,7.25,\"%2.i escaped\" %(len(indices) - len(earlies) - len(laties)),fontsize=25)\nax[0].text(1.48,7.25,\"%2.i escaped\" %len(laties),fontsize=25)\n\nax[1].plot(times/1e6,sol_l,'orange',lw=3,zorder=10)\nax[1].plot(times/1e6,sol_m,'g',ls=':',lw=3,zorder=10)\nax[1].vlines([times[33333]/1e6,times[70000]/1e6],0,4,'b',alpha=0.8,zorder=0)\nax[1].legend([\"log Stellar Luminosity\", \"Stellar Mass\"],fontsize=16,loc='center left',frameon=False)\nax[1].set_ylabel(\"Solar Units\",fontsize=16)\nax[1].set_ylim(0,4)\nax[1].fill_between(x_axis,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[1].fill_between(x_axi2,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[1].set_xlabel('Time / Myr',fontsize=16)\nax[1].set_yticks([0,1,2,3])\n\nax[2].hist(hill_cross*20/1e6,edgecolor='k',facecolor='k',alpha=0.5,range=[0,2],bins=20)\nax[2].set_ylabel(\"Escapes\",fontsize=16)\nax[2].set_xlabel(\"Time / Myr\",fontsize=16)\nax[2].set_ylim(0,35)\nax[2].set_yticks([0,10,20,30])\nfig.savefig('{0}_Timeseries.pdf'.format(target),dpi=300)\n\n############\n\nhist, axh = plt.subplots(1,4,figsize=(20,5))\n\naxh[0].hist(nrm_a[:,0],edgecolor='k',histtype='step',range=[4.95,5.45])\naxh[0].hist(odd_a[:,0],facecolor='r',alpha=0.7,range=[4.95,5.45])\naxh[0].set_xlabel(\"SMA (AU)\",fontsize=16)\naxh[0].set_xlim(4.95,5.45)\n\naxh[1].hist(nrm_e[:,0],edgecolor='k',histtype='step',range=[0,.25])\naxh[1].hist(odd_e[:,0],facecolor='r',alpha=0.7,range=[0,.25])\naxh[1].set_xlabel(\"Eccentricity\",fontsize=16)\naxh[1].set_xlim(0,0.25)\n\naxh[2].hist(abs(nrmla[:,0]),edgecolor='k',histtype='step',range=[0,0.02],bins=20)\naxh[2].hist(abs(liba[trs,0]),facecolor='r',alpha=0.7,range=[0,0.02],bins=20)\naxh[2].set_xlabel(\"SMA Libration Amp. 
(AU)\",fontsize=16)\naxh[2].set_xlim(0,.02)\naxh[2].set_xticks([0,0.005,0.01,0.015,0.02])\n\nradeg = np.pi/180\naxh[3].hist(abs(nrmlp[:,0])/radeg,edgecolor='k',histtype='step',range=[0,35])\naxh[3].hist(abs(libp[trs,0])/radeg,facecolor='r',alpha=0.7,range=[0,35])\naxh[3].set_xlabel(r\"$\\lambda$ Libration Amplitude (Deg.)\",fontsize=16)\naxh[3].set_xlim(0,35)\naxh[3].legend(labels=['Stable','Escaped'],fontsize=14,frameon=False,loc='upper right')\n\nhist.suptitle('Initial conditions',fontsize=18)\nhist.savefig('{0}_Histograms.pdf'.format(target),dpi=300)\n\n#############\n\norf, ora = plt.subplots(1,2,figsize=(15,5),gridspec_kw={'width_ratios': [2, 1]})\nfor i in range(len(ndx)):\n ora[0].plot(phdif[ndx[i],:500],ast_a[ndx[i],:500]/5.2,'k',alpha=0.01,zorder=5)\nfor i,tr in enumerate(trs):\n ora[0].plot(phdif[tr,:500],ast_a[tr,:500]/5.2,'r',alpha=0.05,zorder=10)\nora[0].set_xlim(-np.pi,np.pi)\nora[0].set_ylim(.9,1.1)\nora[0].set_xlabel(r\"$\\phi - \\phi_{jup}$\",fontsize=16)\nora[0].set_ylabel(r\"SMA / $a_{jup}$\",fontsize=16)\nora[0].vlines([-np.pi/3,np.pi/3],0.9,1.1,ls='--',zorder=0)\nora[0].set_xticks([-np.pi,-np.pi/2,-np.pi/3,0,np.pi/3,np.pi/2,np.pi])\nora[0].set_xticklabels([r\"-$\\pi$\",r\"-$\\pi$/2\",r\"$L_5$\",'0',r\"$L_4$\",r\"$\\pi$/2\",r\"$\\pi$\"])\n\nsns.kdeplot(abs(nrmlp[:,0])/radeg,nrmla[:,0],shade=True,shade_lowest=None,cmap='Greys',levels=5,alpha=0.5)\nsns.kdeplot(abs(libp[trs,0])/radeg,liba[trs,0],shade=True,shade_lowest=None,cmap='Reds',levels=5,alpha=0.5)\nora[1].set_ylabel(\"Init. SMA Libration (AU)\",fontsize=16)\nora[1].set_xlabel(r\"Init. $\\lambda$ Libration (Deg.)\",fontsize=16)\nora[1].set_xlim(0,35)\norf.tight_layout()\norf.savefig('{0}_Orbits.pdf'.format(target),dpi=300)\n\n#############\n\nnorm = mpl.colors.Normalize(vmin = np.min(.005), vmax = np.max(.015), clip = False)\n\ntim, tax = plt.subplots(figsize=(7,6))\nscatter = tax.scatter(abs(libp[hdx,0])/radeg,hill_cross*20/1e6,c=abs(liba[hdx,0]),cmap='Reds',norm=norm)\ntax.set_xlim(0,35)\ntax.set_xlabel(r\"Initial $\\lambda$ Libration (Deg.)\",fontsize=16)\ntax.set_ylabel('Time of Encounter (Myr)',fontsize=16)\ntim.colorbar(scatter, label='Initial SMA Libration (AU)')\ntax.set_ylim(0,2)\ntim.savefig('{0}_Eject_Perts.pdf'.format(target),dpi=300)\n\n######################\n\nhill_data = np.array((hdx,hill_cross))\nnp.save('{0}_Ejects.npy'.format(target), idx)\nnp.save('{0}_Hillcr.npy'.format(target), hill_data)"
] | [
[
"numpy.zeros_like",
"numpy.arctan2",
"numpy.array",
"numpy.zeros",
"numpy.ones_like",
"numpy.median",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.amax",
"matplotlib.pyplot.subplots_adjust",
"numpy.power",
"numpy.amin",
"numpy.min",
"numpy.flip",
"numpy.max",
"numpy.sqrt",
"numpy.sin",
"numpy.linspace"
]
] |
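The script above places the L4/L5 Lagrange points by offsetting the planet's orbital phase by plus or minus 60 degrees (`L45_nonnorm`) and moves between Cartesian and polar coordinates via `cart_to_pol`/`pol_to_cart`. A minimal sketch of that geometry, assuming only numpy and a Jupiter-like orbital radius of 5.2 AU (an illustrative value, not read from the data files):

```python
import numpy as np

def cart_to_pol(x, y):
    # same conversions as in the script above
    return np.sqrt(x**2 + y**2), np.arctan2(y, x)

def pol_to_cart(r, phi):
    return r * np.cos(phi), r * np.sin(phi)

r_jup, phi_jup = cart_to_pol(5.2, 0.0)            # planet on the +x axis
x4, y4 = pol_to_cart(r_jup, phi_jup + np.pi / 3)  # L4 leads by 60 degrees
x5, y5 = pol_to_cart(r_jup, phi_jup - np.pi / 3)  # L5 trails by 60 degrees
assert np.isclose(cart_to_pol(x4, y4)[0], 5.2)    # same orbital radius as the planet
assert np.isclose(np.arctan2(y5, x5), -np.pi / 3)
```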
PowerOlive/mindspore | [
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff",
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff",
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff",
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff",
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff"
] | [
"tests/st/pynative/data_parallel/test_pynative_hccl_allreduce.py",
"tests/st/auto_monad/test_auto_monad_layer.py",
"tests/st/ops/cpu/test_broadcast_to_op.py",
"tests/st/scipy_st/test_utils.py",
"tests/st/auto_monad/test_effect_random.py"
] | [
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"test hccl allreduce performance with 8p\"\"\"\n\nimport os\nfrom multiprocessing import Process, Queue\nimport pytest\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.ops import operations as P\nimport mindspore.communication.management as D\nfrom mindspore import context\nfrom mindspore.context import ParallelMode\n\nMINDSPORE_HCCL_CONFIG_PATH = \"/home/workspace/mindspore_config/hccl/rank_table_8p.json\"\n\nnp.random.seed(1)\nos.environ['GLOG_v'] = str(2)\n\nclass AllReduceNet(nn.Cell):\n def __init__(self):\n super(AllReduceNet, self).__init__()\n self.mul = P.Mul()\n self.all_reduce = P.AllReduce()\n self.add = P.Add()\n\n def construct(self, x):\n x = self.mul(x, 2)\n y1 = Tensor(np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])).astype(np.float32)\n z = self.add(x, y1)\n z = self.all_reduce(z)\n y2 = Tensor(np.array([[-16, -16, -16, -16], [-16, -16, -16, -16], [-16, -16, -16, -16]])).astype(np.float32)\n out = self.add(z, y2)\n out = self.all_reduce(out)\n out = self.mul(out, 2)\n return out\n\ndef train_allreduce_8p(q, device_id, device_num):\n os.system(\"mkdir \" + str(device_id))\n os.chdir(str(device_id))\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\", device_id=device_id)\n os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH\n os.environ['RANK_ID'] = str(device_id)\n os.environ['RANK_SIZE'] = str(device_num)\n D.init()\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False,\n device_num=device_num)\n\n net = AllReduceNet()\n input_x = np.ones([3, 4]).astype(np.float32)\n output = net(Tensor(input_x, mstype.float32))\n q.put(output)\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_single\ndef test_pynative_hccl_allreduce_8p():\n device_num = 8\n process = []\n q = Queue()\n for i in range(device_num):\n device_id = i\n process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))\n\n for i in range(device_num):\n process[i].start()\n\n print(\"Waiting for all subprocesses done...\")\n\n for i in range(device_num):\n process[i].join()\n\n # check result\n for i in range(device_num):\n expect_output = [[256, 256, 256, 256], [256, 256, 256, 256], [256, 256, 256, 256]]\n assert not q.empty()\n output = Tensor(q.get())\n assert np.allclose(output.asnumpy(), expect_output)\n\n for i in range(device_num):\n os.system(\"rm -rf \" + str(i))\n\n print(\"End training...\")\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom tqdm import tqdm\nimport numpy as np\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore.dataset import NumpySlicesDataset\nfrom mindspore import context, Tensor\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\nclass AutoEncoderTrainNetwork(nn.Cell):\n def __init__(self):\n super(AutoEncoderTrainNetwork, self).__init__()\n self.loss_fun = nn.MSELoss()\n self.net = nn.CellList([nn.Dense(2, 32), nn.Dense(32, 2)])\n self.relu = nn.ReLU()\n\n def reconstruct_sample(self, x: Tensor):\n for _, layer in enumerate(self.net):\n x = layer(x)\n x = self.relu(x)\n return x\n\n def construct(self, x: Tensor):\n recon_x = self.reconstruct_sample(x)\n return self.loss_fun(recon_x, x)\n\n def sample_2d_data(self, n_normals=2000, n_outliers=400):\n z = np.random.randn(n_normals, 2)\n outliers = np.random.uniform(low=-6, high=6, size=(n_outliers, 2))\n centers = np.array([(2., 0), (-2., 0)])\n sigma = 0.3\n normal_points = sigma * z + centers[np.random.randint(len(centers), size=(n_normals,))]\n return np.vstack((normal_points, outliers))\n\n def create_synthetic_dataset(self):\n transformed_dataset = self.sample_2d_data()\n for dim in range(transformed_dataset.shape[1]):\n min_val = transformed_dataset[:, dim].min()\n max_val = transformed_dataset[:, dim].max()\n if min_val != max_val:\n transformed_dataset[:, dim] = (transformed_dataset[:, dim] - min_val) / (max_val - min_val)\n elif min_val != 1:\n transformed_dataset[:, dim] = transformed_dataset[:, dim] / min_val\n transformed_dataset = transformed_dataset.astype(np.float32)\n return transformed_dataset\n\n\ndef test_auto_monad_layer():\n ae_with_loss = AutoEncoderTrainNetwork()\n transformed_dataset = ae_with_loss.create_synthetic_dataset()\n dataloader = NumpySlicesDataset(data=(transformed_dataset,), shuffle=True)\n dataloader = dataloader.batch(batch_size=16)\n optim = nn.RMSProp(params=ae_with_loss.trainable_params(), learning_rate=0.002,)\n train_net = nn.TrainOneStepCell(ae_with_loss, optim)\n train_net.set_train()\n gen_samples = dict()\n num_epoch = 21\n for epoch in tqdm(range(num_epoch)):\n loss = []\n for _, (batch,) in enumerate(dataloader):\n batch = Tensor(batch, dtype=ms.float32)\n loss_ = train_net(batch)\n loss.append(loss_.asnumpy())\n avg_loss = np.array(loss).mean()\n if epoch % 10 == 0:\n gen_samples[epoch] = ae_with_loss.reconstruct_sample(Tensor(transformed_dataset)).asnumpy()\n print(f\"epoch: {epoch}/{num_epoch}, avg loss: {avg_loss}\")\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.ops import operations as P\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_broadcast():\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n shape = (4, 5, 2, 3, 4, 5, 6)\n x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float32)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (3, 5, 7, 4, 5, 6)\n x_np = np.arange(20).reshape((4, 5, 1)).astype(np.int32)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (8, 5, 7, 4, 5, 6)\n x_np = np.arange(24).reshape((1, 4, 1, 6)).astype(np.bool)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (3, 4, 5, 2, 3, 4, 5, 7)\n x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float16)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (3, 4, 5, 6)\n x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)\n output = P.BroadcastTo(shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (2, 3, 4, 5)\n x1_np = np.random.rand(4, 5).astype(np.float32)\n output = P.BroadcastTo(shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (4, 5)\n x1_np = np.ones((1,)).astype(np.bool_)\n output = P.BroadcastTo(shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_broadcast_dyn_init():\n \"\"\"\n Test running the op with -1's in the init shape to support varied inputs.\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n ms_shape = (-1, 4, 5, 6)\n np_shape = (3, 4, 5, 6)\n x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)\n output = P.BroadcastTo(ms_shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, np_shape)\n assert np.allclose(output.asnumpy(), expect)\n\n x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)\n output = P.BroadcastTo(ms_shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, np_shape)\n assert np.allclose(output.asnumpy(), expect)\n\n ms_shape = (2, 3, -1, 5)\n np_shape = (2, 3, 4, 5)\n x1_np = np.random.rand(4, 
5).astype(np.float32)\n output = P.BroadcastTo(ms_shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, np_shape)\n assert np.allclose(output.asnumpy(), expect)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_broadcast_dyn_invalid_init():\n \"\"\"\n Test running the op with -1's in the init shape in incorrect positions.\n Expected to fail.\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n ms_shape = (2, -1, 4, 5)\n x_np = np.random.rand(4, 5).astype(np.float32)\n with pytest.raises(ValueError):\n P.BroadcastTo(ms_shape)(Tensor(x_np))\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"st for scipy.utils\"\"\"\n\nimport pytest\nimport numpy as onp\nfrom mindspore import context, Tensor\nfrom mindspore.scipy.utils import _safe_normalize\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\n@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])\n@pytest.mark.parametrize('shape', [(10,), (10, 1)])\n@pytest.mark.parametrize('dtype', [onp.float32, onp.float64])\ndef test_safe_normalize(mode, shape, dtype):\n \"\"\"\n Feature: ALL TO ALL\n Description: test cases for _safe_normalize\n Expectation: the result match scipy\n \"\"\"\n context.set_context(mode=mode)\n x = onp.random.random(shape).astype(dtype)\n normalized_x, x_norm = _safe_normalize(Tensor(x))\n\n normalized_x = normalized_x.asnumpy()\n x_norm = x_norm.asnumpy()\n assert onp.allclose(onp.sum(normalized_x ** 2), 1)\n assert onp.allclose(x / x_norm, normalized_x)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport pytest\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.ops.operations as P\nimport mindspore.nn.probability.distribution as msd\nfrom mindspore import context, Tensor\nfrom mindspore.ops import composite as C\nfrom mindspore.common import dtype as mstype\nfrom mindspore import dtype\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\n\nclass Sampling(nn.Cell):\n \"\"\"\n Test class: sample of Normal distribution.\n \"\"\"\n\n def __init__(self, shape, seed=0):\n super(Sampling, self).__init__()\n self.n1 = msd.Normal(0, 1, seed=seed, dtype=dtype.float32)\n self.shape = shape\n\n def construct(self, mean=None, sd=None):\n s1 = self.n1.sample(self.shape, mean, sd)\n s2 = self.n1.sample(self.shape, mean, sd)\n s3 = self.n1.sample(self.shape, mean, sd)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_sample_graph():\n shape = (2, 3)\n seed = 0\n samp = Sampling(shape, seed=seed)\n sample1, sample2, sample3 = samp()\n assert ((sample1 != sample2).any() and (sample1 != sample3).any() and (sample2 != sample3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeNormalNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeNormalNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, mean, stddev):\n s1 = C.normal(self.shape, mean, stddev, self.seed)\n s2 = C.normal(self.shape, mean, stddev, self.seed)\n s3 = C.normal(self.shape, mean, stddev, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_normal():\n shape = (3, 2, 4)\n mean = Tensor(0.0, mstype.float32)\n stddev = Tensor(1.0, mstype.float32)\n net = CompositeNormalNet(shape)\n s1, s2, s3 = net(mean, stddev)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeLaplaceNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeLaplaceNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, mean, lambda_param):\n s1 = C.laplace(self.shape, mean, lambda_param, self.seed)\n s2 = C.laplace(self.shape, mean, lambda_param, self.seed)\n s3 = C.laplace(self.shape, mean, lambda_param, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_laplace():\n shape = (3, 2, 4)\n mean = Tensor(1.0, mstype.float32)\n lambda_param = Tensor(1.0, mstype.float32)\n net = CompositeLaplaceNet(shape)\n s1, s2, s3 = net(mean, lambda_param)\n assert ((s1 != s2).any() and (s1 != 
s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeGammaNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeGammaNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, alpha, beta):\n s1 = C.gamma(self.shape, alpha, beta, self.seed)\n s2 = C.gamma(self.shape, alpha, beta, self.seed)\n s3 = C.gamma(self.shape, alpha, beta, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_gamma():\n shape = (3, 2, 4)\n alpha = Tensor(1.0, mstype.float32)\n beta = Tensor(1.0, mstype.float32)\n net = CompositeGammaNet(shape)\n s1, s2, s3 = net(alpha, beta)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositePoissonNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositePoissonNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, mean):\n s1 = C.poisson(self.shape, mean, self.seed)\n s2 = C.poisson(self.shape, mean, self.seed)\n s3 = C.poisson(self.shape, mean, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_poisson():\n shape = (3, 2, 4)\n mean = Tensor(2.0, mstype.float32)\n net = CompositePoissonNet(shape)\n s1, s2, s3 = net(mean)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeUniformNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeUniformNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, a, b):\n s1 = C.uniform(self.shape, a, b, self.seed)\n s2 = C.uniform(self.shape, a, b, self.seed)\n s3 = C.uniform(self.shape, a, b, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_uniform():\n shape = (3, 2, 4)\n a = Tensor(0.0, mstype.float32)\n b = Tensor(1.0, mstype.float32)\n net = CompositeUniformNet(shape)\n s1, s2, s3 = net(a, b)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass StandardNormalNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(StandardNormalNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.standard_normal = P.StandardNormal(seed, seed2)\n\n def construct(self):\n s1 = self.standard_normal(self.shape)\n s2 = self.standard_normal(self.shape)\n s3 = self.standard_normal(self.shape)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_standard_normal():\n shape = (4, 16)\n net = StandardNormalNet(shape)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass StandardLaplaceNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(StandardLaplaceNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.standard_laplace = P.StandardLaplace(seed, seed2)\n\n def construct(self):\n s1 = self.standard_laplace(self.shape)\n s2 = self.standard_laplace(self.shape)\n s3 
= self.standard_laplace(self.shape)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_standard_laplace():\n shape = (4, 16)\n net = StandardLaplaceNet(shape)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass GammaNet(nn.Cell):\n def __init__(self, shape, alpha, beta, seed=0, seed2=0):\n super(GammaNet, self).__init__()\n self.shape = shape\n self.alpha = alpha\n self.beta = beta\n self.seed = seed\n self.seed2 = seed2\n self.gamma = P.Gamma(seed, seed2)\n\n def construct(self):\n s1 = self.gamma(self.shape, self.alpha, self.beta)\n s2 = self.gamma(self.shape, self.alpha, self.beta)\n s3 = self.gamma(self.shape, self.alpha, self.beta)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_gamma():\n shape = (4, 16)\n alpha = Tensor(1.0, mstype.float32)\n beta = Tensor(1.0, mstype.float32)\n net = GammaNet(shape, alpha, beta)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass PoissonNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(PoissonNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.poisson = P.Poisson(seed, seed2)\n\n def construct(self, mean):\n s1 = self.poisson(self.shape, mean)\n s2 = self.poisson(self.shape, mean)\n s3 = self.poisson(self.shape, mean)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_poisson():\n shape = (4, 16)\n mean = Tensor(5.0, mstype.float32)\n net = PoissonNet(shape=shape)\n s1, s2, s3 = net(mean)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass UniformIntNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(UniformIntNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.uniform_int = P.UniformInt(seed, seed2)\n\n def construct(self, minval, maxval):\n s1 = self.uniform_int(self.shape, minval, maxval)\n s2 = self.uniform_int(self.shape, minval, maxval)\n s3 = self.uniform_int(self.shape, minval, maxval)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_uniform_int():\n shape = (4, 16)\n minval = Tensor(1, mstype.int32)\n maxval = Tensor(5, mstype.int32)\n net = UniformIntNet(shape)\n s1, s2, s3 = net(minval, maxval)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass UniformRealNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(UniformRealNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.uniform_real = P.UniformReal(seed, seed2)\n\n def construct(self):\n s1 = self.uniform_real(self.shape)\n s2 = self.uniform_real(self.shape)\n s3 = self.uniform_real(self.shape)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_uniform_real():\n shape = (4, 16)\n net = UniformRealNet(shape)\n s1, s2, s3 = net()\n 
assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass DropoutGenMaskNet(nn.Cell):\n def __init__(self, shape):\n super(DropoutGenMaskNet, self).__init__()\n self.shape = shape\n self.dropout_gen_mask = P.DropoutGenMask(Seed0=0, Seed1=0)\n\n def construct(self, keep_prob):\n s1 = self.dropout_gen_mask(self.shape, keep_prob)\n s2 = self.dropout_gen_mask(self.shape, keep_prob)\n s3 = self.dropout_gen_mask(self.shape, keep_prob)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_dropout_gen_mask():\n shape = (2, 4, 5)\n keep_prob = Tensor(0.5, mstype.float32)\n net = DropoutGenMaskNet(shape)\n s1, s2, s3 = net(keep_prob)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass RandomChoiceWithMaskNet(nn.Cell):\n def __init__(self):\n super(RandomChoiceWithMaskNet, self).__init__()\n self.rnd_choice_mask = P.RandomChoiceWithMask(count=4, seed=0)\n\n def construct(self, x):\n index1, _ = self.rnd_choice_mask(x)\n index2, _ = self.rnd_choice_mask(x)\n index3, _ = self.rnd_choice_mask(x)\n return index1, index2, index3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_random_choice_with_mask():\n mode = context.get_context('mode')\n assert (mode == context.GRAPH_MODE), 'GRAPH_MODE required but got ' + str(mode)\n net = RandomChoiceWithMaskNet()\n x = Tensor(np.array([[1, 0, 1, 0], [0, 0, 0, 1], [1, 1, 1, 1], [0, 0, 0, 1]]).astype(np.bool))\n index1, index2, index3 = net(x)\n assert ((index1 != index2).any() and (index1 != index3).any() and (index2 != index3).any()), \\\n \"The results should be different!\"\n\n\nclass RandomCategoricalNet(nn.Cell):\n def __init__(self, num_sample):\n super(RandomCategoricalNet, self).__init__()\n self.random_categorical = P.RandomCategorical(mstype.int64)\n self.num_sample = num_sample\n\n def construct(self, logits, seed=0):\n s1 = self.random_categorical(logits, self.num_sample, seed)\n s2 = self.random_categorical(logits, self.num_sample, seed)\n s3 = self.random_categorical(logits, self.num_sample, seed)\n return s1, s2, s3\n\n\n@pytest.mark.level1\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_random_categorical():\n num_sample = 8\n net = RandomCategoricalNet(num_sample)\n x = Tensor(np.random.random((10, 5)).astype(np.float32))\n # Outputs may be the same, only basic functions are verified here.\n net(x)\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.random.seed"
],
[
"numpy.random.uniform",
"numpy.random.randn",
"numpy.array",
"numpy.vstack"
],
[
"numpy.broadcast_to",
"numpy.ones",
"numpy.arange",
"numpy.random.rand"
],
[
"numpy.random.random",
"numpy.allclose",
"numpy.sum"
],
[
"numpy.array",
"numpy.random.random"
]
] |
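`test_broadcast` in the files above uses `np.broadcast_to` as the reference implementation for MindSpore's `BroadcastTo`. A numpy-only sketch of the broadcasting rule being relied on: trailing axes must match or be 1, and extra leading axes are prepended.

```python
import numpy as np

# (4, 5, 1) -> (3, 5, 7, 4, 5, 6): the size-1 trailing axis stretches to 6
# and three new leading axes (3, 5, 7) are prepended, as in the test above.
x = np.arange(20).reshape(4, 5, 1).astype(np.int32)
out = np.broadcast_to(x, (3, 5, 7, 4, 5, 6))
assert out.shape == (3, 5, 7, 4, 5, 6)
assert (out[0, 0, 0] == out[2, 4, 6]).all()  # every broadcast copy is identical
```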
navin3011/Seminar-Energy-economy | [
"ddff1bf28f445d5a447fab119d7a6192f231d9c3"
] | [
"simbench/converter/voltLvl.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer\n# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual\n# contributors (see AUTHORS file for details). All rights reserved.\n\nimport numpy as np\nfrom pandas import Series\nfrom pandapower import element_bus_tuples\n\n__author__ = \"smeinecke\"\n\n\ndef convert_voltlvl_to_int(voltage_level):\n \"\"\" Returns voltage level names as int. \"\"\"\n if voltage_level in [\"EHV\", \"ehv\", \"UHV\", \"uhv\"]:\n return 1\n elif voltage_level in [\"EHV-HV\", \"ehv-hv\", \"UHV-HV\", \"uhv-hv\", \"EHVHV\", \"ehvhv\", \"UHVHV\",\n \"uhvhv\"]:\n return 2\n elif voltage_level in [\"HV\", \"hv\"]:\n return 3\n elif voltage_level in [\"HV-MV\", \"hv-mv\", \"HVMV\", \"hvmv\"]:\n return 4\n elif voltage_level in [\"MV\", \"mv\"]:\n return 5\n elif voltage_level in [\"MV-LV\", \"mv-lv\", \"MVLV\", \"mvlv\"]:\n return 6\n elif voltage_level in [\"LV\", \"lv\"]:\n return 7\n else:\n return int(voltage_level)\n\n\ndef convert_voltlvl_to_str(voltage_level):\n \"\"\" Returns voltage level names as string. \"\"\"\n return [\"EHV\", \"EHV-HV\", \"HV\", \"HV-MV\", \"MV\", \"MV-LV\", \"LV\"][convert_voltlvl_to_int(\n voltage_level)-1]\n\n\ndef convert_voltlvl_names(voltage_levels, desired_format):\n \"\"\" Returns voltage level names in desired format.\n EXAMPLE:\n voltlvl_names = convert_voltlvl_names([1, 2, \"hv\", 4, 5, \"ehv\", 7], str)\n \"\"\"\n if desired_format == str:\n if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, \"__iter__\")):\n return convert_voltlvl_to_str(voltage_levels)\n else:\n names = []\n for voltage_level in voltage_levels:\n for voltage_level in voltage_levels:\n names += [convert_voltlvl_to_str(voltage_level)]\n return names\n elif desired_format == int:\n if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, \"__iter__\")):\n return convert_voltlvl_to_int(voltage_levels)\n else:\n names = []\n for voltage_level in voltage_levels:\n for voltage_level in voltage_levels:\n names += [convert_voltlvl_to_int(voltage_level)]\n return names\n else:\n raise ValueError(\"desired_format must be str or int\")\n\n\ndef _voltlvl_idx(net, element, voltage_level, branch_bus=None, vn_kv_limits=[145, 60, 1]):\n \"\"\" similar to voltlvl_idx, but for only one voltage_level \"\"\"\n vn_kv_limits = [np.inf] + vn_kv_limits + [-np.inf]\n voltage_level = convert_voltlvl_names(voltage_level, int)\n lim_max = [0, 0, 1, 1, 2, 2, 3][voltage_level-1]\n lim_min = [1, 2, 2, 3, 3, 4, 4][voltage_level-1]\n Idx_bus = net.bus.index[(net.bus.vn_kv <= vn_kv_limits[lim_max]) &\n (net.bus.vn_kv > vn_kv_limits[lim_min])]\n if element == \"bus\":\n return list(Idx_bus)\n\n if branch_bus is None and element not in [\"trafo\", \"trafo3w\"]:\n # for all other elements than trafos, take the first possibility\n for elm, bus_name in element_bus_tuples():\n if elm == element:\n branch_bus = bus_name\n break\n\n if element == \"measurement\":\n measurement_buses = Series(index=net.measurement.index)\n # bus\n bool_ = net.measurement.element_type == \"bus\"\n measurement_buses.loc[bool_] = net.measurement.element.loc[bool_]\n # line and trafo\n for branch, side in zip([\"line\", \"line\", \"trafo\", \"trafo\"], [\"from\", \"to\", \"hv\", \"lv\"]):\n bus = side + \"_bus\"\n bool1 = net.measurement.element_type == branch\n bool2 = net.measurement.side == side\n measurement_buses.loc[bool1 & bool2] = net[branch][bus].loc[net.measurement.element.loc[\n 
bool1 & bool2]].values\n measurement_buses = measurement_buses.astype(int)\n isin_Idx_bus = measurement_buses.isin(Idx_bus)\n\n elif branch_bus in net[element].columns: # all other elements than measurement and bus\n isin_Idx_bus = net[element][branch_bus].isin(Idx_bus)\n\n else:\n raise KeyError(\"For net[%s] there is no column '%s'. Please\" % (element, str(branch_bus)) +\n \" give 'branch_bus' an valid bus column name, e.g. 'hv_bus' or 'lv_bus'.\")\n\n return list(net[element].index[isin_Idx_bus])\n\n\ndef voltlvl_idx(net, element, voltage_levels, branch_bus=None, vn_kv_limits=[145, 60, 1]):\n \"\"\"\n Returns indices of elements with special voltage level.\n Even voltage_level numbers behave equally to both neighboring numbers, i.e. 4 == [3, 5] and\n \"EHV-HV\" == [\"EHV\", \"HV\"].\n\n EXAMPLE:\n hv_and_mv_buses = voltlvl_idx(net, \"bus\", 4) # 4 == [3, 5]\n hv_and_mv_buses = voltlvl_idx(net, \"bus\", [3, 5])\n mv_loads = voltlvl_idx(net, \"load\", \"MV\")\n hvmv_trafos = voltlvl_idx(net, \"trafo\", \"HV\", branch_bus=\"hv_bus\")\n hvmv_trafos = voltlvl_idx(net, \"trafo\", \"MV\", branch_bus=\"lv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", 2, branch_bus=\"hv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", [1, 3], branch_bus=\"hv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", 4, branch_bus=\"lv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", [3, 5], branch_bus=\"lv_bus\")\n ehvhv_trafos = voltlvl_idx(net, \"trafo\", 2, branch_bus=\"lv_bus\")\n ehv_measurements = voltlvl_idx(net, \"measurement\", \"EHV\")\n \"\"\"\n if not net[element].shape[0]:\n return []\n\n if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, \"__iter__\")):\n return _voltlvl_idx(net, element, voltage_levels, branch_bus=branch_bus,\n vn_kv_limits=vn_kv_limits)\n else:\n Idx = []\n for voltage_level in voltage_levels:\n Idx += _voltlvl_idx(net, element, voltage_level, branch_bus=branch_bus,\n vn_kv_limits=vn_kv_limits)\n return Idx\n\n\ndef get_voltlvl(voltage_values, vn_kv_limits=[145, 60, 1]):\n \"\"\" Returns an array of voltage levels as integer. \"\"\"\n iter_ = hasattr(voltage_values, \"__iter__\")\n voltage_values = voltage_values if iter_ else [voltage_values]\n voltage_values = np.array(voltage_values)\n voltage_levels = np.ones(voltage_values.shape)\n for lim in vn_kv_limits:\n voltage_levels[voltage_values <= lim] += 2\n if iter_:\n return voltage_levels.astype(int)\n else:\n return int(voltage_levels[0])\n"
] | [
[
"numpy.array",
"numpy.ones",
"pandas.Series"
]
] |
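The converter above encodes voltage levels as odd integers (EHV=1, HV=3, MV=5, LV=7), with the even integers reserved for the transformer levels between them; `get_voltlvl` realizes this by starting at 1 and adding 2 for every `vn_kv` limit the voltage falls at or below. A self-contained re-run of that logic; the 380/110/20/0.4 kV inputs are illustrative values, not taken from the source.

```python
import numpy as np

def get_voltlvl(voltage_values, vn_kv_limits=(145, 60, 1)):
    # mirrors the function in the module above
    levels = np.ones(np.shape(voltage_values))
    for lim in vn_kv_limits:
        levels[np.asarray(voltage_values) <= lim] += 2
    return levels.astype(int)

assert list(get_voltlvl([380, 110, 20, 0.4])) == [1, 3, 5, 7]  # EHV, HV, MV, LV
```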
freol35241/pysim | [
"36faf67d00ff644a593f20994c0f15053d600886"
] | [
"pysim/systems/python_systems.py"
] | [
"\"\"\"Example systems created in Python\n\"\"\"\nimport numpy as np\n\nfrom pysim.cythonsystem import Sys\n\nclass VanDerPol(Sys):\n \"\"\"Simple example of a class representing a VanDerPol oscillator.\n \"\"\"\n def __init__(self):\n self.add_state_scalar(\"x\", \"dx\")\n self.add_state_scalar(\"y\", \"dy\")\n self.add_input_scalar(\"a\")\n self.add_input_scalar(\"b\")\n self.inputs.a = 1.0\n self.inputs.b = 1.0\n self.states.x = 1.0\n self.states.y = 0.0\n\n def do_step(self,dummy):\n \"\"\"Perform a timestep by implmenting the VanDerPol equations\"\"\"\n \n a = self.inputs.a\n b = self.inputs.b\n x = self.states.x\n y = self.states.y\n\n self.ders.dx = a*x*(b-y*y)-y\n self.ders.dy = x\n\n\nclass MassSpringDamper(Sys):\n \"\"\"Simple class for testing the mass-spring-damper simulations with \n a cython system\"\"\"\n\n def __init__(self):\n \"\"\"Setup two states (one dimensional vectors for now). Initial \n conditions are simular to those in the build in c++ system\"\"\"\n self.add_state_scalar(\"x1\", \"dx1\")\n self.add_state_scalar(\"x2\", \"dx2\")\n self.states.x1 = 1\n self.states.x2 = 0\n\n def do_step(self,dummy):\n \"\"\"Perform a step using default constants, same as those in the \n cpp system\"\"\"\n\n m = 100.0\n b = 1.0\n k = 50.0\n f = 0.0\n x1 = self.states.x1\n x2 = self.states.x2\n self.ders.dx1 = x2\n self.ders.dx2 =-k/m*x1-b/m*x2+1/m*f\n\nclass InOutTestSystem(Sys):\n \"\"\"Python representation of the cpp InOutTestSystem\n\n Used for testing that the cpp system behaves as the python system\n with regards to the input output handling\n \"\"\"\n def __init__(self):\n self.add_input_scalar(\"input_scalar\")\n self.add_input_vector(\"input_vector\",3)\n self.add_input_matrix(\"input_matrix\",3,3)\n\n self.add_state_scalar(\"state_scalar\",\"der_scalar\")\n self.add_state_vector(\"state_vector\",\"der_vector\", 3)\n self.add_state_matrix(\"state_matrix\",\"der_matrix\", 3, 3)\n\n self.add_output_scalar(\"input_output_scalar\")\n self.add_output_vector(\"input_output_vector\",3)\n self.add_output_matrix(\"input_output_matrix\",3,3)\n self.add_output_scalar(\"state_output_scalar\")\n self.add_output_vector(\"state_output_vector\",3)\n self.add_output_matrix(\"state_output_matrix\",3,3)\n\n self.inputs.input_scalar = 0.0\n self.inputs.input_vector = [0.0, 0.0, 0.0]\n self.inputs.input_matrix = np.zeros((3,3))\n\n self.outputs.input_output_scalar = 0.0\n self.outputs.input_output_vector = [0.0, 0.0, 0.0]\n self.outputs.input_output_matrix = np.zeros((3,3))\n self.outputs.state_output_scalar = 0.0\n self.outputs.state_output_vector = [0.0, 0.0, 0.0]\n self.outputs.state_output_matrix = np.zeros((3,3))\n\n self.states.state_scalar = 1.23\n self.states.state_vector = np.ones(3)*4.56\n self.states.state_matrix = np.ones((3,3))*7.89\n self.ders.der_scalar = 0\n self.ders.der_vector = np.zeros(3)\n self.ders.der_matrix = np.zeros((3,3))\n\n def do_step(self,dummy):\n \"\"\"During a timestep we set the outputs to their respective inputs\"\"\"\n self.outputs.input_output_scalar = self.inputs.input_scalar\n self.outputs.input_output_vector = self.inputs.input_vector\n self.outputs.input_output_matrix = self.inputs.input_matrix\n self.outputs.state_output_scalar = self.states.state_scalar\n self.outputs.state_output_vector = self.states.state_vector\n self.outputs.state_output_matrix = self.states.state_matrix\n"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
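`VanDerPol.do_step` above fills the derivatives `dx = a*x*(b - y*y) - y`, `dy = x`, which pysim's solver then integrates. A rough explicit-Euler sketch of the same equations with the class's initial conditions; the step size and step count are arbitrary choices for illustration, not pysim defaults.

```python
a = b = 1.0      # defaults set in VanDerPol.__init__
x, y = 1.0, 0.0  # initial states from the class
dt = 0.01        # arbitrary step size for this sketch
for _ in range(1000):  # integrate 10 time units
    dx = a * x * (b - y * y) - y
    dy = x
    x, y = x + dt * dx, y + dt * dy
assert abs(x) < 10 and abs(y) < 10  # the oscillator stays on a bounded limit cycle
```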
GrapeBaBa/ibis | [
"507bb14efdcfd719a0487ee23fe1c85c177517f6"
] | [
"ibis/tests/benchmarks/test_benchmarks.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.pandas.udf import udf\n\n\ndef make_t():\n return ibis.table(\n [\n ('_timestamp', 'int32'),\n ('dim1', 'int32'),\n ('dim2', 'int32'),\n ('valid_seconds', 'int32'),\n ('meas1', 'int32'),\n ('meas2', 'int32'),\n ('year', 'int32'),\n ('month', 'int32'),\n ('day', 'int32'),\n ('hour', 'int32'),\n ('minute', 'int32'),\n ],\n name=\"t\",\n )\n\n\n@pytest.fixture\ndef t():\n return make_t()\n\n\ndef make_base(t):\n return (\n (t.year > 2016)\n | ((t.year == 2016) & (t.month > 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day > 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour > 6))\n | (\n (t.year == 2016)\n & (t.month == 6)\n & (t.day == 6)\n & (t.hour == 6)\n & (t.minute >= 5)\n )\n ) & (\n (t.year < 2016)\n | ((t.year == 2016) & (t.month < 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day < 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour < 6))\n | (\n (t.year == 2016)\n & (t.month == 6)\n & (t.day == 6)\n & (t.hour == 6)\n & (t.minute <= 5)\n )\n )\n\n\n@pytest.fixture\ndef base(t):\n return make_base(t)\n\n\ndef make_large_expr(t, base):\n src_table = t[base]\n src_table = src_table.mutate(\n _timestamp=(src_table['_timestamp'] - src_table['_timestamp'] % 3600)\n .cast('int32')\n .name('_timestamp'),\n valid_seconds=300,\n )\n\n aggs = []\n for meas in ['meas1', 'meas2']:\n aggs.append(src_table[meas].sum().cast('float').name(meas))\n src_table = src_table.aggregate(\n aggs, by=['_timestamp', 'dim1', 'dim2', 'valid_seconds']\n )\n\n part_keys = ['year', 'month', 'day', 'hour', 'minute']\n ts_col = src_table['_timestamp'].cast('timestamp')\n new_cols = {}\n for part_key in part_keys:\n part_col = getattr(ts_col, part_key)()\n new_cols[part_key] = part_col\n src_table = src_table.mutate(**new_cols)\n return src_table[\n [\n '_timestamp',\n 'dim1',\n 'dim2',\n 'meas1',\n 'meas2',\n 'year',\n 'month',\n 'day',\n 'hour',\n 'minute',\n ]\n ]\n\n\n@pytest.fixture\ndef large_expr(t, base):\n return make_large_expr(t, base)\n\n\n@pytest.mark.benchmark(group=\"construction\")\n@pytest.mark.parametrize(\n \"construction_fn\",\n [\n pytest.param(lambda *_: make_t(), id=\"small\"),\n pytest.param(lambda t, *_: make_base(t), id=\"medium\"),\n pytest.param(lambda t, base: make_large_expr(t, base), id=\"large\"),\n ],\n)\ndef test_construction(benchmark, construction_fn, t, base):\n benchmark(construction_fn, t, base)\n\n\n@pytest.mark.benchmark(group=\"builtins\")\n@pytest.mark.parametrize(\n \"expr_fn\",\n [\n pytest.param(lambda t, _base, _large_expr: t, id=\"small\"),\n pytest.param(lambda _t, base, _large_expr: base, id=\"medium\"),\n pytest.param(lambda _t, _base, large_expr: large_expr, id=\"large\"),\n ],\n)\n@pytest.mark.parametrize(\"builtin\", [hash, str])\ndef test_builtins(benchmark, expr_fn, builtin, t, base, large_expr):\n expr = expr_fn(t, base, large_expr)\n benchmark(builtin, expr)\n\n\n@pytest.mark.benchmark(group=\"compilation\")\n@pytest.mark.parametrize(\"module\", [\"impala\", \"sqlite\"])\n@pytest.mark.parametrize(\n \"expr_fn\",\n [\n pytest.param(lambda t, _base, _large_expr: t, id=\"small\"),\n pytest.param(lambda _t, base, _large_expr: base, id=\"medium\"),\n pytest.param(lambda _t, _base, large_expr: large_expr, id=\"large\"),\n ],\n)\ndef test_compile(benchmark, module, expr_fn, t, base, large_expr):\n try:\n mod = getattr(ibis, module)\n except AttributeError as e:\n pytest.skip(str(e))\n else:\n expr = 
expr_fn(t, base, large_expr)\n        benchmark(mod.compile, expr)\n\n\n@pytest.fixture\ndef pt():\n    n = 60_000\n    data = pd.DataFrame(\n        {\n            'key': np.random.choice(16000, size=n),\n            'low_card_key': np.random.choice(30, size=n),\n            'value': np.random.rand(n),\n            'timestamps': pd.date_range(\n                start='now', periods=n, freq='s'\n            ).values,\n            'timestamp_strings': pd.date_range(\n                start='now', periods=n, freq='s'\n            ).values.astype(str),\n            'repeated_timestamps': pd.date_range(\n                start='2018-09-01', periods=30\n            ).repeat(int(n / 30)),\n        }\n    )\n\n    return ibis.pandas.connect(dict(df=data)).table('df')\n\n\ndef high_card_group_by(t):\n    return t.groupby(t.key).aggregate(avg_value=t.value.mean())\n\n\ndef cast_to_dates(t):\n    return t.timestamps.cast(dt.date)\n\n\ndef cast_to_dates_from_strings(t):\n    return t.timestamp_strings.cast(dt.date)\n\n\ndef multikey_group_by_with_mutate(t):\n    return (\n        t.mutate(dates=t.timestamps.cast('date'))\n        .groupby(['low_card_key', 'dates'])\n        .aggregate(avg_value=lambda t: t.value.mean())\n    )\n\n\ndef simple_sort(t):\n    return t.sort_by([t.key])\n\n\ndef simple_sort_projection(t):\n    return t[['key', 'value']].sort_by(['key'])\n\n\ndef multikey_sort(t):\n    return t.sort_by(['low_card_key', 'key'])\n\n\ndef multikey_sort_projection(t):\n    return t[['low_card_key', 'key', 'value']].sort_by(['low_card_key', 'key'])\n\n\ndef low_card_rolling_window(t):\n    return ibis.trailing_range_window(\n        ibis.interval(days=2),\n        order_by=t.repeated_timestamps,\n        group_by=t.low_card_key,\n    )\n\n\ndef low_card_grouped_rolling(t):\n    return t.value.mean().over(low_card_rolling_window(t))\n\n\ndef high_card_rolling_window(t):\n    return ibis.trailing_range_window(\n        ibis.interval(days=2),\n        order_by=t.repeated_timestamps,\n        group_by=t.key,\n    )\n\n\ndef high_card_grouped_rolling(t):\n    return t.value.mean().over(high_card_rolling_window(t))\n\n\n@udf.reduction(['double'], 'double')\ndef my_mean(series):\n    return series.mean()\n\n\ndef low_card_grouped_rolling_udf_mean(t):\n    return my_mean(t.value).over(low_card_rolling_window(t))\n\n\ndef high_card_grouped_rolling_udf_mean(t):\n    return my_mean(t.value).over(high_card_rolling_window(t))\n\n\n@udf.analytic(['double'], 'double')\ndef my_zscore(series):\n    return (series - series.mean()) / series.std()\n\n\ndef low_card_window(t):\n    return ibis.window(group_by=t.low_card_key)\n\n\ndef high_card_window(t):\n    return ibis.window(group_by=t.key)\n\n\ndef low_card_window_analytics_udf(t):\n    return my_zscore(t.value).over(low_card_window(t))\n\n\ndef high_card_window_analytics_udf(t):\n    return my_zscore(t.value).over(high_card_window(t))\n\n\n@udf.reduction(['double', 'double'], 'double')\ndef my_wm(v, w):\n    return np.average(v, weights=w)\n\n\ndef low_card_grouped_rolling_udf_wm(t):\n    return my_wm(t.value, t.value).over(low_card_rolling_window(t))\n\n\ndef high_card_grouped_rolling_udf_wm(t):\n    return my_wm(t.value, t.value).over(high_card_rolling_window(t))\n\n\n@pytest.mark.benchmark(group=\"execution\")\n@pytest.mark.parametrize(\n    \"expression_fn\",\n    [\n        pytest.param(high_card_group_by, id=\"high_card_group_by\"),\n        pytest.param(cast_to_dates, id=\"cast_to_dates\"),\n        pytest.param(\n            cast_to_dates_from_strings, id=\"cast_to_dates_from_strings\"\n        ),\n        pytest.param(\n            multikey_group_by_with_mutate, id=\"multikey_group_by_with_mutate\"\n        ),\n        pytest.param(simple_sort, id=\"simple_sort\"),\n        pytest.param(simple_sort_projection, id=\"simple_sort_projection\"),\n        pytest.param(multikey_sort, id=\"multikey_sort\"),\n        pytest.param(multikey_sort_projection, 
id=\"multikey_sort_projection\"),\n pytest.param(low_card_grouped_rolling, id=\"low_card_grouped_rolling\"),\n pytest.param(\n high_card_grouped_rolling, id=\"high_card_grouped_rolling\"\n ),\n pytest.param(\n low_card_grouped_rolling_udf_mean,\n id=\"low_card_grouped_rolling_udf_mean\",\n ),\n pytest.param(\n high_card_grouped_rolling_udf_mean,\n id=\"high_card_grouped_rolling_udf_mean\",\n ),\n pytest.param(\n low_card_window_analytics_udf, id=\"low_card_window_analytics_udf\"\n ),\n pytest.param(\n high_card_window_analytics_udf, id=\"high_card_window_analytics_udf\"\n ),\n pytest.param(\n low_card_grouped_rolling_udf_wm,\n id=\"low_card_grouped_rolling_udf_wm\",\n ),\n pytest.param(\n high_card_grouped_rolling_udf_wm,\n id=\"high_card_grouped_rolling_udf_wm\",\n ),\n ],\n)\ndef test_execute(benchmark, expression_fn, pt):\n expr = expression_fn(pt)\n benchmark(expr.execute)\n"
] | [
[
"pandas.date_range",
"numpy.random.rand",
"numpy.average",
"numpy.random.choice"
]
] |
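A note on the benchmark suite above: the `execution`-group functions only build ibis expressions; the timing work happens in `benchmark(expr.execute)`. The following is a minimal sketch (not part of the dumped file) of running one of those expressions end to end, reusing only calls that already appear in the code above (`ibis.pandas.connect`, `groupby`/`aggregate`, `execute`, i.e. the legacy pre-3.x ibis API); the smaller row count `n` is an assumption made here to keep the run quick.

```python
import numpy as np
import pandas as pd
import ibis

# A shrunken version of the frame built by the `pt` fixture above.
n = 1_000  # assumed small size; the fixture uses 60_000
df = pd.DataFrame(
    {
        'key': np.random.choice(16000, size=n),
        'value': np.random.rand(n),
    }
)

# Same connection pattern as the fixture: the legacy ibis pandas backend.
t = ibis.pandas.connect(dict(df=df)).table('df')

# The `high_card_group_by` expression from the suite above.
expr = t.groupby(t.key).aggregate(avg_value=t.value.mean())

# pytest-benchmark times `expr.execute`; here it runs once for inspection.
print(expr.execute().head())
```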
aman-gupta-1995/Machine-Learning-Mindware | [
"8b3050720711730520683c89949e3dbdfb168961",
"8b3050720711730520683c89949e3dbdfb168961"
] | [
"examples/cls_exp_user_defined_model.py",
"test/exps/basics/evaluate_text2vector.py"
] | [
"import argparse\nimport os\nimport sys\nimport time\nimport numpy as np\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter, CategoricalHyperparameter, \\\n UnParametrizedHyperparameter, Constant\nfrom sklearn.datasets import load_iris\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nsys.path.append(os.getcwd())\nfrom mindware.utils.data_manager import DataManager\nfrom mindware.estimators import Classifier\nfrom mindware.components.models.base_model import BaseClassificationModel\nfrom mindware.components.models.classification import add_classifier\nfrom mindware.components.utils.configspace_utils import check_none\nfrom mindware.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--time_limit', type=int, default=1200)\nargs = parser.parse_args()\n\ntime_limit = args.time_limit\n\n\nclass UserDefinedDecisionTree(BaseClassificationModel):\n def __init__(self, criterion, max_features, max_depth_factor,\n min_samples_split, min_samples_leaf, min_weight_fraction_leaf,\n max_leaf_nodes, min_impurity_decrease, class_weight=None,\n random_state=None):\n self.criterion = criterion\n self.max_features = max_features\n self.max_depth_factor = max_depth_factor\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.max_leaf_nodes = max_leaf_nodes\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.min_impurity_decrease = min_impurity_decrease\n self.random_state = random_state\n self.class_weight = class_weight\n self.estimator = None\n self.time_limit = None\n\n def fit(self, X, y, sample_weight=None):\n from sklearn.tree import DecisionTreeClassifier\n\n self.max_features = float(self.max_features)\n # Heuristic to set the tree depth\n if check_none(self.max_depth_factor):\n max_depth_factor = self.max_depth_factor = None\n else:\n num_features = X.shape[1]\n self.max_depth_factor = int(self.max_depth_factor)\n max_depth_factor = max(\n 1,\n int(np.round(self.max_depth_factor * num_features, 0)))\n self.min_samples_split = int(self.min_samples_split)\n self.min_samples_leaf = int(self.min_samples_leaf)\n if check_none(self.max_leaf_nodes):\n self.max_leaf_nodes = None\n else:\n self.max_leaf_nodes = int(self.max_leaf_nodes)\n self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)\n self.min_impurity_decrease = float(self.min_impurity_decrease)\n\n self.estimator = DecisionTreeClassifier(\n criterion=self.criterion,\n max_depth=max_depth_factor,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n max_leaf_nodes=self.max_leaf_nodes,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n min_impurity_decrease=self.min_impurity_decrease,\n class_weight=self.class_weight,\n random_state=self.random_state)\n self.estimator.fit(X, y, sample_weight=sample_weight)\n return self\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return self.estimator.predict(X)\n\n def predict_proba(self, X):\n if self.estimator is None:\n raise NotImplementedError()\n probas = self.estimator.predict_proba(X)\n return probas\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'DT',\n 'name': 'Decision Tree Classifier',\n 'handles_regression': False,\n 'handles_classification': True,\n 
'handles_multiclass': True,\n 'handles_multilabel': True,\n 'is_deterministic': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n if optimizer == 'smac':\n cs = ConfigurationSpace()\n criterion = CategoricalHyperparameter(\n \"criterion\", [\"gini\", \"entropy\"], default_value=\"gini\")\n max_depth_factor = UniformFloatHyperparameter(\n 'max_depth_factor', 0., 2., default_value=0.5)\n min_samples_split = UniformIntegerHyperparameter(\n \"min_samples_split\", 2, 20, default_value=2)\n min_samples_leaf = UniformIntegerHyperparameter(\n \"min_samples_leaf\", 1, 20, default_value=1)\n min_weight_fraction_leaf = Constant(\"min_weight_fraction_leaf\", 0.0)\n max_features = UnParametrizedHyperparameter('max_features', 1.0)\n max_leaf_nodes = UnParametrizedHyperparameter(\"max_leaf_nodes\", \"None\")\n min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)\n\n cs.add_hyperparameters([criterion, max_features, max_depth_factor,\n min_samples_split, min_samples_leaf,\n min_weight_fraction_leaf, max_leaf_nodes,\n min_impurity_decrease])\n return cs\n\n\nprint('==> Start to evaluate with Budget %d' % time_limit)\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)\ndm = DataManager(X_train, y_train)\ntrain_data = dm.get_data_node(X_train, y_train)\ntest_data = dm.get_data_node(X_test, y_test)\n\nsave_dir = './data/eval_exps/soln-ml'\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\nadd_classifier(UserDefinedDecisionTree)\nclf = Classifier(time_limit=time_limit,\n output_dir=save_dir,\n include_algorithms=['UserDefinedDecisionTree'],\n random_state=1,\n metric='acc',\n n_jobs=1)\n_start_time = time.time()\n_iter_id = 0\n\nclf.fit(train_data)\npred = clf.predict(test_data)\n\nprint(balanced_accuracy_score(test_data.data[1], pred))\n",
"import numpy as np\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nfrom mindware.components.feature_engineering.transformations.preprocessor.text2vector import \\\n Text2VectorTransformation\nfrom mindware.components.feature_engineering.transformation_graph import DataNode\nfrom mindware.components.utils.constants import *\nfrom mindware.estimators import Classifier\n\nx = np.array([[1, 'I am good', 'I am right', 3], [2, 'He is good', 'He is ok', 4],\n [2.5, 'Everyone is good', 'Everyone is ok', 7], [1.3333, 'well', 'what', 5]])\ny = np.array([0, 1, 0, 1])\n\nt2v = Text2VectorTransformation()\ndata = (x, y)\nfeature_type = [NUMERICAL, TEXT, TEXT, DISCRETE]\ndatanode = DataNode(data, feature_type)\n\nclf = Classifier(time_limit=20,\n enable_meta_algorithm_selection=False,\n include_algorithms=['random_forest'])\n\nclf.fit(datanode, opt_strategy='combined')\nprint(clf.predict(datanode))\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.balanced_accuracy_score",
"numpy.round",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_iris"
],
[
"numpy.array"
]
] |
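A note on the Mindware example above: `get_hyperparameter_search_space` hands the optimizer a ConfigSpace object, and every sampled configuration becomes the keyword arguments of `UserDefinedDecisionTree.__init__`. The following is a minimal sketch of that sampling step in isolation, assuming only the ConfigSpace package; the hyperparameter definitions are copied from the file above, and `sample_configuration`/`get_dictionary` are standard ConfigSpace calls.

```python
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
    CategoricalHyperparameter,
    Constant,
    UniformFloatHyperparameter,
    UniformIntegerHyperparameter,
    UnParametrizedHyperparameter,
)

# The space exactly as get_hyperparameter_search_space() defines it above.
cs = ConfigurationSpace()
cs.add_hyperparameters([
    CategoricalHyperparameter(
        "criterion", ["gini", "entropy"], default_value="gini"),
    UniformFloatHyperparameter("max_depth_factor", 0., 2., default_value=0.5),
    UniformIntegerHyperparameter("min_samples_split", 2, 20, default_value=2),
    UniformIntegerHyperparameter("min_samples_leaf", 1, 20, default_value=1),
    Constant("min_weight_fraction_leaf", 0.0),
    UnParametrizedHyperparameter("max_features", 1.0),
    UnParametrizedHyperparameter("max_leaf_nodes", "None"),
    UnParametrizedHyperparameter("min_impurity_decrease", 0.0),
])

# One sampled configuration, unpacked the way the optimizer drives the model:
# UserDefinedDecisionTree(**params) in the example above.
params = cs.sample_configuration().get_dictionary()
print(params)
```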
maybeLee/keras | [
"793620ae1bdda7e37edd485b034e8962fff57f3e"
] | [
"keras/preprocessing/image.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n# pylint: disable=g-import-not-at-top\n# pylint: disable=g-classes-have-attributes\n# pylint: disable=g-direct-tensorflow-import\n\"\"\"Utilies for image preprocessing and augmentation.\n\nWarning: `tf.keras.preprocessing.image` APIs do not operate on tensors and are\nnot recommended for new code. Prefer loading data with\n`tf.keras.utils.image_dataset_from_directory`, and then transforming the output\n`tf.data.Dataset` with preprocessing layers. For more information, see the\ntutorials for [loading images](\nhttps://www.tensorflow.org/tutorials/load_data/images) and [augmenting images](\nhttps://www.tensorflow.org/tutorials/images/data_augmentation), as well as the\n[preprocessing layer guide](\nhttps://www.tensorflow.org/guide/keras/preprocessing_layers).\n\"\"\"\n\nimport collections\nimport io\nimport multiprocessing\nimport os\nimport pathlib\nimport threading\nimport warnings\n\nfrom keras import backend\nfrom keras.utils import data_utils\nimport numpy as np\nfrom tensorflow.python.util.tf_export import keras_export\n\ntry:\n import scipy\n from scipy import linalg # pylint: disable=unused-import\n from scipy import ndimage # pylint: disable=unused-import\nexcept ImportError:\n pass\ntry:\n from PIL import Image as pil_image\n from PIL import ImageEnhance\nexcept ImportError:\n pil_image = None\n ImageEnhance = None\n\n\nif pil_image is not None:\n _PIL_INTERPOLATION_METHODS = {\n 'nearest': pil_image.NEAREST,\n 'bilinear': pil_image.BILINEAR,\n 'bicubic': pil_image.BICUBIC,\n 'hamming': pil_image.HAMMING,\n 'box': pil_image.BOX,\n 'lanczos': pil_image.LANCZOS,\n }\n\n\n@keras_export('keras.utils.array_to_img',\n 'keras.preprocessing.image.array_to_img')\ndef array_to_img(x, data_format=None, scale=True, dtype=None):\n \"\"\"Converts a 3D Numpy array to a PIL Image instance.\n\n Usage:\n\n ```python\n from PIL import Image\n img = np.random.random(size=(100, 100, 3))\n pil_img = tf.keras.preprocessing.image.array_to_img(img)\n ```\n\n\n Args:\n x: Input data, in any form that can be converted to a Numpy array.\n data_format: Image data format, can be either \"channels_first\" or\n \"channels_last\". Defaults to `None`, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Defaults to `True`.\n dtype: Dtype to use. 
Defaults to `None`, in which case the global setting\n        `tf.keras.backend.floatx()` is used (unless you changed it, it defaults\n        to \"float32\")\n\n  Returns:\n    A PIL Image instance.\n\n  Raises:\n    ImportError: if PIL is not available.\n    ValueError: if invalid `x` or `data_format` is passed.\n  \"\"\"\n\n  if data_format is None:\n    data_format = backend.image_data_format()\n  if dtype is None:\n    dtype = backend.floatx()\n  if pil_image is None:\n    raise ImportError('Could not import PIL.Image. '\n                      'The use of `array_to_img` requires PIL.')\n  x = np.asarray(x, dtype=dtype)\n  if x.ndim != 3:\n    raise ValueError('Expected image array to have rank 3 (single image). '\n                     f'Got array with shape: {x.shape}')\n\n  if data_format not in {'channels_first', 'channels_last'}:\n    raise ValueError(f'Invalid data_format: {data_format}')\n\n  # Original Numpy array x has format (height, width, channel)\n  # or (channel, height, width)\n  # but target PIL image has format (width, height, channel)\n  if data_format == 'channels_first':\n    x = x.transpose(1, 2, 0)\n  if scale:\n    x = x - np.min(x)\n    x_max = np.max(x)\n    if x_max != 0:\n      x /= x_max\n    x *= 255\n  if x.shape[2] == 4:\n    # RGBA\n    return pil_image.fromarray(x.astype('uint8'), 'RGBA')\n  elif x.shape[2] == 3:\n    # RGB\n    return pil_image.fromarray(x.astype('uint8'), 'RGB')\n  elif x.shape[2] == 1:\n    # grayscale\n    if np.max(x) > 255:\n      # 32-bit signed integer grayscale image. PIL mode \"I\"\n      return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')\n    return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')\n  else:\n    raise ValueError(f'Unsupported channel number: {x.shape[2]}')\n\n\n@keras_export('keras.utils.img_to_array',\n              'keras.preprocessing.image.img_to_array')\ndef img_to_array(img, data_format=None, dtype=None):\n  \"\"\"Converts a PIL Image instance to a Numpy array.\n\n  Usage:\n\n  ```python\n  from PIL import Image\n  img_data = np.random.random(size=(100, 100, 3))\n  img = tf.keras.preprocessing.image.array_to_img(img_data)\n  array = tf.keras.preprocessing.image.img_to_array(img)\n  ```\n\n\n  Args:\n      img: Input PIL Image instance.\n      data_format: Image data format, can be either \"channels_first\" or\n        \"channels_last\". Defaults to `None`, in which case the global setting\n        `tf.keras.backend.image_data_format()` is used (unless you changed it,\n        it defaults to \"channels_last\").\n      dtype: Dtype to use. 
Defaults to `None`, in which case the global setting\n        `tf.keras.backend.floatx()` is used (unless you changed it, it defaults\n        to \"float32\")\n\n  Returns:\n    A 3D Numpy array.\n\n  Raises:\n    ValueError: if invalid `img` or `data_format` is passed.\n  \"\"\"\n\n  if data_format is None:\n    data_format = backend.image_data_format()\n  if dtype is None:\n    dtype = backend.floatx()\n  if data_format not in {'channels_first', 'channels_last'}:\n    raise ValueError(f'Unknown data_format: {data_format}')\n  # Numpy array x has format (height, width, channel)\n  # or (channel, height, width)\n  # but original PIL image has format (width, height, channel)\n  x = np.asarray(img, dtype=dtype)\n  if len(x.shape) == 3:\n    if data_format == 'channels_first':\n      x = x.transpose(2, 0, 1)\n  elif len(x.shape) == 2:\n    if data_format == 'channels_first':\n      x = x.reshape((1, x.shape[0], x.shape[1]))\n    else:\n      x = x.reshape((x.shape[0], x.shape[1], 1))\n  else:\n    raise ValueError(f'Unsupported image shape: {x.shape}')\n  return x\n\n\n@keras_export('keras.utils.save_img', 'keras.preprocessing.image.save_img')\ndef save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):\n  \"\"\"Saves an image stored as a Numpy array to a path or file object.\n\n  Args:\n      path: Path or file object.\n      x: Numpy array.\n      data_format: Image data format, either \"channels_first\" or\n        \"channels_last\".\n      file_format: Optional file format override. If omitted, the format to use\n        is determined from the filename extension. If a file object was used\n        instead of a filename, this parameter should always be used.\n      scale: Whether to rescale image values to be within `[0, 255]`.\n      **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.\n  \"\"\"\n  if data_format is None:\n    data_format = backend.image_data_format()\n  img = array_to_img(x, data_format=data_format, scale=scale)\n  if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):\n    warnings.warn('The JPG format does not support '\n                  'RGBA images, converting to RGB.')\n    img = img.convert('RGB')\n  img.save(path, format=file_format, **kwargs)\n\n\n@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')\ndef load_img(path,\n             grayscale=False,\n             color_mode='rgb',\n             target_size=None,\n             interpolation='nearest',\n             keep_aspect_ratio=False):\n  \"\"\"Loads an image into PIL format.\n\n  Usage:\n\n  ```\n  image = tf.keras.preprocessing.image.load_img(image_path)\n  input_arr = tf.keras.preprocessing.image.img_to_array(image)\n  input_arr = np.array([input_arr])  # Convert single image to a batch.\n  predictions = model.predict(input_arr)\n  ```\n\n  Args:\n      path: Path to image file.\n      grayscale: DEPRECATED use `color_mode=\"grayscale\"`.\n      color_mode: One of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\". The desired\n        image format.\n      target_size: Either `None` (default to original size) or tuple of ints\n        `(img_height, img_width)`.\n      interpolation: Interpolation method used to resample the image if the\n        target size is different from that of the loaded image. Supported\n        methods are \"nearest\", \"bilinear\", and \"bicubic\". If PIL version 1.1.3\n        or newer is installed, \"lanczos\" is also supported. If PIL version 3.4.0\n        or newer is installed, \"box\" and \"hamming\" are also supported. By\n        default, \"nearest\" is used.\n      keep_aspect_ratio: Boolean, whether to resize images to a target\n        size without aspect ratio distortion. 
The image is cropped in\n the center with target aspect ratio before resizing.\n\n Returns:\n A PIL Image instance.\n\n Raises:\n ImportError: if PIL is not available.\n ValueError: if interpolation method is not supported.\n \"\"\"\n if grayscale:\n warnings.warn('grayscale is deprecated. Please use '\n 'color_mode = \"grayscale\"')\n color_mode = 'grayscale'\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. '\n 'The use of `load_img` requires PIL.')\n if isinstance(path, io.BytesIO):\n img = pil_image.open(path)\n elif isinstance(path, (pathlib.Path, bytes, str)):\n if isinstance(path, pathlib.Path):\n path = str(path.resolve())\n with open(path, 'rb') as f:\n img = pil_image.open(io.BytesIO(f.read()))\n else:\n raise TypeError('path should be path-like or io.BytesIO'\n ', not {}'.format(type(path)))\n\n if color_mode == 'grayscale':\n # if image is not already an 8-bit, 16-bit or 32-bit grayscale image\n # convert it to an 8-bit grayscale image.\n if img.mode not in ('L', 'I;16', 'I'):\n img = img.convert('L')\n elif color_mode == 'rgba':\n if img.mode != 'RGBA':\n img = img.convert('RGBA')\n elif color_mode == 'rgb':\n if img.mode != 'RGB':\n img = img.convert('RGB')\n else:\n raise ValueError('color_mode must be \"grayscale\", \"rgb\", or \"rgba\"')\n if target_size is not None:\n width_height_tuple = (target_size[1], target_size[0])\n if img.size != width_height_tuple:\n if interpolation not in _PIL_INTERPOLATION_METHODS:\n raise ValueError('Invalid interpolation method {} specified. Supported '\n 'methods are {}'.format(\n interpolation,\n ', '.join(_PIL_INTERPOLATION_METHODS.keys())))\n resample = _PIL_INTERPOLATION_METHODS[interpolation]\n\n if keep_aspect_ratio:\n width, height = img.size\n target_width, target_height = width_height_tuple\n\n crop_height = (width * target_height) // target_width\n crop_width = (height * target_width) // target_height\n\n # Set back to input height / width\n # if crop_height / crop_width is not smaller.\n crop_height = min(height, crop_height)\n crop_width = min(width, crop_width)\n\n crop_box_hstart = (height - crop_height) // 2\n crop_box_wstart = (width - crop_width) // 2\n crop_box_wend = crop_box_wstart + crop_width\n crop_box_hend = crop_box_hstart + crop_height\n crop_box = [\n crop_box_wstart, crop_box_hstart, crop_box_wend, crop_box_hend\n ]\n img = img.resize(width_height_tuple, resample, box=crop_box)\n else:\n img = img.resize(width_height_tuple, resample)\n return img\n\n\n@keras_export('keras.preprocessing.image.Iterator')\nclass Iterator(data_utils.Sequence):\n \"\"\"Base class for image data iterators.\n\n Warning: `tf.keras.preprocessing.image.Iterator` is not recommended for\n new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. 
For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Every `Iterator` must implement the `_get_batches_of_transformed_samples`\n method.\n\n Args:\n n: Integer, total number of samples in the dataset to loop over.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seeding for data shuffling.\n \"\"\"\n white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff')\n\n def __init__(self, n, batch_size, shuffle, seed):\n self.n = n\n self.batch_size = batch_size\n self.seed = seed\n self.shuffle = shuffle\n self.batch_index = 0\n self.total_batches_seen = 0\n self.lock = threading.Lock()\n self.index_array = None\n self.index_generator = self._flow_index()\n\n def _set_index_array(self):\n self.index_array = np.arange(self.n)\n if self.shuffle:\n self.index_array = np.random.permutation(self.n)\n\n def __getitem__(self, idx):\n if idx >= len(self):\n raise ValueError('Asked to retrieve element {idx}, '\n 'but the Sequence '\n 'has length {length}'.format(idx=idx, length=len(self)))\n if self.seed is not None:\n np.random.seed(self.seed + self.total_batches_seen)\n self.total_batches_seen += 1\n if self.index_array is None:\n self._set_index_array()\n index_array = self.index_array[self.batch_size * idx:self.batch_size *\n (idx + 1)]\n return self._get_batches_of_transformed_samples(index_array)\n\n def __len__(self):\n return (self.n + self.batch_size - 1) // self.batch_size # round up\n\n def on_epoch_end(self):\n self._set_index_array()\n\n def reset(self):\n self.batch_index = 0\n\n def _flow_index(self):\n # Ensure self.batch_index is 0.\n self.reset()\n while 1:\n if self.seed is not None:\n np.random.seed(self.seed + self.total_batches_seen)\n if self.batch_index == 0:\n self._set_index_array()\n\n if self.n == 0:\n # Avoiding modulo by zero error\n current_index = 0\n else:\n current_index = (self.batch_index * self.batch_size) % self.n\n if self.n > current_index + self.batch_size:\n self.batch_index += 1\n else:\n self.batch_index = 0\n self.total_batches_seen += 1\n yield self.index_array[current_index:current_index + self.batch_size]\n\n def __iter__(self):\n # Needed if we want to do something like:\n # for x, y in data_gen.flow(...):\n return self\n\n def __next__(self, *args, **kwargs):\n return self.next(*args, **kwargs)\n\n def next(self):\n \"\"\"For python 2.x.\n\n Returns:\n The next batch.\n \"\"\"\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)\n\n def _get_batches_of_transformed_samples(self, index_array):\n \"\"\"Gets a batch of transformed samples.\n\n Args:\n index_array: Array of sample indices to include in batch.\n Returns:\n A batch of transformed samples.\n \"\"\"\n raise NotImplementedError\n\n\ndef _iter_valid_files(directory, white_list_formats, follow_links):\n \"\"\"Iterates on files with extension.\n\n Args:\n directory: Absolute path to the directory\n containing files to be counted\n white_list_formats: Set of strings containing allowed extensions for\n the files to be counted.\n follow_links: Boolean, follow symbolic links to 
subdirectories.\n  Yields:\n    Tuple of (root, filename) with extension in `white_list_formats`.\n  \"\"\"\n\n  def _recursive_list(subpath):\n    return sorted(\n        os.walk(subpath, followlinks=follow_links), key=lambda x: x[0])\n\n  for root, _, files in _recursive_list(directory):\n    for fname in sorted(files):\n      if fname.lower().endswith('.tiff'):\n        warnings.warn('Using \".tiff\" files with multiple bands '\n                      'will cause distortion. Please verify your output.')\n      if fname.lower().endswith(white_list_formats):\n        yield root, fname\n\n\ndef _list_valid_filenames_in_directory(directory, white_list_formats, split,\n                                       class_indices, follow_links):\n  \"\"\"Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\n  Args:\n      directory: absolute path to a directory containing the files to list.\n        The directory name is used as class label\n        and must be a key of `class_indices`.\n      white_list_formats: set of strings containing allowed extensions for\n        the files to be counted.\n      split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\n        account a certain fraction of files in each directory.\n        E.g.: `split=(0.6, 1.0)` would only account for last 40 percent\n        of images in each directory.\n      class_indices: dictionary mapping a class name to its index.\n      follow_links: boolean, follow symbolic links to subdirectories.\n\n  Returns:\n      classes: a list of class indices\n      filenames: the path of valid files in `directory`, relative from\n        `directory`'s parent (e.g., if `directory` is \"dataset/class1\",\n        the filenames will be\n        `[\"class1/file1.jpg\", \"class1/file2.jpg\", ...]`).\n  \"\"\"\n  dirname = os.path.basename(directory)\n  if split:\n    all_files = list(\n        _iter_valid_files(directory, white_list_formats, follow_links))\n    num_files = len(all_files)\n    start, stop = int(split[0] * num_files), int(split[1] * num_files)\n    valid_files = all_files[start:stop]\n  else:\n    valid_files = _iter_valid_files(directory, white_list_formats, follow_links)\n  classes = []\n  filenames = []\n  for root, fname in valid_files:\n    classes.append(class_indices[dirname])\n    absolute_path = os.path.join(root, fname)\n    relative_path = os.path.join(dirname,\n                                 os.path.relpath(absolute_path, directory))\n    filenames.append(relative_path)\n\n  return classes, filenames\n\n\nclass BatchFromFilesMixin():\n  \"\"\"Adds methods related to getting batches from filenames.\n\n  It includes the logic to transform image files to batches.\n  \"\"\"\n\n  def set_processing_attrs(self, image_data_generator, target_size, color_mode,\n                           data_format, save_to_dir, save_prefix, save_format,\n                           subset, interpolation, keep_aspect_ratio):\n    \"\"\"Sets attributes to use later for processing files into a batch.\n\n    Args:\n        image_data_generator: Instance of `ImageDataGenerator`\n          to use for random transformations and normalization.\n        target_size: tuple of integers, dimensions to resize input images\n          to.\n        color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`.\n          Color mode to read images.\n        data_format: String, one of `channels_first`, `channels_last`.\n        save_to_dir: Optional directory where to save the pictures\n          being yielded, in a viewable format. 
This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are \"nearest\", \"bilinear\", and \"bicubic\".\n If PIL version 1.1.3 or newer is installed, \"lanczos\" is also\n supported. If PIL version 3.4.0 or newer is installed, \"box\" and\n \"hamming\" are also supported. By default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target size\n without aspect ratio distortion. The image is cropped in the center\n with target aspect ratio before resizing.\n \"\"\"\n self.image_data_generator = image_data_generator\n self.target_size = tuple(target_size)\n self.keep_aspect_ratio = keep_aspect_ratio\n if color_mode not in {'rgb', 'rgba', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\", \"rgba\", or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n if self.color_mode == 'rgba':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (4,)\n else:\n self.image_shape = (4,) + self.target_size\n elif self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n self.interpolation = interpolation\n if subset is not None:\n validation_split = self.image_data_generator._validation_split # pylint: disable=protected-access\n if subset == 'validation':\n split = (0, validation_split)\n elif subset == 'training':\n split = (validation_split, 1)\n else:\n raise ValueError('Invalid subset name: %s;'\n 'expected \"training\" or \"validation\"' % (subset,))\n else:\n split = None\n self.split = split\n self.subset = subset\n\n def _get_batches_of_transformed_samples(self, index_array):\n \"\"\"Gets a batch of transformed samples.\n\n Args:\n index_array: Array of sample indices to include in batch.\n Returns:\n A batch of transformed samples.\n \"\"\"\n batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)\n # build batch of image data\n # self.filepaths is dynamic, is better to call it once outside the loop\n filepaths = self.filepaths\n for i, j in enumerate(index_array):\n img = load_img(\n filepaths[j],\n color_mode=self.color_mode,\n target_size=self.target_size,\n interpolation=self.interpolation,\n keep_aspect_ratio=self.keep_aspect_ratio)\n x = img_to_array(img, data_format=self.data_format)\n # Pillow images should be closed after `load_img`,\n # but not PIL images.\n if hasattr(img, 'close'):\n img.close()\n if self.image_data_generator:\n params = self.image_data_generator.get_random_transform(x.shape)\n x = self.image_data_generator.apply_transform(x, params)\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i, j 
in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e7),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n # build batch of labels\n if self.class_mode == 'input':\n batch_y = batch_x.copy()\n elif self.class_mode in {'binary', 'sparse'}:\n batch_y = np.empty(len(batch_x), dtype=self.dtype)\n for i, n_observation in enumerate(index_array):\n batch_y[i] = self.classes[n_observation]\n elif self.class_mode == 'categorical':\n batch_y = np.zeros((len(batch_x), len(self.class_indices)),\n dtype=self.dtype)\n for i, n_observation in enumerate(index_array):\n batch_y[i, self.classes[n_observation]] = 1.\n elif self.class_mode == 'multi_output':\n batch_y = [output[index_array] for output in self.labels]\n elif self.class_mode == 'raw':\n batch_y = self.labels[index_array]\n else:\n return batch_x\n if self.sample_weight is None:\n return batch_x, batch_y\n else:\n return batch_x, batch_y, self.sample_weight[index_array]\n\n @property\n def filepaths(self):\n \"\"\"List of absolute paths to image files.\"\"\"\n raise NotImplementedError(\n '`filepaths` property method has not been implemented in {}.'.format(\n type(self).__name__))\n\n @property\n def labels(self):\n \"\"\"Class labels of every observation.\"\"\"\n raise NotImplementedError(\n '`labels` property method has not been implemented in {}.'.format(\n type(self).__name__))\n\n @property\n def sample_weight(self):\n raise NotImplementedError(\n '`sample_weight` property method has not been implemented in {}.'\n .format(type(self).__name__))\n\n\n@keras_export('keras.preprocessing.image.DirectoryIterator')\nclass DirectoryIterator(BatchFromFilesMixin, Iterator):\n \"\"\"Iterator capable of reading images from a directory on disk.\n\n Warning: `tf.keras.preprocessing.image.DirectoryIterator` is not recommended\n for new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n directory: Path to the directory to read images from. Each subdirectory in\n this directory will be considered to contain images from one class, or\n alternatively you could specify class subdirectories via the `classes`\n argument.\n image_data_generator: Instance of `ImageDataGenerator` to use for random\n transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`. Color mode to read\n images.\n classes: Optional list of strings, names of subdirectories containing\n images from each class (e.g. `[\"dogs\", \"cats\"]`). 
It will be computed\n automatically if not set.\n class_mode: Mode for yielding the targets:\n - `\"binary\"`: binary targets (if there are only two classes),\n - `\"categorical\"`: categorical targets,\n - `\"sparse\"`: integer targets,\n - `\"input\"`: targets are images identical to input images (mainly used\n to work with autoencoders),\n - `None`: no targets get yielded (only input images are yielded).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures being yielded,\n in a viewable format. This is useful for visualizing the random\n transformations being applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample images (if\n `save_to_dir` is set).\n save_format: Format to use for saving sample images (if `save_to_dir` is\n set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are \"nearest\", \"bilinear\", and \"bicubic\". If PIL version 1.1.3\n or newer is installed, \"lanczos\" is also supported. If PIL version 3.4.0\n or newer is installed, \"box\" and \"hamming\" are also supported. By\n default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target size\n without aspect ratio distortion. The image is cropped in the center\n with target aspect ratio before resizing.\n dtype: Dtype to use for generated arrays.\n \"\"\"\n allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}\n\n def __init__(self,\n directory,\n image_data_generator,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n data_format=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest',\n keep_aspect_ratio=False,\n dtype=None):\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n super().set_processing_attrs(image_data_generator, target_size, color_mode,\n data_format, save_to_dir, save_prefix,\n save_format, subset, interpolation,\n keep_aspect_ratio)\n self.directory = directory\n self.classes = classes\n if class_mode not in self.allowed_class_modes:\n raise ValueError('Invalid class_mode: {}; expected one of: {}'\n .format(class_mode, self.allowed_class_modes))\n self.class_mode = class_mode\n self.dtype = dtype\n # First, count the number of samples and classes.\n self.samples = 0\n\n if not classes:\n classes = []\n for subdir in sorted(os.listdir(directory)):\n if os.path.isdir(os.path.join(directory, subdir)):\n classes.append(subdir)\n self.num_classes = len(classes)\n self.class_indices = dict(zip(classes, range(len(classes))))\n\n pool = multiprocessing.pool.ThreadPool()\n\n # Second, build an index of the images\n # in the different class subfolders.\n results = []\n self.filenames = []\n i = 0\n for dirpath in (os.path.join(directory, subdir) for subdir in classes):\n results.append(\n pool.apply_async(_list_valid_filenames_in_directory,\n (dirpath, self.white_list_formats, self.split,\n self.class_indices, follow_links)))\n classes_list = []\n for res in results:\n classes, 
filenames = res.get()\n classes_list.append(classes)\n self.filenames += filenames\n self.samples = len(self.filenames)\n self.classes = np.zeros((self.samples,), dtype='int32')\n for classes in classes_list:\n self.classes[i:i + len(classes)] = classes\n i += len(classes)\n\n print('Found %d images belonging to %d classes.' %\n (self.samples, self.num_classes))\n pool.close()\n pool.join()\n self._filepaths = [\n os.path.join(self.directory, fname) for fname in self.filenames\n ]\n super().__init__(self.samples, batch_size, shuffle, seed)\n\n @property\n def filepaths(self):\n return self._filepaths\n\n @property\n def labels(self):\n return self.classes\n\n @property # mixin needs this property to work\n def sample_weight(self):\n # no sample weights will be returned\n return None\n\n\n@keras_export('keras.preprocessing.image.NumpyArrayIterator')\nclass NumpyArrayIterator(Iterator):\n \"\"\"Iterator yielding data from a Numpy array.\n\n Warning: `tf.keras.preprocessing.image.NumpyArrayIterator` is not recommended\n for new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Numpy array of input data or tuple. If tuple, the second elements is\n either another numpy array or a list of numpy arrays, each of which gets\n passed through as an output without any modifications.\n y: Numpy array of targets data.\n image_data_generator: Instance of `ImageDataGenerator` to use for random\n transformations and normalization.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n sample_weight: Numpy array of sample weights.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures being yielded,\n in a viewable format. This is useful for visualizing the random\n transformations being applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample images (if\n `save_to_dir` is set).\n save_format: Format to use for saving sample images (if `save_to_dir` is\n set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n ignore_class_split: Boolean (default: False), ignore difference\n in number of classes in labels across train and validation\n split (useful for non-classification tasks)\n dtype: Dtype to use for the generated arrays.\n \"\"\"\n\n def __init__(self,\n x,\n y,\n image_data_generator,\n batch_size=32,\n shuffle=False,\n sample_weight=None,\n seed=None,\n data_format=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n ignore_class_split=False,\n dtype=None):\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n self.dtype = dtype\n if isinstance(x, tuple) or isinstance(x, list):\n if not isinstance(x[1], list):\n x_misc = [np.asarray(x[1])]\n else:\n x_misc = [np.asarray(xx) for xx in x[1]]\n x = x[0]\n for xx in x_misc:\n if len(x) != len(xx):\n raise ValueError('All of the arrays in `x` '\n 'should have the same length. 
'\n 'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %\n (len(x), len(xx)))\n else:\n x_misc = []\n\n if y is not None and len(x) != len(y):\n raise ValueError('`x` (images tensor) and `y` (labels) '\n 'should have the same length. '\n 'Found: x.shape = %s, y.shape = %s' %\n (np.asarray(x).shape, np.asarray(y).shape))\n if sample_weight is not None and len(x) != len(sample_weight):\n raise ValueError('`x` (images tensor) and `sample_weight` '\n 'should have the same length. '\n 'Found: x.shape = %s, sample_weight.shape = %s' %\n (np.asarray(x).shape, np.asarray(sample_weight).shape))\n if subset is not None:\n if subset not in {'training', 'validation'}:\n raise ValueError('Invalid subset name:', subset,\n '; expected \"training\" or \"validation\".')\n split_idx = int(len(x) * image_data_generator._validation_split)\n\n if (y is not None and not ignore_class_split and not np.array_equal(\n np.unique(y[:split_idx]), np.unique(y[split_idx:]))):\n raise ValueError('Training and validation subsets '\n 'have different number of classes after '\n 'the split. If your numpy arrays are '\n 'sorted by the label, you might want '\n 'to shuffle them.')\n\n if subset == 'validation':\n x = x[:split_idx]\n x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]\n if y is not None:\n y = y[:split_idx]\n else:\n x = x[split_idx:]\n x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]\n if y is not None:\n y = y[split_idx:]\n\n self.x = np.asarray(x, dtype=self.dtype)\n self.x_misc = x_misc\n if self.x.ndim != 4:\n raise ValueError(\n 'Input data in `NumpyArrayIterator` '\n 'should have rank 4. You passed an array '\n 'with shape', self.x.shape)\n channels_axis = 3 if data_format == 'channels_last' else 1\n if self.x.shape[channels_axis] not in {1, 3, 4}:\n warnings.warn('NumpyArrayIterator is set to use the '\n 'data format convention \"' + data_format + '\" '\n '(channels on axis ' + str(channels_axis) +\n '), i.e. expected either 1, 3, or 4 '\n 'channels on axis ' + str(channels_axis) + '. 
'\n 'However, it was passed an array with shape ' +\n str(self.x.shape) + ' (' +\n str(self.x.shape[channels_axis]) + ' channels).')\n if y is not None:\n self.y = np.asarray(y)\n else:\n self.y = None\n if sample_weight is not None:\n self.sample_weight = np.asarray(sample_weight)\n else:\n self.sample_weight = None\n self.image_data_generator = image_data_generator\n self.data_format = data_format\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n super().__init__(x.shape[0], batch_size, shuffle, seed)\n\n def _get_batches_of_transformed_samples(self, index_array):\n batch_x = np.zeros(\n tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype)\n for i, j in enumerate(index_array):\n x = self.x[j]\n params = self.image_data_generator.get_random_transform(x.shape)\n x = self.image_data_generator.apply_transform(\n x.astype(self.dtype), params)\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n batch_x_miscs = [xx[index_array] for xx in self.x_misc]\n output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)\n if self.y is None:\n return output[0]\n output += (self.y[index_array],)\n if self.sample_weight is not None:\n output += (self.sample_weight[index_array],)\n return output\n\n\ndef validate_filename(filename, white_list_formats):\n \"\"\"Check if a filename refers to a valid file.\n\n Args:\n filename: String, absolute path to a file\n white_list_formats: Set, allowed file extensions\n Returns:\n A boolean value indicating if the filename is valid or not\n \"\"\"\n return (filename.lower().endswith(white_list_formats) and\n os.path.isfile(filename))\n\n\nclass DataFrameIterator(BatchFromFilesMixin, Iterator):\n \"\"\"Iterator capable of reading images from a directory on disk as a dataframe.\n\n Args:\n dataframe: Pandas dataframe containing the filepaths relative to\n `directory` (or absolute paths if `directory` is None) of the images in\n a string column. It should include other column/s depending on the\n `class_mode`: - if `class_mode` is `\"categorical\"` (default value) it\n must include the `y_col` column with the class/es of each image.\n Values in column can be string/list/tuple if a single class or\n list/tuple if multiple classes. - if `class_mode` is `\"binary\"` or\n `\"sparse\"` it must include the given `y_col` column with class values\n as strings. - if `class_mode` is `\"raw\"` or `\"multi_output\"` it should\n contain the columns specified in `y_col`. - if `class_mode` is\n `\"input\"` or `None` no extra column is needed.\n directory: string, path to the directory to read images from. If `None`,\n data in `x_col` column should be absolute paths.\n image_data_generator: Instance of `ImageDataGenerator` to use for random\n transformations and normalization. If None, no transformations and\n normalizations are made.\n x_col: string, column in `dataframe` that contains the filenames (or\n absolute paths if `directory` is `None`).\n y_col: string or list, column/s in `dataframe` that has the target data.\n weight_col: string, column in `dataframe` that contains the sample\n weights. 
Default: `None`.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`. Color mode to read\n images.\n classes: Optional list of strings, classes to use (e.g. `[\"dogs\",\n \"cats\"]`). If None, all classes in `y_col` will be used.\n class_mode: one of \"binary\", \"categorical\", \"input\", \"multi_output\",\n \"raw\", \"sparse\" or None. Default: \"categorical\".\n Mode for yielding the targets:\n - `\"binary\"`: 1D numpy array of binary labels,\n - `\"categorical\"`: 2D numpy array of one-hot encoded labels. Supports\n multi-label output.\n - `\"input\"`: images identical to input images (mainly used to work\n with autoencoders),\n - `\"multi_output\"`: list with the values of the different columns,\n - `\"raw\"`: numpy array of values in `y_col` column(s),\n - `\"sparse\"`: 1D numpy array of integer labels, - `None`, no targets\n are returned (the generator will only yield batches of image data,\n which is useful to use in `model.predict()`).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures being yielded,\n in a viewable format. This is useful for visualizing the random\n transformations being applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample images (if\n `save_to_dir` is set).\n save_format: Format to use for saving sample images (if `save_to_dir` is\n set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are \"nearest\", \"bilinear\", and \"bicubic\". If PIL version 1.1.3\n or newer is installed, \"lanczos\" is also supported. If PIL version 3.4.0\n or newer is installed, \"box\" and \"hamming\" are also supported. By\n default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target size\n without aspect ratio distortion. The image is cropped in the center\n with target aspect ratio before resizing.\n dtype: Dtype to use for the generated arrays.\n validate_filenames: Boolean, whether to validate image filenames in\n `x_col`. If `True`, invalid images will be ignored. Disabling this\n option can lead to speed-up in the instantiation of this class. 
Default:\n `True`.\n \"\"\"\n allowed_class_modes = {\n 'binary', 'categorical', 'input', 'multi_output', 'raw', 'sparse', None\n }\n\n def __init__(self,\n dataframe,\n directory=None,\n image_data_generator=None,\n x_col='filename',\n y_col='class',\n weight_col=None,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n data_format='channels_last',\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n keep_aspect_ratio=False,\n dtype='float32',\n validate_filenames=True):\n super().set_processing_attrs(image_data_generator, target_size, color_mode,\n data_format, save_to_dir, save_prefix,\n save_format, subset, interpolation,\n keep_aspect_ratio)\n df = dataframe.copy()\n self.directory = directory or ''\n self.class_mode = class_mode\n self.dtype = dtype\n # check that inputs match the required class_mode\n self._check_params(df, x_col, y_col, weight_col, classes)\n if validate_filenames: # check which image files are valid and keep them\n df = self._filter_valid_filepaths(df, x_col)\n if class_mode not in ['input', 'multi_output', 'raw', None]:\n df, classes = self._filter_classes(df, y_col, classes)\n num_classes = len(classes)\n # build an index of all the unique classes\n self.class_indices = dict(zip(classes, range(len(classes))))\n # retrieve only training or validation set\n if self.split:\n num_files = len(df)\n start = int(self.split[0] * num_files)\n stop = int(self.split[1] * num_files)\n df = df.iloc[start:stop, :]\n # get labels for each observation\n if class_mode not in ['input', 'multi_output', 'raw', None]:\n self.classes = self.get_classes(df, y_col)\n self.filenames = df[x_col].tolist()\n self._sample_weight = df[weight_col].values if weight_col else None\n\n if class_mode == 'multi_output':\n self._targets = [np.array(df[col].tolist()) for col in y_col]\n if class_mode == 'raw':\n self._targets = df[y_col].values\n self.samples = len(self.filenames)\n validated_string = 'validated' if validate_filenames else 'non-validated'\n if class_mode in ['input', 'multi_output', 'raw', None]:\n print(f'Found {self.samples} {validated_string} image filenames.')\n else:\n print(f'Found {self.samples} {validated_string} image filenames '\n f'belonging to {num_classes} classes.')\n self._filepaths = [\n os.path.join(self.directory, fname) for fname in self.filenames\n ]\n super().__init__(self.samples, batch_size, shuffle, seed)\n\n def _check_params(self, df, x_col, y_col, weight_col, classes):\n # check class mode is one of the currently supported\n if self.class_mode not in self.allowed_class_modes:\n raise ValueError('Invalid class_mode: {}; expected one of: {}'.format(\n self.class_mode, self.allowed_class_modes))\n # check that y_col has several column names if class_mode is multi_output\n if (self.class_mode == 'multi_output') and not isinstance(y_col, list):\n raise TypeError(\n 'If class_mode=\"{}\", y_col must be a list. 
Received {}.'.format(\n self.class_mode,\n type(y_col).__name__))\n # check that filenames/filepaths column values are all strings\n if not all(df[x_col].apply(lambda x: isinstance(x, str))):\n raise TypeError(\n 'All values in column x_col={} must be strings.'.format(x_col))\n # check labels are string if class_mode is binary or sparse\n if self.class_mode in {'binary', 'sparse'}:\n if not all(df[y_col].apply(lambda x: isinstance(x, str))):\n raise TypeError('If class_mode=\"{}\", y_col=\"{}\" column '\n 'values must be strings.'.format(\n self.class_mode, y_col))\n # check that if binary there are only 2 different classes\n if self.class_mode == 'binary':\n if classes:\n classes = set(classes)\n if len(classes) != 2:\n raise ValueError('If class_mode=\"binary\" there must be 2 '\n 'classes. {} class/es were given.'.format(\n len(classes)))\n elif df[y_col].nunique() != 2:\n raise ValueError('If class_mode=\"binary\" there must be 2 classes. '\n 'Found {} classes.'.format(df[y_col].nunique()))\n # check values are string, list or tuple if class_mode is categorical\n if self.class_mode == 'categorical':\n types = (str, list, tuple)\n if not all(df[y_col].apply(lambda x: isinstance(x, types))):\n raise TypeError('If class_mode=\"{}\", y_col=\"{}\" column '\n 'values must be type string, list or tuple.'.format(\n self.class_mode, y_col))\n # raise warning if classes are given but will be unused\n if classes and self.class_mode in {'input', 'multi_output', 'raw', None}:\n warnings.warn(\n '`classes` will be ignored given the class_mode=\"{}\"'.format(\n self.class_mode))\n # check that if weight column that the values are numerical\n if weight_col and not issubclass(df[weight_col].dtype.type, np.number):\n raise TypeError(\n 'Column weight_col={} must be numeric.'.format(weight_col))\n\n def get_classes(self, df, y_col):\n labels = []\n for label in df[y_col]:\n if isinstance(label, (list, tuple)):\n labels.append([self.class_indices[lbl] for lbl in label])\n else:\n labels.append(self.class_indices[label])\n return labels\n\n @staticmethod\n def _filter_classes(df, y_col, classes):\n df = df.copy()\n\n def remove_classes(labels, classes):\n if isinstance(labels, (list, tuple)):\n labels = [cls for cls in labels if cls in classes]\n return labels or None\n elif isinstance(labels, str):\n return labels if labels in classes else None\n else:\n raise TypeError(\n 'Expect string, list or tuple but found {} in {} column '.format(\n type(labels), y_col))\n\n if classes:\n # prepare for membership lookup\n classes = list(collections.OrderedDict.fromkeys(classes).keys())\n df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes))\n else:\n classes = set()\n for v in df[y_col]:\n if isinstance(v, (list, tuple)):\n classes.update(v)\n else:\n classes.add(v)\n classes = sorted(classes)\n return df.dropna(subset=[y_col]), classes\n\n def _filter_valid_filepaths(self, df, x_col):\n \"\"\"Keep only dataframe rows with valid filenames.\n\n Args:\n df: Pandas dataframe containing filenames in a column\n x_col: string, column in `df` that contains the filenames or filepaths\n Returns:\n absolute paths to image files\n \"\"\"\n filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname))\n mask = filepaths.apply(validate_filename, args=(self.white_list_formats,))\n n_invalid = (~mask).sum()\n if n_invalid:\n warnings.warn('Found {} invalid image filename(s) in x_col=\"{}\". 
'\n 'These filename(s) will be ignored.'.format(\n n_invalid, x_col))\n return df[mask]\n\n @property\n def filepaths(self):\n return self._filepaths\n\n @property\n def labels(self):\n if self.class_mode in {'multi_output', 'raw'}:\n return self._targets\n else:\n return self.classes\n\n @property\n def sample_weight(self):\n return self._sample_weight\n\n\ndef flip_axis(x, axis):\n x = np.asarray(x).swapaxes(axis, 0)\n x = x[::-1, ...]\n x = x.swapaxes(0, axis)\n return x\n\n\n@keras_export('keras.preprocessing.image.ImageDataGenerator')\nclass ImageDataGenerator():\n \"\"\"Generate batches of tensor image data with real-time data augmentation.\n\n Warning: `tf.keras.preprocessing.image.ImageDataGenerator` is not recommended\n for new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n The data will be looped over (in batches).\n\n Args:\n featurewise_center: Boolean. Set input mean to 0 over the dataset,\n feature-wise.\n samplewise_center: Boolean. Set each sample mean to 0.\n featurewise_std_normalization: Boolean. Divide inputs by std of the\n dataset, feature-wise.\n samplewise_std_normalization: Boolean. Divide each input by its std.\n zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.\n zca_whitening: Boolean. Apply ZCA whitening.\n rotation_range: Int. Degree range for random rotations.\n width_shift_range: Float, 1-D array-like or int\n - float: fraction of total width, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval `(-width_shift_range,\n +width_shift_range)` - With `width_shift_range=2` possible values\n are integers `[-1, 0, +1]`, same as with `width_shift_range=[-1, 0,\n +1]`, while with `width_shift_range=1.0` possible values are floats\n in the interval [-1.0, +1.0).\n height_shift_range: Float, 1-D array-like or int\n - float: fraction of total height, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval `(-height_shift_range,\n +height_shift_range)` - With `height_shift_range=2` possible values\n are integers `[-1, 0, +1]`, same as with `height_shift_range=[-1, 0,\n +1]`, while with `height_shift_range=1.0` possible values are floats\n in the interval [-1.0, +1.0).\n brightness_range: Tuple or list of two floats. Range for picking a\n brightness shift value from.\n shear_range: Float. Shear Intensity (Shear angle in counter-clockwise\n direction in degrees)\n zoom_range: Float or [lower, upper]. Range for random zoom. If a float,\n `[lower, upper] = [1-zoom_range, 1+zoom_range]`.\n channel_shift_range: Float. Range for random channel shifts.\n fill_mode: One of {\"constant\", \"nearest\", \"reflect\" or \"wrap\"}. Default is\n 'nearest'. Points outside the boundaries of the input are filled\n according to the given mode:\n - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)\n - 'nearest': aaaaaaaa|abcd|dddddddd\n - 'reflect': abcddcba|abcd|dcbaabcd\n - 'wrap': abcdabcd|abcd|abcdabcd\n cval: Float or Int. 
Value used for points outside the boundaries when\n `fill_mode = \"constant\"`.\n horizontal_flip: Boolean. Randomly flip inputs horizontally.\n vertical_flip: Boolean. Randomly flip inputs vertically.\n rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is\n applied, otherwise we multiply the data by the value provided (after\n applying all other transformations).\n preprocessing_function: function that will be applied on each input. The\n function will run after the image is resized and augmented.\n The function should take one argument: one image (Numpy tensor with\n rank 3), and should output a Numpy tensor with the same shape.\n data_format: Image data format, either \"channels_first\" or\n \"channels_last\". \"channels_last\" mode means that the images should have\n shape `(samples, height, width, channels)`, \"channels_first\" mode means\n that the images should have shape `(samples, channels, height, width)`.\n It defaults to the `image_data_format` value found in your Keras config\n file at `~/.keras/keras.json`. If you never set it, then it will be\n \"channels_last\".\n validation_split: Float. Fraction of images reserved for validation\n (strictly between 0 and 1).\n dtype: Dtype to use for the generated arrays.\n\n Raises:\n ValueError: If the value of the argument, `data_format` is other than\n `\"channels_last\"` or `\"channels_first\"`.\n ValueError: If the value of the argument, `validation_split` > 1\n or `validation_split` < 0.\n\n Examples:\n\n Example of using `.flow(x, y)`:\n\n ```python\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n y_train = utils.to_categorical(y_train, num_classes)\n y_test = utils.to_categorical(y_test, num_classes)\n datagen = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n validation_split=0.2)\n # compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied)\n datagen.fit(x_train)\n # fits the model on batches with real-time data augmentation:\n model.fit(datagen.flow(x_train, y_train, batch_size=32,\n subset='training'),\n validation_data=datagen.flow(x_train, y_train,\n batch_size=8, subset='validation'),\n steps_per_epoch=len(x_train) / 32, epochs=epochs)\n # here's a more \"manual\" example\n for e in range(epochs):\n print('Epoch', e)\n batches = 0\n for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):\n model.fit(x_batch, y_batch)\n batches += 1\n if batches >= len(x_train) / 32:\n # we need to break the loop by hand because\n # the generator loops indefinitely\n break\n ```\n\n Example of using `.flow_from_directory(directory)`:\n\n ```python\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n test_datagen = ImageDataGenerator(rescale=1./255)\n train_generator = train_datagen.flow_from_directory(\n 'data/train',\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n validation_generator = test_datagen.flow_from_directory(\n 'data/validation',\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n model.fit(\n train_generator,\n steps_per_epoch=2000,\n epochs=50,\n validation_data=validation_generator,\n validation_steps=800)\n ```\n\n Example of transforming images and masks together.\n\n ```python\n # we create two instances with the same arguments\n data_gen_args = 
dict(featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.2)\n image_datagen = ImageDataGenerator(**data_gen_args)\n mask_datagen = ImageDataGenerator(**data_gen_args)\n # Provide the same seed and keyword arguments to the fit and flow methods\n seed = 1\n image_datagen.fit(images, augment=True, seed=seed)\n mask_datagen.fit(masks, augment=True, seed=seed)\n image_generator = image_datagen.flow_from_directory(\n 'data/images',\n class_mode=None,\n seed=seed)\n mask_generator = mask_datagen.flow_from_directory(\n 'data/masks',\n class_mode=None,\n seed=seed)\n # combine generators into one which yields image and masks\n train_generator = zip(image_generator, mask_generator)\n model.fit(\n train_generator,\n steps_per_epoch=2000,\n epochs=50)\n ```\n \"\"\"\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n zca_epsilon=1e-6,\n rotation_range=0,\n width_shift_range=0.,\n height_shift_range=0.,\n brightness_range=None,\n shear_range=0.,\n zoom_range=0.,\n channel_shift_range=0.,\n fill_mode='nearest',\n cval=0.,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n preprocessing_function=None,\n data_format=None,\n validation_split=0.0,\n interpolation_order=1,\n dtype=None):\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n\n self.featurewise_center = featurewise_center\n self.samplewise_center = samplewise_center\n self.featurewise_std_normalization = featurewise_std_normalization\n self.samplewise_std_normalization = samplewise_std_normalization\n self.zca_whitening = zca_whitening\n self.zca_epsilon = zca_epsilon\n self.rotation_range = rotation_range\n self.width_shift_range = width_shift_range\n self.height_shift_range = height_shift_range\n self.shear_range = shear_range\n self.zoom_range = zoom_range\n self.channel_shift_range = channel_shift_range\n self.fill_mode = fill_mode\n self.cval = cval\n self.horizontal_flip = horizontal_flip\n self.vertical_flip = vertical_flip\n self.rescale = rescale\n self.preprocessing_function = preprocessing_function\n self.dtype = dtype\n self.interpolation_order = interpolation_order\n\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` should be `\"channels_last\"` '\n '(channel after row and column) or '\n '`\"channels_first\"` (channel before row and column). '\n 'Received: %s' % data_format)\n self.data_format = data_format\n if data_format == 'channels_first':\n self.channel_axis = 1\n self.row_axis = 2\n self.col_axis = 3\n if data_format == 'channels_last':\n self.channel_axis = 3\n self.row_axis = 1\n self.col_axis = 2\n if validation_split and not 0 < validation_split < 1:\n raise ValueError('`validation_split` must be strictly between 0 and 1. '\n ' Received: %s' % validation_split)\n self._validation_split = validation_split\n\n self.mean = None\n self.std = None\n self.zca_whitening_matrix = None\n\n if isinstance(zoom_range, (float, int)):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif (len(zoom_range) == 2 and\n all(isinstance(val, (float, int)) for val in zoom_range)):\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n else:\n raise ValueError('`zoom_range` should be a float or '\n 'a tuple or list of two floats. 
'\n 'Received: %s' % (zoom_range,))\n if zca_whitening:\n if not featurewise_center:\n self.featurewise_center = True\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, which overrides '\n 'setting of `featurewise_center`.')\n if featurewise_std_normalization:\n self.featurewise_std_normalization = False\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening` '\n 'which overrides setting of'\n '`featurewise_std_normalization`.')\n if featurewise_std_normalization:\n if not featurewise_center:\n self.featurewise_center = True\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, '\n 'which overrides setting of '\n '`featurewise_center`.')\n if samplewise_std_normalization:\n if not samplewise_center:\n self.samplewise_center = True\n warnings.warn('This ImageDataGenerator specifies '\n '`samplewise_std_normalization`, '\n 'which overrides setting of '\n '`samplewise_center`.')\n if brightness_range is not None:\n if (not isinstance(brightness_range, (tuple, list)) or\n len(brightness_range) != 2):\n raise ValueError(\n '`brightness_range should be tuple or list of two floats. '\n 'Received: %s' % (brightness_range,))\n self.brightness_range = brightness_range\n\n def flow(self,\n x,\n y=None,\n batch_size=32,\n shuffle=True,\n sample_weight=None,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n ignore_class_split=False,\n subset=None):\n \"\"\"Takes data & label arrays, generates batches of augmented data.\n\n Args:\n x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first\n element should contain the images and the second element another numpy\n array or a list of numpy arrays that gets passed to the output without\n any modifications. Can be used to feed the model miscellaneous data\n along with the images. In case of grayscale data, the channels axis of\n the image array should have value 1, in case of RGB data, it should\n have value 3, and in case of RGBA data, it should have value 4.\n y: Labels.\n batch_size: Int (default: 32).\n shuffle: Boolean (default: True).\n sample_weight: Sample weights.\n seed: Int (default: None).\n save_to_dir: None or str (default: None). This allows you to optionally\n specify a directory to which to save the augmented pictures being\n generated (useful for visualizing what you are doing).\n save_prefix: Str (default: `''`). Prefix to use for filenames of saved\n pictures (only relevant if `save_to_dir` is set).\n save_format: one of \"png\", \"jpeg\", \"bmp\", \"pdf\", \"ppm\", \"gif\", \"tif\",\n \"jpg\" (only relevant if `save_to_dir` is set). Default: \"png\".\n ignore_class_split: Boolean (default: False), ignore difference\n in number of classes in labels across train and validation\n split (useful for non-classification tasks)\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n\n Returns:\n An `Iterator` yielding tuples of `(x, y)`\n where `x` is a numpy array of image data\n (in the case of a single image input) or a list\n of numpy arrays (in the case with\n additional inputs) and `y` is a numpy array\n of corresponding labels. 
If 'sample_weight' is not None,\n the yielded tuples are of the form `(x, y, sample_weight)`.\n If `y` is None, only the numpy array `x` is returned.\n Raises:\n ValueError: If the Value of the argument, `subset` is other than\n \"training\" or \"validation\".\n\n \"\"\"\n return NumpyArrayIterator(\n x,\n y,\n self,\n batch_size=batch_size,\n shuffle=shuffle,\n sample_weight=sample_weight,\n seed=seed,\n data_format=self.data_format,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n ignore_class_split=ignore_class_split,\n subset=subset,\n dtype=self.dtype)\n\n def flow_from_directory(self,\n directory,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest',\n keep_aspect_ratio=False):\n \"\"\"Takes the path to a directory & generates batches of augmented data.\n\n Args:\n directory: string, path to the target directory. It should contain one\n subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside\n each of the subdirectories directory tree will be included in the\n generator. See [this script](\n https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)\n for more details.\n target_size: Tuple of integers `(height, width)`, defaults to `(256,\n 256)`. The dimensions to which all images found will be resized.\n color_mode: One of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\". Whether\n the images will be converted to have 1, 3, or 4 channels.\n classes: Optional list of class subdirectories\n (e.g. `['dogs', 'cats']`). Default: None. If not provided, the list\n of classes will be automatically inferred from the subdirectory\n names/structure under `directory`, where each subdirectory will be\n treated as a different class (and the order of the classes, which\n will map to the label indices, will be alphanumeric). The\n dictionary containing the mapping from class names to class\n indices can be obtained via the attribute `class_indices`.\n class_mode: One of \"categorical\", \"binary\", \"sparse\",\n \"input\", or None. Default: \"categorical\".\n Determines the type of label arrays that are returned:\n - \"categorical\" will be 2D one-hot encoded labels,\n - \"binary\" will be 1D binary labels,\n \"sparse\" will be 1D integer labels,\n - \"input\" will be images identical\n to input images (mainly used to work with autoencoders).\n - If None, no labels are returned\n (the generator will only yield batches of image data,\n which is useful to use with `model.predict_generator()`).\n Please note that in case of class_mode None,\n the data still needs to reside in a subdirectory\n of `directory` for it to work correctly.\n batch_size: Size of the batches of data (default: 32).\n shuffle: Whether to shuffle the data (default: True) If set to False,\n sorts the data in alphanumeric order.\n seed: Optional random seed for shuffling and transformations.\n save_to_dir: None or str (default: None). This allows you to optionally\n specify a directory to which to save the augmented pictures being\n generated (useful for visualizing what you are doing).\n save_prefix: Str. Prefix to use for filenames of saved pictures (only\n relevant if `save_to_dir` is set).\n save_format: one of \"png\", \"jpeg\", \"bmp\", \"pdf\", \"ppm\", \"gif\", \"tif\",\n \"jpg\"\n (only relevant if `save_to_dir` is set). 
Default: \"png\".\n follow_links: Whether to follow symlinks inside\n class subdirectories (default: False).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are `\"nearest\"`, `\"bilinear\"`, and `\"bicubic\"`. If PIL version\n 1.1.3 or newer is installed, `\"lanczos\"` is also supported. If PIL\n version 3.4.0 or newer is installed, `\"box\"` and `\"hamming\"` are also\n supported. By default, `\"nearest\"` is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target\n size without aspect ratio distortion. The image is cropped in\n the center with target aspect ratio before resizing.\n\n Returns:\n A `DirectoryIterator` yielding tuples of `(x, y)`\n where `x` is a numpy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a numpy array of corresponding labels.\n \"\"\"\n return DirectoryIterator(\n directory,\n self,\n target_size=target_size,\n color_mode=color_mode,\n keep_aspect_ratio=keep_aspect_ratio,\n classes=classes,\n class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n subset=subset,\n interpolation=interpolation,\n dtype=self.dtype)\n\n def flow_from_dataframe(self,\n dataframe,\n directory=None,\n x_col='filename',\n y_col='class',\n weight_col=None,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n validate_filenames=True,\n **kwargs):\n \"\"\"Takes the dataframe and the path to a directory + generates batches.\n\n The generated batches contain augmented/normalized data.\n\n **A simple tutorial can be found **[here](\n http://bit.ly/keras_flow_from_dataframe).\n\n Args:\n dataframe: Pandas dataframe containing the filepaths relative to\n `directory` (or absolute paths if `directory` is None) of the\n images in a string column. It should include other column/s\n depending on the `class_mode`:\n - if `class_mode` is `\"categorical\"` (default value) it must\n include the `y_col` column with the class/es of each image.\n Values in column can be string/list/tuple if a single class\n or list/tuple if multiple classes.\n - if `class_mode` is `\"binary\"` or `\"sparse\"` it must include\n the given `y_col` column with class values as strings.\n - if `class_mode` is `\"raw\"` or `\"multi_output\"` it should contain\n the columns specified in `y_col`.\n - if `class_mode` is `\"input\"` or `None` no extra column is needed.\n directory: string, path to the directory to read images from. If `None`,\n data in `x_col` column should be absolute paths.\n x_col: string, column in `dataframe` that contains the filenames (or\n absolute paths if `directory` is `None`).\n y_col: string or list, column/s in `dataframe` that has the target data.\n weight_col: string, column in `dataframe` that contains the sample\n weights. Default: `None`.\n target_size: tuple of integers `(height, width)`, default: `(256, 256)`.\n The dimensions to which all images found will be resized.\n color_mode: one of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\". 
Whether\n the images will be converted to have 1 or 3 color channels.\n classes: optional list of classes (e.g. `['dogs', 'cats']`). Default is\n None. If not provided, the list of classes will be automatically\n inferred from the `y_col`, which will map to the label indices, will\n be alphanumeric). The dictionary containing the mapping from class\n names to class indices can be obtained via the attribute\n `class_indices`.\n class_mode: one of \"binary\", \"categorical\", \"input\", \"multi_output\",\n \"raw\", sparse\" or None. Default: \"categorical\".\n Mode for yielding the targets:\n - `\"binary\"`: 1D numpy array of binary labels,\n - `\"categorical\"`: 2D numpy array of one-hot encoded labels.\n Supports multi-label output.\n - `\"input\"`: images identical to input images (mainly used to work\n with autoencoders),\n - `\"multi_output\"`: list with the values of the different columns,\n - `\"raw\"`: numpy array of values in `y_col` column(s),\n - `\"sparse\"`: 1D numpy array of integer labels, - `None`, no targets\n are returned (the generator will only yield batches of image data,\n which is useful to use in `model.predict()`).\n batch_size: size of the batches of data (default: 32).\n shuffle: whether to shuffle the data (default: True)\n seed: optional random seed for shuffling and transformations.\n save_to_dir: None or str (default: None). This allows you to optionally\n specify a directory to which to save the augmented pictures being\n generated (useful for visualizing what you are doing).\n save_prefix: str. Prefix to use for filenames of saved pictures (only\n relevant if `save_to_dir` is set).\n save_format: one of \"png\", \"jpeg\", \"bmp\", \"pdf\", \"ppm\", \"gif\", \"tif\",\n \"jpg\" (only relevant if `save_to_dir` is set). Default: \"png\".\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are `\"nearest\"`, `\"bilinear\"`, and `\"bicubic\"`. If PIL version\n 1.1.3 or newer is installed, `\"lanczos\"` is also supported. If PIL\n version 3.4.0 or newer is installed, `\"box\"` and `\"hamming\"` are also\n supported. By default, `\"nearest\"` is used.\n validate_filenames: Boolean, whether to validate image filenames in\n `x_col`. If `True`, invalid images will be ignored. 
Disabling this\n option can lead to speed-up in the execution of this function.\n Defaults to `True`.\n **kwargs: legacy arguments for raising deprecation warnings.\n\n Returns:\n A `DataFrameIterator` yielding tuples of `(x, y)`\n where `x` is a numpy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a numpy array of corresponding labels.\n \"\"\"\n if 'has_ext' in kwargs:\n warnings.warn(\n 'has_ext is deprecated, filenames in the dataframe have '\n 'to match the exact filenames in disk.', DeprecationWarning)\n if 'sort' in kwargs:\n warnings.warn(\n 'sort is deprecated, batches will be created in the'\n 'same order than the filenames provided if shuffle'\n 'is set to False.', DeprecationWarning)\n if class_mode == 'other':\n warnings.warn(\n '`class_mode` \"other\" is deprecated, please use '\n '`class_mode` \"raw\".', DeprecationWarning)\n class_mode = 'raw'\n if 'drop_duplicates' in kwargs:\n warnings.warn(\n 'drop_duplicates is deprecated, you can drop duplicates '\n 'by using the pandas.DataFrame.drop_duplicates method.',\n DeprecationWarning)\n\n return DataFrameIterator(\n dataframe,\n directory,\n self,\n x_col=x_col,\n y_col=y_col,\n weight_col=weight_col,\n target_size=target_size,\n color_mode=color_mode,\n classes=classes,\n class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n subset=subset,\n interpolation=interpolation,\n validate_filenames=validate_filenames,\n dtype=self.dtype)\n\n def standardize(self, x):\n \"\"\"Applies the normalization configuration in-place to a batch of inputs.\n\n `x` is changed in-place since the function is mainly used internally\n to standardize images and feed them to your network. If a copy of `x`\n would be created instead it would have a significant performance cost.\n If you want to apply this method without changing the input in-place\n you can call the method creating a copy before:\n\n standardize(np.copy(x))\n\n Args:\n x: Batch of inputs to be normalized.\n\n Returns:\n The inputs, normalized.\n \"\"\"\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.rescale:\n x *= self.rescale\n if self.samplewise_center:\n x -= np.mean(x, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, keepdims=True) + 1e-6)\n\n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_center`, but it hasn\\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + 1e-6)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, '\n 'but it hasn\\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.zca_whitening_matrix is not None:\n flat_x = x.reshape(-1, np.prod(x.shape[-3:]))\n white_x = flat_x @ self.zca_whitening_matrix\n x = np.reshape(white_x, x.shape)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, but it hasn\\'t '\n 'been fit on any training data. 
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x\n\n def get_random_transform(self, img_shape, seed=None):\n \"\"\"Generates random parameters for a transformation.\n\n Args:\n img_shape: Tuple of integers.\n Shape of the image that is transformed.\n seed: Random seed.\n\n Returns:\n A dictionary containing randomly chosen parameters describing the\n transformation.\n \"\"\"\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if self.rotation_range:\n theta = np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n try: # 1-D array-like or int\n tx = np.random.choice(self.height_shift_range)\n tx *= np.random.choice([-1, 1])\n except ValueError: # floating point\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range)\n if np.max(self.height_shift_range) < 1:\n tx *= img_shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n try: # 1-D array-like or int\n ty = np.random.choice(self.width_shift_range)\n ty *= np.random.choice([-1, 1])\n except ValueError: # floating point\n ty = np.random.uniform(-self.width_shift_range, self.width_shift_range)\n if np.max(self.width_shift_range) < 1:\n ty *= img_shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)\n\n flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n flip_vertical = (np.random.random() < 0.5) * self.vertical_flip\n\n channel_shift_intensity = None\n if self.channel_shift_range != 0:\n channel_shift_intensity = np.random.uniform(-self.channel_shift_range,\n self.channel_shift_range)\n\n brightness = None\n if self.brightness_range is not None:\n brightness = np.random.uniform(self.brightness_range[0],\n self.brightness_range[1])\n\n transform_parameters = {\n 'theta': theta,\n 'tx': tx,\n 'ty': ty,\n 'shear': shear,\n 'zx': zx,\n 'zy': zy,\n 'flip_horizontal': flip_horizontal,\n 'flip_vertical': flip_vertical,\n 'channel_shift_intensity': channel_shift_intensity,\n 'brightness': brightness\n }\n\n return transform_parameters\n\n def apply_transform(self, x, transform_parameters):\n \"\"\"Applies a transformation to an image according to given parameters.\n\n Args:\n x: 3D tensor, single image.\n transform_parameters: Dictionary with string - parameter pairs\n describing the transformation.\n Currently, the following parameters\n from the dictionary are used:\n - `'theta'`: Float. Rotation angle in degrees.\n - `'tx'`: Float. Shift in the x direction.\n - `'ty'`: Float. Shift in the y direction.\n - `'shear'`: Float. Shear angle in degrees.\n - `'zx'`: Float. Zoom in the x direction.\n - `'zy'`: Float. Zoom in the y direction.\n - `'flip_horizontal'`: Boolean. Horizontal flip.\n - `'flip_vertical'`: Boolean. Vertical flip.\n - `'channel_shift_intensity'`: Float. Channel shift intensity.\n - `'brightness'`: Float. 
Brightness shift intensity.\n\n Returns:\n A transformed version of the input (same shape).\n \"\"\"\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n x = apply_affine_transform(\n x,\n transform_parameters.get('theta', 0),\n transform_parameters.get('tx', 0),\n transform_parameters.get('ty', 0),\n transform_parameters.get('shear', 0),\n transform_parameters.get('zx', 1),\n transform_parameters.get('zy', 1),\n row_axis=img_row_axis,\n col_axis=img_col_axis,\n channel_axis=img_channel_axis,\n fill_mode=self.fill_mode,\n cval=self.cval,\n order=self.interpolation_order)\n\n if transform_parameters.get('channel_shift_intensity') is not None:\n x = apply_channel_shift(x,\n transform_parameters['channel_shift_intensity'],\n img_channel_axis)\n\n if transform_parameters.get('flip_horizontal', False):\n x = flip_axis(x, img_col_axis)\n\n if transform_parameters.get('flip_vertical', False):\n x = flip_axis(x, img_row_axis)\n\n if transform_parameters.get('brightness') is not None:\n x = apply_brightness_shift(x, transform_parameters['brightness'], False)\n\n return x\n\n def random_transform(self, x, seed=None):\n \"\"\"Applies a random transformation to an image.\n\n Args:\n x: 3D tensor, single image.\n seed: Random seed.\n\n Returns:\n A randomly transformed version of the input (same shape).\n \"\"\"\n params = self.get_random_transform(x.shape, seed)\n return self.apply_transform(x, params)\n\n def fit(self, x, augment=False, rounds=1, seed=None):\n \"\"\"Fits the data generator to some sample data.\n\n This computes the internal data stats related to the\n data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n When `rescale` is set to a value, rescaling is applied to\n sample data before computing the internal data stats.\n\n Args:\n x: Sample data. Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, in case\n of RGB data, it should have value 3, and in case\n of RGBA data, it should have value 4.\n augment: Boolean (default: False).\n Whether to fit on randomly augmented samples.\n rounds: Int (default: 1).\n If using data augmentation (`augment=True`),\n this is how many augmentation passes over the data to use.\n seed: Int (default: None). Random seed.\n \"\"\"\n x = np.asarray(x, dtype=self.dtype)\n if x.ndim != 4:\n raise ValueError('Input to `.fit()` should have rank 4. '\n 'Got array with shape: ' + str(x.shape))\n if x.shape[self.channel_axis] not in {1, 3, 4}:\n warnings.warn('Expected input to be images (as Numpy array) '\n 'following the data format convention \"' +\n self.data_format + '\" (channels on axis ' +\n str(self.channel_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' +\n str(self.channel_axis) + '. 
'\n 'However, it was passed an array with shape ' +\n str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +\n ' channels).')\n\n if seed is not None:\n np.random.seed(seed)\n\n x = np.copy(x)\n if self.rescale:\n x *= self.rescale\n\n if augment:\n ax = np.zeros(\n tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype)\n for r in range(rounds):\n for i in range(x.shape[0]):\n ax[i + r * x.shape[0]] = self.random_transform(x[i])\n x = ax\n\n if self.featurewise_center:\n self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.mean = np.reshape(self.mean, broadcast_shape)\n x -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.std = np.reshape(self.std, broadcast_shape)\n x /= (self.std + 1e-6)\n\n if self.zca_whitening:\n n = len(x)\n flat_x = np.reshape(x, (n, -1))\n\n u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)\n s_inv = np.sqrt(n) / (s + self.zca_epsilon)\n self.zca_whitening_matrix = (u * s_inv).dot(u.T)\n\n\n@keras_export('keras.preprocessing.image.random_rotation')\ndef random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random rotation of a Numpy image tensor.\n\n Warning: `tf.keras.preprocessing.image.random_rotation` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomRotation` which provides equivalent functionality as a\n preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n rg: Rotation range, in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Rotated Numpy image tensor.\n \"\"\"\n theta = np.random.uniform(-rg, rg)\n x = apply_affine_transform(x,\n theta=theta,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_shift')\ndef random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random spatial shift of a Numpy image tensor.\n\n Warning: `tf.keras.preprocessing.image.random_shift` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomTranslation` which provides equivalent functionality as\n a preprocessing layer. 
For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Shifted Numpy image tensor.\n \"\"\"\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n x = apply_affine_transform(x,\n tx=tx,\n ty=ty,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_shear')\ndef random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random spatial shear of a Numpy image tensor.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Sheared Numpy image tensor.\n \"\"\"\n shear = np.random.uniform(-intensity, intensity)\n x = apply_affine_transform(\n x,\n shear=shear,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_zoom')\ndef random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random spatial zoom of a Numpy image tensor.\n\n Warning: `tf.keras.preprocessing.image.random_zoom` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomZoom` which provides equivalent functionality as\n a preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. 
Must be 3D.\n zoom_range: Tuple of floats; zoom range for width and height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Zoomed Numpy image tensor.\n\n Raises:\n ValueError: if `zoom_range` isn't a tuple.\n \"\"\"\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two'\n ' floats. Received: %s' % (zoom_range,))\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n x = apply_affine_transform(\n x,\n zx=zx,\n zy=zy,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.apply_channel_shift')\ndef apply_channel_shift(x, intensity, channel_axis=0):\n \"\"\"Performs a channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n \"\"\"\n x = np.rollaxis(x, channel_axis, 0)\n min_x, max_x = np.min(x), np.max(x)\n channel_images = [\n np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_channel_shift')\ndef random_channel_shift(x, intensity_range, channel_axis=0):\n \"\"\"Performs a random channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity_range: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n \"\"\"\n intensity = np.random.uniform(-intensity_range, intensity_range)\n return apply_channel_shift(x, intensity, channel_axis=channel_axis)\n\n\n@keras_export('keras.preprocessing.image.apply_brightness_shift')\ndef apply_brightness_shift(x, brightness, scale=True):\n \"\"\"Performs a brightness shift.\n\n Args:\n x: Input tensor. Must be 3D.\n brightness: Float. The new brightness value.\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Default: True.\n\n Returns:\n Numpy image tensor.\n\n Raises:\n ImportError: if PIL is not available.\n \"\"\"\n if ImageEnhance is None:\n raise ImportError('Using brightness shifts requires PIL. '\n 'Install PIL or Pillow.')\n x_min, x_max = np.min(x), np.max(x)\n local_scale = (x_min < 0) or (x_max > 255)\n x = array_to_img(x, scale=local_scale or scale)\n x = imgenhancer_Brightness = ImageEnhance.Brightness(x)\n x = imgenhancer_Brightness.enhance(brightness)\n x = img_to_array(x)\n if not scale and local_scale:\n x = x / 255 * (x_max - x_min) + x_min\n return x\n\n\n@keras_export('keras.preprocessing.image.random_brightness')\ndef random_brightness(x, brightness_range, scale=True):\n \"\"\"Performs a random brightness shift.\n\n Warning: `tf.keras.preprocessing.image.random_brightness` does not operate on\n tensors and is not recommended for new code. 
Prefer\n `tf.keras.layers.RandomBrightness` which provides equivalent functionality as\n a preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n brightness_range: Tuple of floats; brightness range.\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Default: True.\n\n Returns:\n Numpy image tensor.\n\n Raises:\n ValueError if `brightness_range` isn't a tuple.\n \"\"\"\n if len(brightness_range) != 2:\n raise ValueError(\n '`brightness_range should be tuple or list of two floats. '\n 'Received: %s' % (brightness_range,))\n\n u = np.random.uniform(brightness_range[0], brightness_range[1])\n return apply_brightness_shift(x, u, scale)\n\n\ndef transform_matrix_offset_center(matrix, x, y):\n o_x = float(x) / 2 - 0.5\n o_y = float(y) / 2 - 0.5\n offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n return transform_matrix\n\n\n@keras_export('keras.preprocessing.image.apply_affine_transform')\ndef apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,\n row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., order=1):\n \"\"\"Applies an affine transformation specified by the parameters given.\n\n Args:\n x: 3D numpy array - a 2D image with one or more channels.\n theta: Rotation angle in degrees.\n tx: Width shift.\n ty: Heigh shift.\n shear: Shear angle in degrees.\n zx: Zoom in x direction.\n zy: Zoom in y direction\n row_axis: Index of axis for rows (aka Y axis) in the input\n image. Direction: left to right.\n col_axis: Index of axis for columns (aka X axis) in the input\n image. Direction: top to bottom.\n channel_axis: Index of axis for channels in the input image.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n order: int, order of interpolation\n\n Returns:\n The transformed version of the input.\n\n Raises:\n ImportError: if SciPy is not available.\n \"\"\"\n if scipy is None:\n raise ImportError('Image transformations require SciPy. '\n 'Install SciPy.')\n\n # Input sanity checks:\n # 1. x must 2D image with one or more channels (i.e., a 3D tensor)\n # 2. 
channels must be either first or last dimension\n if np.unique([row_axis, col_axis, channel_axis]).size != 3:\n raise ValueError(\"'row_axis', 'col_axis', and 'channel_axis'\"\n \" must be distinct\")\n\n # shall we support negative indices?\n valid_indices = set([0, 1, 2])\n actual_indices = set([row_axis, col_axis, channel_axis])\n if actual_indices != valid_indices:\n raise ValueError(\n f'Invalid axis\\' indices: {actual_indices - valid_indices}')\n\n if x.ndim != 3:\n raise ValueError('Input arrays must be multi-channel 2D images.')\n if channel_axis not in [0, 2]:\n raise ValueError('Channels are allowed and the first and last dimensions.')\n\n transform_matrix = None\n if theta != 0:\n theta = np.deg2rad(theta)\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n transform_matrix = rotation_matrix\n\n if tx != 0 or ty != 0:\n shift_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n if transform_matrix is None:\n transform_matrix = shift_matrix\n else:\n transform_matrix = np.dot(transform_matrix, shift_matrix)\n\n if shear != 0:\n shear = np.deg2rad(shear)\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n if transform_matrix is None:\n transform_matrix = shear_matrix\n else:\n transform_matrix = np.dot(transform_matrix, shear_matrix)\n\n if zx != 1 or zy != 1:\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n if transform_matrix is None:\n transform_matrix = zoom_matrix\n else:\n transform_matrix = np.dot(transform_matrix, zoom_matrix)\n\n if transform_matrix is not None:\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(\n transform_matrix, h, w)\n x = np.rollaxis(x, channel_axis, 0)\n\n # Matrix construction assumes that coordinates are x, y (in that order).\n # However, regular numpy arrays use y,x (aka i,j) indexing.\n # Possible solution is:\n # 1. Swap the x and y axes.\n # 2. Apply transform.\n # 3. Swap the x and y axes again to restore image-like data ordering.\n # Mathematically, it is equivalent to the following transformation:\n # M' = PMP, where P is the permutation matrix, M is the original\n # transformation matrix.\n if col_axis > row_axis:\n transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]\n transform_matrix[[0, 1]] = transform_matrix[[1, 0]]\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n\n channel_images = [ndimage.interpolation.affine_transform( # pylint: disable=g-complex-comprehension\n x_channel,\n final_affine_matrix,\n final_offset,\n order=order,\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n"
] | [
[
"numpy.random.seed",
"numpy.asarray",
"numpy.copy",
"numpy.stack",
"numpy.rollaxis",
"numpy.reshape",
"numpy.random.choice",
"numpy.cos",
"numpy.unique",
"numpy.mean",
"numpy.deg2rad",
"numpy.random.uniform",
"numpy.sqrt",
"numpy.zeros",
"numpy.arange",
"numpy.max",
"tensorflow.python.util.tf_export.keras_export",
"numpy.min",
"numpy.prod",
"numpy.std",
"numpy.random.permutation",
"numpy.linalg.svd",
"numpy.random.random",
"numpy.clip",
"scipy.ndimage.interpolation.affine_transform",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.random.randint"
]
] |
sunpy/xrayvision | [
"905042be8227688c4088800423dfa8db79e56566"
] | [
"xrayvision/tests/test_clean.py"
] | [
"import numpy as np\nimport astropy.units as u\nfrom astropy.convolution.kernels import Gaussian2DKernel\n\nfrom scipy import signal\n\nfrom ..clean import clean, ms_clean, component, radial_prolate_sphereoidal,\\\n vec_radial_prolate_sphereoidal\nfrom ..transform import dft_map, idft_map\n\n\ndef test_clean_ideal():\n n = m = 65\n pos1 = [15, 30]\n pos2 = [40, 32]\n\n clean_map = np.zeros((n, m))\n clean_map[pos1[0], pos1[1]] = 10.\n clean_map[pos2[0], pos2[1]] = 7.\n\n dirty_beam = np.zeros((n, m))\n dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75\n dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75\n dirty_beam[(n-1)//2, (m-1)//2] = 0.8\n dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')\n\n dirty_map = signal.convolve(clean_map, dirty_beam, mode='same')\n\n # Disable convolution of model with gaussian for testing\n out_map = clean(dirty_map, dirty_beam, clean_beam_width=0.0)\n\n # Within threshold default threshold of 0.1\n assert np.allclose(clean_map, (out_map[0]+out_map[1]), out_map, atol=dirty_beam.max() * 0.1)\n\n\ndef test_component():\n comp = np.zeros((3, 3))\n comp[1, 1] = 1.0\n\n res = component(scale=0, shape=(3, 3))\n assert np.array_equal(res, comp)\n\n res = component(scale=1, shape=(3, 3))\n assert np.array_equal(res, comp)\n\n res = component(scale=2, shape=(6, 6))\n assert np.all(res[0, :] == 0.0)\n assert np.all(res[:, 0] == 0.0)\n assert np.all(res[2:4, 2:4] == res.max())\n\n res = component(scale=3, shape=(7, 7))\n assert np.all(res[0, :] == 0.0)\n assert np.all(res[:, 0] == 0.0)\n assert res[3, 3] == 1\n\n\ndef test_radial_prolate_spheroidal():\n amps = [radial_prolate_sphereoidal(r) for r in [-1.0, 0.0, 0.5, 1.0, 2.0]]\n assert amps[0] == 1.0\n assert amps[1] == 1.0\n assert amps[2] == 0.36106538453111797\n assert amps[3] == 0.0\n assert amps[4] == 0.0\n\n\ndef test_vec_radial_prolate_spheroidal():\n radii = np.linspace(-0.5, 1.5, 1000)\n amps1 = [radial_prolate_sphereoidal(r) for r in radii]\n amps2 = vec_radial_prolate_sphereoidal(radii)\n assert np.allclose(amps1, amps2)\n\n\ndef test_ms_clean_ideal():\n n = m = 65\n pos1 = [15, 30]\n pos2 = [40, 32]\n\n clean_map = np.zeros((n, m))\n clean_map[pos1[0], pos1[1]] = 10.\n clean_map[pos2[0], pos2[1]] = 7.\n\n dirty_beam = np.zeros((n, m))\n dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75\n dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75\n dirty_beam[(n-1)//2, (m-1)//2] = 1.0\n dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')\n\n dirty_map = signal.convolve2d(clean_map, dirty_beam, mode='same')\n\n # Disable convolution of model with gaussian for testing\n model, res = ms_clean(dirty_map, dirty_beam, scales=[1], clean_beam_width=0.0)\n recovered = model + res\n\n # Within threshold default threshold\n assert np.allclose(clean_map, recovered, atol=dirty_beam.max() * 0.1)\n\n\ndef test_clean_sim():\n n = m = 32\n data = Gaussian2DKernel(stddev=3.0, x_size=n, y_size=m).array\n # data = np.zeros((n, m))\n # data[13,13] = 10.0\n # data[12:14,12:14] = 10.0/4.0\n\n half_log_space = np.logspace(np.log10(0.03030303), np.log10(0.48484848), 10)\n\n theta = np.linspace(0, 2*np.pi, 32)\n theta = theta[np.newaxis, :]\n theta = np.repeat(theta, 10, axis=0)\n\n r = half_log_space\n r = r[:, np.newaxis]\n r = np.repeat(r, 32, axis=1)\n\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n sub_uv = np.vstack([x.flatten(), y.flatten()])\n sub_uv = np.hstack([sub_uv, np.zeros((2, 1))]) / u.arcsec\n\n # Factor of 9 is compensate for the factor of 3 * 3 increase in size\n dirty_beam = 
idft_map(np.ones(321)*9, (n*3, m*3), sub_uv)\n\n vis = dft_map(data, sub_uv)\n\n dirty_map = idft_map(vis, (n, m), sub_uv)\n\n clean_map, res = clean(dirty_map, dirty_beam, clean_beam_width=0)\n np.allclose(data, clean_map + res, atol=dirty_beam.max() * 0.1)\n\n"
] | [
[
"numpy.allclose",
"numpy.ones",
"numpy.zeros",
"numpy.repeat",
"numpy.cos",
"scipy.signal.convolve",
"numpy.all",
"numpy.log10",
"scipy.signal.convolve2d",
"numpy.array_equal",
"numpy.sin",
"numpy.pad",
"numpy.linspace"
]
] |
DedeKite/wxPlotLab | [
"808d457aeb897ceb37535bcd11d15b65a0a14cd1"
] | [
"mplotlab/graphics/Navigation.py"
] | [
"# -*-coding:Utf-8 -*\r\n\r\nfrom mplotlab import App\r\nfrom matplotlib.backend_bases import NavigationToolbar2\r\n\r\nimport wx\r\n\r\nclass Cursors:\r\n # this class is only used as a simple namespace\r\n HAND, POINTER, SELECT_REGION, MOVE = list(range(4))\r\ncursors = Cursors()\r\n\r\ncursord = {\r\n cursors.MOVE : wx.CURSOR_HAND,\r\n cursors.HAND : wx.CURSOR_HAND,\r\n cursors.POINTER : wx.CURSOR_ARROW,\r\n cursors.SELECT_REGION : wx.CURSOR_CROSS,\r\n }\r\n\r\nclass Navigation(NavigationToolbar2):\r\n def __init__(self,*a,**k):\r\n NavigationToolbar2.__init__(self, *a,**k)\r\n \r\n def _init_toolbar(self,*args,**kwargs):\r\n pass\r\n \r\n def set_message(self,s): \r\n \"\"\" display in the status bar\r\n the mouseover data (x,y) \r\n \"\"\" \r\n try:\r\n App().mainWin.GetStatusBar().SetStatusText(s,0)\r\n except:\r\n pass\r\n\r\n def set_cursor(self, cursor):\r\n cursor =wx.StockCursor(cursord[cursor])\r\n self.canvas.SetCursor( cursor )\r\n\r\n def dynamic_update(self):\r\n d = self._idle\r\n self._idle = False\r\n if d:\r\n self.canvas.draw()\r\n self._idle = True\r\n\r\n def press(self, event):\r\n if self._active == 'ZOOM':\r\n self.wxoverlay = wx.Overlay()\r\n\r\n def release(self, event):\r\n if self._active == 'ZOOM':\r\n # When the mouse is released we reset the overlay and it\r\n # restores the former content to the window.\r\n self.wxoverlay.Reset()\r\n del self.wxoverlay\r\n\r\n def draw_rubberband(self, event, x0, y0, x1, y1):\r\n # Use an Overlay to draw a rubberband-like bounding box.\r\n\r\n dc = wx.ClientDC(self.canvas)\r\n odc = wx.DCOverlay(self.wxoverlay, dc)\r\n odc.Clear()\r\n\r\n # Mac's DC is already the same as a GCDC, and it causes\r\n # problems with the overlay if we try to use an actual\r\n # wx.GCDC so don't try it.\r\n if 'wxMac' not in wx.PlatformInfo:\r\n dc = wx.GCDC(dc)\r\n\r\n height = self.canvas.figure.bbox.height\r\n y1 = height - y1\r\n y0 = height - y0\r\n\r\n if y1<y0: y0, y1 = y1, y0\r\n if x1<y0: x0, x1 = x1, x0\r\n\r\n w = x1 - x0\r\n h = y1 - y0\r\n rect = wx.Rect(x0, y0, w, h)\r\n\r\n rubberBandColor = '#C0C0FF' # or load from config?\r\n\r\n # Set a pen for the border\r\n color = wx.NamedColour(rubberBandColor)\r\n dc.SetPen(wx.Pen(color, 1))\r\n\r\n # use the same color, plus alpha for the brush\r\n r, g, b = color.Get()\r\n color.Set(r,g,b, 0x60)\r\n dc.SetBrush(wx.Brush(color))\r\n dc.DrawRectangleRect(rect)\r\n "
] | [
[
"matplotlib.backend_bases.NavigationToolbar2.__init__"
]
] |
Walon1998/dace | [
"95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0",
"95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0",
"95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0",
"95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0"
] | [
"samples/polybench/lu.py",
"tests/symbol_dependent_transients_test.py",
"samples/tensorflow/dataset_reader.py",
"tests/transformations/maptoforloop_test.py"
] | [
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport math\nimport numpy as np\nimport dace\nimport polybench\n\nN = dace.symbol('N')\n\n#datatypes = [dace.float64, dace.int32, dace.float32]\ndatatype = dace.float64\n\n# Dataset sizes\nsizes = [{N: 40}, {N: 120}, {N: 400}, {N: 2000}, {N: 4000}]\n\nargs = [([N, N], datatype)]\n\n\ndef init_array(A):\n n = N.get()\n\n for i in range(0, n, 1):\n for j in range(0, i + 1, 1):\n # Python does modulo, while C does remainder ...\n A[i, j] = datatype(-(j % n)) / n + 1\n for j in range(i + 1, n, 1):\n A[i, j] = datatype(0)\n A[i, i] = datatype(1)\n\n A[:] = np.dot(A, np.transpose(A))\n\n\n@dace.program(datatype[N, N])\ndef lu(A):\n for i in range(0, N, 1):\n for j in range(0, i, 1):\n\n @dace.map\n def k_loop1(k: _[0:j]):\n i_in << A[i, k]\n j_in << A[k, j]\n out >> A(1, lambda x, y: x + y)[i, j]\n out = -i_in * j_in\n\n @dace.tasklet\n def div():\n ij_in << A[i, j]\n jj_in << A[j, j]\n out >> A[i, j]\n out = ij_in / jj_in\n\n for j in range(i, N, 1):\n\n @dace.map\n def k_loop2(k: _[0:i]):\n i_in << A[i, k]\n j_in << A[k, j]\n out >> A(1, lambda x, y: x + y)[i, j]\n out = -i_in * j_in\n\n\nif __name__ == '__main__':\n polybench.main(sizes, args, [(0, 'A')], init_array, lu)\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport numpy as np\nimport pytest\nfrom dace.libraries import standard\nfrom dace.transformation import interstate\n\n\ndef _make_sdfg(name, storage=dace.dtypes.StorageType.CPU_Heap, isview=False):\n\n N = dace.symbol('N', dtype=dace.int32, integer=True, positive=True)\n i = dace.symbol('i', dtype=dace.int32, integer=True)\n\n sdfg = dace.SDFG(name)\n _, A = sdfg.add_array('A', [N, N, N], dtype=dace.float64)\n _, B = sdfg.add_array('B', [N], dtype=dace.float64)\n if isview:\n _, tmp1 = sdfg.add_view('tmp1', [N - 4, N - 4, N - i], dtype=dace.float64, storage=storage, strides=A.strides)\n else:\n _, tmp1 = sdfg.add_transient('tmp1', [N - 4, N - 4, N - i], dtype=dace.float64, storage=storage)\n _, tmp2 = sdfg.add_transient('tmp2', [1], dtype=dace.float64, storage=storage)\n\n begin_state = sdfg.add_state(\"begin\", is_start_state=True)\n guard_state = sdfg.add_state(\"guard\")\n body1_state = sdfg.add_state(\"body1\")\n body2_state = sdfg.add_state(\"body2\")\n body3_state = sdfg.add_state(\"body3\")\n end_state = sdfg.add_state(\"end\")\n\n sdfg.add_edge(begin_state, guard_state, dace.InterstateEdge(assignments=dict(i='0')))\n sdfg.add_edge(guard_state, body1_state, dace.InterstateEdge(condition=f'i<{N}'))\n sdfg.add_edge(guard_state, end_state, dace.InterstateEdge(condition=f'i>={N}'))\n sdfg.add_edge(body1_state, body2_state, dace.InterstateEdge())\n sdfg.add_edge(body2_state, body3_state, dace.InterstateEdge())\n sdfg.add_edge(body3_state, guard_state, dace.InterstateEdge(assignments=dict(i='i+1')))\n\n if not isview:\n read_a = body1_state.add_read('A')\n write_tmp1 = body1_state.add_write('tmp1')\n body1_state.add_nedge(read_a, write_tmp1, dace.Memlet(f'A[2:{N}-2, 2:{N}-2, i:{N}]'))\n\n if isview:\n read_a = body2_state.add_read('A')\n read_tmp1 = body2_state.add_access('tmp1')\n body2_state.add_nedge(read_a, read_tmp1, dace.Memlet(f'A[2:{N}-2, 2:{N}-2, i:{N}]'))\n else:\n read_tmp1 = body2_state.add_read('tmp1')\n rednode = standard.Reduce(wcr='lambda a, b : a + b', identity=0)\n if storage == dace.dtypes.StorageType.GPU_Global:\n rednode.implementation = 'CUDA (device)'\n elif storage == dace.dtypes.StorageType.FPGA_Global:\n rednode.implementation = 'FPGAPartialReduction'\n body2_state.add_node(rednode)\n write_tmp2 = body2_state.add_write('tmp2')\n body2_state.add_nedge(read_tmp1, rednode, dace.Memlet.from_array('tmp1', tmp1))\n body2_state.add_nedge(rednode, write_tmp2, dace.Memlet('tmp2[0]'))\n\n read_tmp2 = body3_state.add_read('tmp2')\n write_b = body3_state.add_write('B')\n body3_state.add_nedge(read_tmp2, write_b, dace.Memlet('B[i]'))\n\n return sdfg\n\n\ndef test_symbol_dependent_heap_array():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_heap_array\")\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_register_array():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_register_array\", storage=dace.dtypes.StorageType.Register)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = 
A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_threadlocal_array():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_threadlocal_array\", storage=dace.dtypes.StorageType.CPU_ThreadLocal)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_cpu_view():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_cpu_view\", isview=True)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.gpu\ndef test_symbol_dependent_gpu_global_array():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_gpu_global_array\", storage=dace.dtypes.StorageType.GPU_Global)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.gpu\ndef test_symbol_dependent_pinned_array():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_pinned_array\", storage=dace.dtypes.StorageType.CPU_Pinned)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.skip # @pytest.mark.gpu\ndef test_symbol_dependent_gpu_view():\n # NOTE: This test cannot produce the correct result since the input\n # data of the reduction are not contiguous and cub:reduce doesn't support\n # such data.\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_gpu_view\", storage=dace.dtypes.StorageType.GPU_Global, isview=True)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.skip\ndef test_symbol_dependent_fpga_global_array():\n A = np.random.randn(10, 10, 10)\n B = np.ndarray(10, dtype=np.float64)\n sdfg = _make_sdfg(\"symbol_dependent_fpga_global_array\", storage=dace.dtypes.StorageType.FPGA_Global)\n # Compile manually to avoid simplification\n sdfg_exec = sdfg.compile()\n sdfg_exec(A=A, B=B, N=10)\n del sdfg_exec\n B_ref = np.ndarray(10, dtype=np.float64)\n for i in range(10):\n tmp = A[2:-2, 2:-2, i:]\n B_ref[i] = np.sum(tmp)\n assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_array_in_map():\n @dace.program\n def symbol_dependent_array_in_map(A: dace.float32[10]):\n out = np.ndarray(10, dtype=np.float32)\n for i in dace.map[0:10]:\n tmp = A[0:i + 1]\n out[i] = np.sum(tmp)\n return out\n\n # Compile manually to avoid simplification\n sdfg = 
symbol_dependent_array_in_map.to_sdfg(simplify=False)\n sdfg.apply_transformations_repeated(interstate.StateFusion)\n sdfg.apply_transformations_repeated(interstate.InlineSDFG)\n # NOTE: Temporary fix for issue with symbols/free_symbols\n if 'i' in sdfg.free_symbols:\n sdfg.remove_symbol('i')\n func = sdfg.compile()\n A = np.random.randn(10).astype(np.float32)\n val = func(A=A)\n ref = np.cumsum(A)\n assert (np.allclose(val, ref))\n\n\nif __name__ == '__main__':\n test_symbol_dependent_heap_array()\n test_symbol_dependent_register_array()\n test_symbol_dependent_threadlocal_array()\n test_symbol_dependent_cpu_view()\n test_symbol_dependent_gpu_global_array()\n test_symbol_dependent_pinned_array()\n # test_symbol_dependent_gpu_view()\n # test_symbol_dependent_fpga_global_array()\n test_symbol_dependent_array_in_map()\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport tensorflow as tf\nimport numpy as np\nfrom dace.frontend.tensorflow import TFSession\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef data_input_fn(filenames, batch_size=2, shuffle=False):\n def _parser(record):\n features = {\n \"label\": tf.FixedLenFeature([], tf.int64),\n \"image_raw\": tf.FixedLenFeature([], tf.string),\n }\n parsed_record = tf.parse_single_example(record, features)\n image = tf.decode_raw(parsed_record[\"image_raw\"], tf.float32)\n image = tf.reshape(image, [28, 28])\n\n label = tf.cast(parsed_record[\"label\"], tf.int32)\n label = tf.one_hot(indices=label, depth=10, on_value=1, off_value=0)\n return image, tf.one_hot(label, depth=10)\n\n def _input_fn():\n dataset = tf.data.TFRecordDataset(filenames).map(_parser)\n if shuffle:\n dataset = dataset.shuffle(buffer_size=10000)\n\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n\n return features, labels\n\n return _input_fn\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('USAGE: dataset_reader.py <FILENAME> [FILENAMES...]')\n exit(1)\n\n filenames = list(sys.argv[1:])\n\n with tf.Session() as sess:\n output_tf = sess.run(data_input_fn(filenames)())[0]\n for _out in output_tf:\n _out = np.multiply(255.0, _out)\n _out = _out.astype(np.uint8)\n plt.imshow(_out)\n plt.show()\n\n with TFSession() as sess:\n output_dace = sess.run(data_input_fn(filenames)())[0]\n for _out in output_dace:\n _out = np.multiply(255.0, _out)\n _out = _out.astype(np.uint8)\n plt.imshow(_out)\n plt.show()\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\n\"\"\" A test for the MapToForLoop transformation. \"\"\"\n\nimport dace\nimport numpy as np\nfrom dace.transformation.dataflow import MapExpansion, MapToForLoop\n\n\n@dace.program\ndef map2for(A: dace.float64[20, 20, 20]):\n for k in range(1, 19):\n for i, j in dace.map[0:20, 0:20]:\n with dace.tasklet:\n inp << A[i, j, k]\n inp2 << A[i, j, k - 1]\n out >> A[i, j, k + 1]\n out = inp + inp2\n\n\ndef test_map2for_overlap():\n A = np.random.rand(20, 20, 20)\n expected = np.copy(A)\n for k in range(1, 19):\n expected[:, :, k + 1] = expected[:, :, k] + expected[:, :, k - 1]\n\n sdfg = map2for.to_sdfg()\n assert sdfg.apply_transformations([MapExpansion, MapToForLoop]) == 2\n sdfg(A=A)\n assert np.allclose(A, expected)\n\n\nif __name__ == '__main__':\n test_map2for_overlap()"
] | [
[
"numpy.transpose"
],
[
"numpy.allclose",
"numpy.sum",
"numpy.cumsum",
"numpy.random.randn",
"numpy.ndarray"
],
[
"tensorflow.data.TFRecordDataset",
"numpy.multiply",
"tensorflow.reshape",
"tensorflow.parse_single_example",
"tensorflow.decode_raw",
"tensorflow.cast",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"tensorflow.one_hot",
"tensorflow.FixedLenFeature",
"tensorflow.Session"
],
[
"numpy.allclose",
"numpy.random.rand",
"numpy.copy"
]
] |
Theomat/MPSEAS | [
"91f9c991e2061a7d230e491210d2c93005fd2236"
] | [
"pseas/runnable/print_table_step1.py"
] | [
"import pandas as pd\nimport numpy as np\n\nCOLORS_QTY: int = 5\n# =============================================================================\n# Argument parsing.\n# =============================================================================\nimport argparse\n\nfrom scipy import integrate\nargument_parser: argparse.ArgumentParser = argparse.ArgumentParser(\n description=\"Plot figures based on run data.\")\n\nargument_default_values = {\n\t\"suffix\": 'kissat_ibm',\n \"folder\": \".\"\n}\n\nargument_parser.add_argument('-f', '--folder',\n type=str,\n action='store',\n default=argument_default_values['folder'],\n help=\"Folder in which to look for the file (default: '.')\"\n )\nargument_parser.add_argument('-s', '--suffix',\n type=str,\n action='store',\n default=argument_default_values['suffix'],\n help=\"File suffix used in produce_run_data (default: 'kissat_ibm')\"\n )\nparsed_parameters = argument_parser.parse_args()\n\nfolder: str = parsed_parameters.folder \nsuffix: str = parsed_parameters.suffix\n# =============================================================================\n# Finished parsing\n# =============================================================================\ndef __rename_strategies__(df: pd.DataFrame) -> pd.DataFrame:\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \".*-discrimination-based\", \"discrimination-based\", regex=True)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \"Info. over Decision/Time\", \"information-based\", regex=False)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \"Random\", \"random\", regex=False)\n\n # Rename discrimination component\n df[\"strategy\"] = df[\"strategy\"].str.replace(\" 10100%\", \"\", regex=False)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\".00%\", \"%\", regex=False)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \"Subset\", \"subset\", regex=False)\n\n df[\"selection\"] = df[\"strategy\"].str.extract(r'^([^+]*) \\+ .*')\n df[\"discrimination\"] = df[\"strategy\"].str.extract(r'^[^+]* \\+ (.*)')\n return df\n\ndef __filter_best_strategies__(df: pd.DataFrame) -> pd.DataFrame:\n # Remove all that don't have timeout correction\n df[\"baseline\"] = df[\"selection\"].str.contains(\n \"random\") | df[\"discrimination\"].str.contains(\"subset\")\n return df\n\n\ndico = {}\nfor i, configurations in enumerate(range(10, 60, 10)):\n for j, split in enumerate(range(10, 60, 10)):\n ratio = split / 100\n detailed_df = pd.read_csv(f\"{folder}/detailed_runs_{suffix}_{configurations}_{ratio}.csv\")\n detailed_df = detailed_df.drop(\"Unnamed: 0\", axis=1)\n detailed_df = __rename_strategies__(detailed_df)\n df = __filter_best_strategies__(detailed_df)\n # Remove subset\n df = df[~df[\"discrimination\"].str.contains(\"subset\")]\n # Take mean performance\n df = df.groupby([\"selection\", \"time\"]).mean().reset_index()\n df[\"prediction\"] *= 100\n\n for method in df[\"selection\"].unique():\n if method not in dico:\n dico[method] = np.zeros((5, 5))\n\n data = df[df[\"selection\"] == method]\n data = data[[\"prediction\", \"time\"]].to_numpy()\n auc = integrate.trapezoid(data[:, 0], dx=1, axis=0)\n dico[method][i, j] = auc / 10000 * 100\n\nCOLOR_NAMES = [f\"color{i+1}\" for i in range(COLORS_QTY)]\n\nfor method, values in dico.items():\n print(\"\\\\begin{table}\")\n print(\"\\t\\\\centering\")\n print(\"\\t\\\\caption{Percentage of total AUC Evolution for \" + method + \" on \" + suffix.replace(\"_\", \" \") + \"}\")\n print(\"\\t\\\\begin{tabular}{\"+ (\"c\" * 6) + \"}\")\n 
print(\"\\t\\t\\\\toprule\")\n print(\"\\t\\tConfigurations & 10 & 20 & 30 & 40 & 50 \\\\\\\\\")\n mini = np.min(values) \n maxi = np.max(values)\n scale = maxi - mini\n unit = scale / (len(COLOR_NAMES) - 1)\n for j, percent in enumerate(range(10, 60, 10)):\n line_values = [float(values[i, j])\n for i, _ in enumerate(range(10, 60, 10))]\n colors = [COLOR_NAMES[round((x - mini) / unit)] for x in line_values]\n print(f\"\\t\\t{percent}\\\\% & \" + \" & \".join(f\"\\\\colorbox{{{color}!30}}{{{val:.1f}}}\" for color, val in zip(colors, line_values)) + \"\\\\\\\\\")\n print(\"\\t\\t\\\\bottomrule\")\n print(\"\\t\\\\end{tabular}\")\n print(\"\\\\end{table}\")\n\n\n"
] | [
[
"numpy.zeros",
"pandas.read_csv",
"numpy.max",
"numpy.min",
"scipy.integrate.trapezoid"
]
] |
KoutaOhishi/burger_war_dev | [
"9a7e21d631dc7e82f5341450ddafdc8ed32d2ac1"
] | [
"burger_war_dev/scripts/waypoint.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport math\nimport numpy as np\n\nFIELD_SCORE_NUM_OFFSET=6\n\nclass Waypoints:\n\n def __init__(self, path, side):\n self.points = []\n self.number = 0\n self.Waypoints_Lap = 0\n self.next_target_idx = -1\n self.all_field_score = np.ones([18]) # field score state\n self._load_waypoints(path, side)\n print ('[waypoint]number of waypoints: '+str(len(self.points)))\n\n def _load_waypoints(self, path, side):\n with open(path) as f:\n lines = csv.reader(f)\n for l in lines:\n # x,y,radian,target_idx(refer main code)\n point = [float(n) for n in l]\n point[2] = point[2]*math.pi/180.0\n if side == 'r':\n point[3] = int(point[3])\n else:\n point[3] = int(point[4])\n print(\" \"+str(point))\n self.points.append(point[0:4])\n\n def get_next_waypoint(self):\n self.number = self.number+1\n if self.number == len(self.points):\n self.Waypoints_Lap = self.Waypoints_Lap+1\n print(\"[waypoint]next lap!!!!!!\")\n self.number = 0\n\n #print(\"[waypoint]search target !!!!!!\", self.all_field_score)\n for i in range(self.number, len(self.points))+range(self.number):\n score_num = self.points[i][3]\n #print(\"[waypoint]\"+str(score_num))\n\n # 得点と関係ないwaypoint\n if score_num == -1:\n # 1週目は得点と関係ないwaypointも辿る。\n if self.Waypoints_Lap == 0:\n return self.points[self.number][0:3]\n continue\n\n # 得点と関係あるwaypoint\n if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:\n # if already get score, skip search\n continue\n else:\n # if not get score, go to target\n print(\"[waypoint]\"+str(i)+\"/\"+str(len(self.points)))\n self.number = i\n return self.points[i][0:3]\n\n print(\"[waypoint]got all field score !!!\")\n return self.points[self.number][0:3]\n\n def get_current_waypoint(self):\n return self.points[self.number]\n\n def get_current_target_number(self):\n # target No.\n return self.points[self.number][3]\n\n def get_any_waypoint(self, n):\n return self.points[n]\n\n def set_number(self, n):\n self.number = n\n\n def set_field_score(self, n):\n self.all_field_score = n\n # print(self.all_field_score)\n\n def check_if_get_field_score(self, n):\n score_num = n\n if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:\n return True\n else:\n return False\n\n\n# if __name__ == \"__main__\":\n # Waypoints('waypoints.csv')\n"
] | [
[
"numpy.ones"
]
] |
kiss2u/google-research | [
"5b70d349a6af2f5ec1694bfd5341e6b3fb526947"
] | [
"saccader/visual_attention/saccader_classnet.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Saccader-Classification network model.\n\nSaccader model is an image classification model with a hard attention mechanism.\nThe model uses the saccader model for visual attention\nand uses a separate network for classification.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom saccader import utils\nfrom saccader.visual_attention import saccader\nfrom tensorflow.contrib import slim as contrib_slim\nfrom tensorflow_models.slim.nets import nets_factory\nfrom tensorflow_models.slim.nets.nasnet import nasnet\n\n\nslim = contrib_slim\nSaccader = saccader.Saccader\n\n\nclass SaccaderClassNet(Saccader):\n \"\"\"Saccader-Classification Model.\n\n Network that performs classification on images by taking glimpses at\n different locations on an image.\n\n Attributes:\n num_classes: (Integer) Number of classification classes.\n variable_scope: (String) Name of model variable scope.\n attention_groups: (Integer) Number of groups in attention network.\n attention_layers_per_group: (Integer) Number of layers in each group in\n attention network.\n saccader_cell: Saccader Cell object.\n representation_network: Representation network object.\n glimpse_shape: 2-D tuple of integers indicating glimpse shape.\n glimpse_shape_classnet: 2-D tuple of integers indicating classification\n network glimpse shape.\n glimpse_shape_saccader: 2-D tuple of integers indicating saccader\n glimpse shape.\n var_list_representation_network: List of variables for the representation\n network.\n var_list_attention_network: List of variables for the attention network.\n var_list_saccader_cell: List of variables for the saccader cell.\n var_list_location: List of variables for the location network.\n var_list_classification: List of variables for the classification network.\n var_list_classnet: List of variables for the classification network.\n var_list: List of all model variables.\n init_op: Initialization operations for model variables.\n \"\"\"\n\n def __init__(self, config, variable_scope=\"saccader_classnet\"):\n Saccader.__init__(self, config, variable_scope=variable_scope+\"/saccader\")\n self.var_list_saccader = []\n self.var_list_classnet = []\n self.classnet_type = config.classnet_type\n self.num_classes = config.num_classes\n self.variable_scope_classnet = variable_scope+\"/\"+self.classnet_type\n self.glimpse_shape_saccader = (-1, -1)\n self.glimpse_shape_classnet = config.glimpse_shape\n\n def __call__(self,\n images_saccader,\n images_classnet,\n num_times,\n is_training_saccader=False,\n is_training_classnet=False,\n policy=\"learned\",\n stop_gradient_after_representation=False):\n\n logits, locations_t, best_locations_t, endpoints = Saccader.__call__(\n self,\n images_saccader,\n num_times,\n is_training=is_training_saccader,\n policy=policy,\n 
stop_gradient_after_representation=stop_gradient_after_representation)\n\n self.glimpse_shape_saccader = self.glimpse_shape\n image_size_saccader = images_saccader.shape.as_list()[1]\n image_size_classnet = images_classnet.shape.as_list()[1]\n if self.glimpse_shape_classnet[0] < 0:\n self.glimpse_shape_classnet = tuple([int(\n image_size_classnet / image_size_saccader *\n self.glimpse_shape[0])] * 2)\n self.glimpse_shape = self.glimpse_shape_classnet\n\n images_glimpse_t = []\n for locations in locations_t:\n images_glimpse = utils.extract_glimpse(\n images_classnet, size=self.glimpse_shape_classnet, offsets=locations)\n images_glimpse_t.append(images_glimpse)\n\n batch_size = images_classnet.shape.as_list()[0]\n images_glimpse_t = tf.concat(images_glimpse_t, axis=0)\n\n variables_before = set(tf.global_variables())\n reuse = True if self.var_list_classnet else False\n with tf.variable_scope(self.variable_scope_classnet, reuse=reuse):\n if self.classnet_type == \"nasnet\":\n classnet_config = nasnet.large_imagenet_config()\n classnet_config.use_aux_head = 0\n classnet_config.drop_path_keep_prob = 1.0\n with slim.arg_scope(nasnet.nasnet_large_arg_scope()):\n classnet_logits, endpoints_ = nasnet.build_nasnet_large(\n images_glimpse_t, self.num_classes,\n is_training=is_training_classnet,\n config=classnet_config)\n elif self.classnet_type == \"resnet_v2_50\":\n network = nets_factory.get_network_fn(\n \"resnet_v2_50\", self.num_classes, is_training=is_training_classnet)\n classnet_logits, endpoints_ = network(images_glimpse_t)\n\n endpoints[\"classnet\"] = endpoints_\n variables_after = set(tf.global_variables())\n logits_t = tf.reshape(classnet_logits, (num_times, batch_size, -1))\n logits = tf.reduce_mean(logits_t, axis=0)\n if not reuse:\n self.var_list_saccader = self.var_list_classification + self.var_list_location\n self.var_list_classnet = [\n v for v in list(variables_after-variables_before)\n if \"global_step\" not in v.op.name]\n self.var_list.extend(self.var_list_classnet)\n self.init_op = tf.variables_initializer(var_list=self.var_list)\n\n return logits, locations_t, best_locations_t, endpoints\n"
] | [
[
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.variables_initializer",
"tensorflow.compat.v1.reshape"
]
] |
qzhong0605/tensorboardplugins | [
"92bfc7ca96b933cdbdf074a08f26f5c715d8421d"
] | [
"tensorboard/plugins/interactive_inference/witwidget/notebook/base.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport googleapiclient.discovery\nimport os\nimport tensorflow as tf\nfrom IPython import display\nfrom google.protobuf import json_format\nfrom numbers import Number\nfrom six import ensure_str\nfrom tensorboard.plugins.interactive_inference.utils import inference_utils\n\n# Constants used in mutant inference generation.\nNUM_MUTANTS_TO_GENERATE = 10\nNUM_EXAMPLES_FOR_MUTANT_ANALYSIS = 50\n\n# Custom user agent for tracking number of calls to Cloud AI Platform.\nUSER_AGENT_FOR_CAIP_TRACKING = 'WhatIfTool'\n\nclass WitWidgetBase(object):\n \"\"\"WIT widget base class for common code between Jupyter and Colab.\"\"\"\n\n def __init__(self, config_builder):\n \"\"\"Constructor for WitWidgetBase.\n\n Args:\n config_builder: WitConfigBuilder object containing settings for WIT.\n \"\"\"\n tf.logging.set_verbosity(tf.logging.WARN)\n config = config_builder.build()\n copied_config = dict(config)\n self.estimator_and_spec = (\n dict(config.get('estimator_and_spec'))\n if 'estimator_and_spec' in config else {})\n self.compare_estimator_and_spec = (\n dict(config.get('compare_estimator_and_spec'))\n if 'compare_estimator_and_spec' in config else {})\n if 'estimator_and_spec' in copied_config:\n del copied_config['estimator_and_spec']\n if 'compare_estimator_and_spec' in copied_config:\n del copied_config['compare_estimator_and_spec']\n\n self.custom_predict_fn = (\n config.get('custom_predict_fn')\n if 'custom_predict_fn' in config else None)\n self.compare_custom_predict_fn = (\n config.get('compare_custom_predict_fn')\n if 'compare_custom_predict_fn' in config else None)\n self.adjust_prediction_fn = (\n config.get('adjust_prediction')\n if 'adjust_prediction' in config else None)\n self.compare_adjust_prediction_fn = (\n config.get('compare_adjust_prediction')\n if 'compare_adjust_prediction' in config else None)\n self.adjust_example_fn = (\n config.get('adjust_example')\n if 'adjust_example' in config else None)\n self.compare_adjust_example_fn = (\n config.get('compare_adjust_example')\n if 'compare_adjust_example' in config else None)\n if 'custom_predict_fn' in copied_config:\n del copied_config['custom_predict_fn']\n if 'compare_custom_predict_fn' in copied_config:\n del copied_config['compare_custom_predict_fn']\n if 'adjust_prediction' in copied_config:\n del copied_config['adjust_prediction']\n if 'compare_adjust_prediction' in copied_config:\n del copied_config['compare_adjust_prediction']\n if 'adjust_example' in copied_config:\n del copied_config['adjust_example']\n if 'compare_adjust_example' in copied_config:\n del copied_config['compare_adjust_example']\n\n self.set_examples(config['examples'])\n del copied_config['examples']\n\n self.config = copied_config\n\n # If using AI Platform for prediction, set the correct custom prediction\n # functions.\n if self.config.get('use_aip'):\n self.custom_predict_fn = self._predict_aip_model\n if 
self.config.get('compare_use_aip'):\n self.compare_custom_predict_fn = self._predict_aip_compare_model\n\n def _get_element_html(self):\n return \"\"\"\n <link rel=\"import\" href=\"/nbextensions/wit-widget/wit_jupyter.html\">\"\"\"\n\n def set_examples(self, examples):\n \"\"\"Sets the examples shown in WIT.\n\n The examples are initially set by the examples specified in the config\n builder during construction. This method can change which examples WIT\n displays.\n \"\"\"\n self.examples = [json_format.MessageToJson(ex) for ex in examples]\n self.updated_example_indices = set(range(len(examples)))\n\n def json_to_proto(self, json):\n ex = (tf.train.SequenceExample()\n if self.config.get('are_sequence_examples')\n else tf.train.Example())\n json_format.Parse(json, ex)\n return ex\n\n def infer_impl(self):\n \"\"\"Performs inference on examples that require inference.\"\"\"\n indices_to_infer = sorted(self.updated_example_indices)\n examples_to_infer = [\n self.json_to_proto(self.examples[index]) for index in indices_to_infer]\n infer_objs = []\n attribution_objs = []\n serving_bundle = inference_utils.ServingBundle(\n self.config.get('inference_address'),\n self.config.get('model_name'),\n self.config.get('model_type'),\n self.config.get('model_version'),\n self.config.get('model_signature'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n self.config.get('predict_output_tensor'),\n self.estimator_and_spec.get('estimator'),\n self.estimator_and_spec.get('feature_spec'),\n self.custom_predict_fn)\n (predictions, attributions) = (\n inference_utils.run_inference_for_inference_results(\n examples_to_infer, serving_bundle))\n infer_objs.append(predictions)\n attribution_objs.append(attributions)\n if ('inference_address_2' in self.config or\n self.compare_estimator_and_spec.get('estimator') or\n self.compare_custom_predict_fn):\n serving_bundle = inference_utils.ServingBundle(\n self.config.get('inference_address_2'),\n self.config.get('model_name_2'),\n self.config.get('model_type'),\n self.config.get('model_version_2'),\n self.config.get('model_signature_2'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n self.config.get('predict_output_tensor'),\n self.compare_estimator_and_spec.get('estimator'),\n self.compare_estimator_and_spec.get('feature_spec'),\n self.compare_custom_predict_fn)\n (predictions, attributions) = (\n inference_utils.run_inference_for_inference_results(\n examples_to_infer, serving_bundle))\n infer_objs.append(predictions)\n attribution_objs.append(attributions)\n self.updated_example_indices = set()\n return {\n 'inferences': {'indices': indices_to_infer, 'results': infer_objs},\n 'label_vocab': self.config.get('label_vocab'),\n 'attributions': attribution_objs}\n\n def infer_mutants_impl(self, info):\n \"\"\"Performs mutant inference on specified examples.\"\"\"\n example_index = int(info['example_index'])\n feature_name = info['feature_name']\n examples = (self.examples if example_index == -1\n else [self.examples[example_index]])\n examples = [self.json_to_proto(ex) for ex in examples]\n scan_examples = [self.json_to_proto(ex) for ex in self.examples[0:50]]\n serving_bundles = []\n serving_bundles.append(inference_utils.ServingBundle(\n self.config.get('inference_address'),\n self.config.get('model_name'),\n self.config.get('model_type'),\n self.config.get('model_version'),\n self.config.get('model_signature'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n 
self.config.get('predict_output_tensor'),\n self.estimator_and_spec.get('estimator'),\n self.estimator_and_spec.get('feature_spec'),\n self.custom_predict_fn))\n if ('inference_address_2' in self.config or\n self.compare_estimator_and_spec.get('estimator') or\n self.compare_custom_predict_fn):\n serving_bundles.append(inference_utils.ServingBundle(\n self.config.get('inference_address_2'),\n self.config.get('model_name_2'),\n self.config.get('model_type'),\n self.config.get('model_version_2'),\n self.config.get('model_signature_2'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n self.config.get('predict_output_tensor'),\n self.compare_estimator_and_spec.get('estimator'),\n self.compare_estimator_and_spec.get('feature_spec'),\n self.compare_custom_predict_fn))\n viz_params = inference_utils.VizParams(\n info['x_min'], info['x_max'],\n scan_examples, 10,\n info['feature_index_pattern'])\n return inference_utils.mutant_charts_for_feature(\n examples, feature_name, serving_bundles, viz_params)\n\n def get_eligible_features_impl(self):\n \"\"\"Returns information about features eligible for mutant inference.\"\"\"\n examples = [self.json_to_proto(ex) for ex in self.examples[\n 0:NUM_EXAMPLES_FOR_MUTANT_ANALYSIS]]\n return inference_utils.get_eligible_features(\n examples, NUM_MUTANTS_TO_GENERATE)\n\n def create_sprite(self):\n \"\"\"Returns an encoded image of thumbnails for image examples.\"\"\"\n # Generate a sprite image for the examples if the examples contain the\n # standard encoded image feature.\n if not self.examples:\n return None\n example_to_check = self.json_to_proto(self.examples[0])\n feature_list = (example_to_check.context.feature\n if self.config.get('are_sequence_examples')\n else example_to_check.features.feature)\n if 'image/encoded' in feature_list:\n example_strings = [\n self.json_to_proto(ex).SerializeToString()\n for ex in self.examples]\n encoded = ensure_str(base64.b64encode(\n inference_utils.create_sprite_image(example_strings)))\n return 'data:image/png;base64,{}'.format(encoded)\n else:\n return None\n\n def _json_from_tf_examples(self, tf_examples):\n json_exs = []\n feature_names = self.config.get('feature_names')\n for ex in tf_examples:\n # Create a JSON list or dict for each example depending on settings.\n # Strip out any explicitly-labeled target feature from the example.\n # This is needed because AI Platform models that accept JSON cannot handle\n # when non-input features are provided as part of the object to run\n # prediction on.\n if self.config.get('uses_json_list'):\n json_ex = []\n for feat in ex.features.feature:\n if feature_names and feat in feature_names:\n feat_idx = feature_names.index(feat)\n else:\n feat_idx = int(feat)\n if (feat == self.config.get('target_feature') or\n feat_idx == self.config.get('target_feature')):\n continue\n # Ensure the example value list is long enough to add the next feature\n # from the tf.Example.\n if feat_idx >= len(json_ex):\n json_ex.extend([None] * (feat_idx - len(json_ex) + 1))\n if ex.features.feature[feat].HasField('int64_list'):\n json_ex[feat_idx] = ex.features.feature[feat].int64_list.value[0]\n elif ex.features.feature[feat].HasField('float_list'):\n json_ex[feat_idx] = ex.features.feature[feat].float_list.value[0]\n else:\n json_ex[feat_idx] = ensure_str(\n ex.features.feature[feat].bytes_list.value[0])\n else:\n json_ex = {}\n for feat in ex.features.feature:\n if feat == self.config.get('target_feature'):\n continue\n if 
ex.features.feature[feat].HasField('int64_list'):\n json_ex[feat] = ex.features.feature[feat].int64_list.value[0]\n elif ex.features.feature[feat].HasField('float_list'):\n json_ex[feat] = ex.features.feature[feat].float_list.value[0]\n else:\n json_ex[feat] = ensure_str(\n ex.features.feature[feat].bytes_list.value[0])\n json_exs.append(json_ex)\n return json_exs\n\n def _predict_aip_model(self, examples):\n return self._predict_aip_impl(\n examples, self.config.get('inference_address'),\n self.config.get('model_name'), self.config.get('model_signature'),\n self.config.get('force_json_input'), self.adjust_example_fn,\n self.adjust_prediction_fn)\n\n def _predict_aip_compare_model(self, examples):\n return self._predict_aip_impl(\n examples, self.config.get('inference_address_2'),\n self.config.get('model_name_2'), self.config.get('model_signature_2'),\n self.config.get('compare_force_json_input'),\n self.compare_adjust_example_fn,\n self.compare_adjust_prediction_fn)\n\n def _predict_aip_impl(self, examples, project, model, version, force_json,\n adjust_example, adjust_prediction):\n \"\"\"Custom prediction function for running inference through AI Platform.\"\"\"\n\n # Set up environment for GCP call for specified project.\n os.environ['GOOGLE_CLOUD_PROJECT'] = project\n\n service = googleapiclient.discovery.build('ml', 'v1', cache_discovery=False)\n name = 'projects/{}/models/{}'.format(project, model)\n if version is not None:\n name += '/versions/{}'.format(version)\n\n # Properly package the examples to send for prediction.\n if self.config.get('uses_json_input') or force_json:\n examples_for_predict = self._json_from_tf_examples(examples)\n else:\n examples_for_predict = [{'b64': base64.b64encode(\n example.SerializeToString()).decode('utf-8') }\n for example in examples]\n\n # If there is a user-specified input example adjustment to make, make it.\n if adjust_example:\n examples_for_predict = [\n adjust_example(ex) for ex in examples_for_predict]\n\n # Send request, including custom user-agent for tracking.\n request_builder = service.projects().predict(\n name=name,\n body={'instances': examples_for_predict}\n )\n user_agent = request_builder.headers.get('user-agent')\n request_builder.headers['user-agent'] = (\n USER_AGENT_FOR_CAIP_TRACKING + ('-' + user_agent if user_agent else ''))\n response = request_builder.execute()\n\n if 'error' in response:\n raise RuntimeError(response['error'])\n\n # Get the key to extract the prediction results from.\n results_key = self.config.get('predict_output_tensor')\n if results_key is None:\n if self.config.get('model_type') == 'classification':\n results_key = 'probabilities'\n else:\n results_key = 'outputs'\n\n # Parse the results from the response and return them.\n results = []\n attributions = (response['attributions']\n if 'attributions' in response else None)\n for pred in response['predictions']:\n # If the prediction contains a key to fetch the prediction, use it.\n if isinstance(pred, dict):\n pred = pred[results_key]\n # If the model is regression and the response is a list, extract the\n # score by taking the first element.\n if (self.config.get('model_type') == 'regression' and\n isinstance(pred, list)):\n pred = pred[0]\n # If a prediction adjustment function was provided, use it to adjust\n # the prediction.\n if adjust_prediction:\n pred = adjust_prediction(pred)\n results.append(pred)\n return {'predictions': results, 'attributions': attributions}\n"
] | [
[
"tensorflow.logging.set_verbosity",
"tensorflow.train.SequenceExample",
"tensorflow.train.Example"
]
] |
J-Z-Z/akshare | [
"0a9ca71b381a272e2f56211e455ff2493dfed17a",
"0a9ca71b381a272e2f56211e455ff2493dfed17a",
"0a9ca71b381a272e2f56211e455ff2493dfed17a"
] | [
"akshare/futures_derivative/nh_index_price.py",
"akshare/stock/stock_rank_forecast.py",
"akshare/index/index_cflp.py"
] | [
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/12/20 14:52\nDesc: 南华期货-商品指数历史走势-价格指数-数值\nhttp://www.nanhua.net/nhzc/varietytrend.html\n1000 点开始, 用收益率累计\nhttp://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280\n\"\"\"\nimport time\n\nimport requests\nimport pandas as pd\n\n\ndef futures_nh_index_symbol_table() -> pd.DataFrame:\n \"\"\"\n 南华期货-南华指数所有品种一览表\n http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280\n :return: 南华指数所有品种一览表\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.nanhua.net/ianalysis/plate-variety.json\"\n r = requests.get(url)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json)\n temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date\n return temp_df\n\n\ndef futures_nh_price_index(symbol: str = \"A\") -> pd.DataFrame:\n \"\"\"\n 南华期货-南华指数单品种-价格-所有历史数据\n http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280\n :param symbol: 通过 ak.futures_nh_index_symbol_table() 获取\n :type symbol: str\n :return: 南华期货-南华指数单品种-价格-所有历史数据\n :rtype: pandas.Series\n \"\"\"\n symbol_df = futures_nh_index_symbol_table()\n if symbol in symbol_df[\"code\"].tolist():\n t = time.time()\n url = f\"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}\"\n r = requests.get(url)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json)\n temp_df.columns = [\"date\", \"value\"]\n temp_df['date'] = pd.to_datetime(temp_df[\"date\"], unit='ms').dt.date\n return temp_df\n\n\nif __name__ == \"__main__\":\n futures_nh_index_symbol_table_df = futures_nh_index_symbol_table()\n print(futures_nh_index_symbol_table_df)\n\n futures_nh_price_index_df = futures_nh_price_index(symbol=\"NHAI\")\n print(futures_nh_price_index_df)\n",
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/9/12 18:29\nDesc: 巨潮资讯-数据中心-评级预测-投资评级\nhttp://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7\n\"\"\"\nimport time\nfrom py_mini_racer import py_mini_racer\nimport requests\nimport pandas as pd\n\njs_str = \"\"\"\n function mcode(input) { \n var keyStr = \"ABCDEFGHIJKLMNOP\" + \"QRSTUVWXYZabcdef\" + \"ghijklmnopqrstuv\" + \"wxyz0123456789+/\" + \"=\"; \n var output = \"\"; \n var chr1, chr2, chr3 = \"\"; \n var enc1, enc2, enc3, enc4 = \"\"; \n var i = 0; \n do { \n chr1 = input.charCodeAt(i++); \n chr2 = input.charCodeAt(i++); \n chr3 = input.charCodeAt(i++); \n enc1 = chr1 >> 2; \n enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); \n enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); \n enc4 = chr3 & 63; \n if (isNaN(chr2)) { \n enc3 = enc4 = 64; \n } else if (isNaN(chr3)) { \n enc4 = 64; \n } \n output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) \n + keyStr.charAt(enc3) + keyStr.charAt(enc4); \n chr1 = chr2 = chr3 = \"\"; \n enc1 = enc2 = enc3 = enc4 = \"\"; \n } while (i < input.length); \n \n return output; \n } \n\"\"\"\n\n\ndef stock_rank_forecast_cninfo(date: str = \"20210910\") -> pd.DataFrame:\n \"\"\"\n 巨潮资讯-数据中心-评级预测-投资评级\n http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7\n :param date: 查询日期\n :type date: str\n :return: 投资评级\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1089\"\n params = {\"tdate\": \"-\".join([date[:4], date[4:6], date[6:]])}\n random_time_str = str(int(time.time()))\n js_code = py_mini_racer.MiniRacer()\n js_code.eval(js_str)\n mcode = js_code.call(\"mcode\", random_time_str)\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"Cache-Control\": \"no-cache\",\n \"Content-Length\": \"0\",\n \"Host\": \"webapi.cninfo.com.cn\",\n \"mcode\": mcode,\n \"Origin\": \"http://webapi.cninfo.com.cn\",\n \"Pragma\": \"no-cache\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"http://webapi.cninfo.com.cn/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n r = requests.post(url, params=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"records\"])\n temp_df.columns = [\n \"证券简称\",\n \"发布日期\",\n \"前一次投资评级\",\n \"评级变化\",\n \"目标价格-上限\",\n \"是否首次评级\",\n \"投资评级\",\n \"研究员名称\",\n \"研究机构简称\",\n \"目标价格-下限\",\n \"证券代码\",\n ]\n temp_df = temp_df[[\n \"证券代码\",\n \"证券简称\",\n \"发布日期\",\n \"研究机构简称\",\n \"研究员名称\",\n \"投资评级\",\n \"是否首次评级\",\n \"评级变化\",\n \"前一次投资评级\",\n \"目标价格-下限\",\n \"目标价格-上限\",\n ]]\n temp_df[\"目标价格-上限\"] = pd.to_numeric(temp_df[\"目标价格-上限\"], errors=\"coerce\")\n temp_df[\"目标价格-下限\"] = pd.to_numeric(temp_df[\"目标价格-下限\"], errors=\"coerce\")\n return temp_df\n\n\nif __name__ == \"__main__\":\n stock_rank_forecast_cninfo_df = stock_rank_forecast_cninfo(date=\"20210907\")\n print(stock_rank_forecast_cninfo_df)\n",
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/12/27 15:47\nDesc: 中国公路物流运价、运量指数\nhttp://index.0256.cn/expx.htm\n\"\"\"\nimport pandas as pd\nimport requests\n\n\ndef index_cflp_price(symbol: str = \"周指数\") -> pd.DataFrame:\n \"\"\"\n 中国公路物流运价指数\n http://index.0256.cn/expx.htm\n :param symbol: choice of {\"周指数\", \"月指数\", \"季度指数\", \"年度指数\"}\n :type symbol: str\n :return: 中国公路物流运价指数\n :rtype: pandas.DataFrame\n \"\"\"\n symbol_map = {\n \"周指数\": \"2\",\n \"月指数\": \"3\",\n \"季度指数\": \"4\",\n \"年度指数\": \"5\",\n }\n url = \"http://index.0256.cn/expcenter_trend.action\"\n params = {\n \"marketId\": \"1\",\n \"attribute1\": \"5\",\n \"exponentTypeId\": symbol_map[symbol],\n \"cateId\": \"2\",\n \"attribute2\": \"华北\",\n \"city\": \"\",\n \"startLine\": \"\",\n \"endLine\": \"\",\n }\n headers = {\n \"Origin\": \"http://index.0256.cn\",\n \"Referer\": \"http://index.0256.cn/expx.htm\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\",\n }\n r = requests.post(url, data=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(\n [\n data_json[\"chart1\"][\"xLebal\"],\n data_json[\"chart1\"][\"yLebal\"],\n data_json[\"chart2\"][\"yLebal\"],\n data_json[\"chart3\"][\"yLebal\"],\n ]\n ).T\n temp_df.columns = [\"日期\", \"定基指数\", \"环比指数\", \"同比指数\"]\n temp_df[\"日期\"] = pd.to_datetime(temp_df[\"日期\"]).dt.date\n temp_df[\"定基指数\"] = pd.to_numeric(temp_df[\"定基指数\"])\n temp_df[\"环比指数\"] = pd.to_numeric(temp_df[\"环比指数\"])\n temp_df[\"同比指数\"] = pd.to_numeric(temp_df[\"同比指数\"])\n return temp_df\n\n\ndef index_cflp_volume(symbol: str = \"月指数\") -> pd.DataFrame:\n \"\"\"\n 中国公路物流运量指数\n http://index.0256.cn/expx.htm\n :param symbol: choice of {\"月指数\", \"季度指数\", \"年度指数\"}\n :type symbol: str\n :return: 中国公路物流运量指数\n :rtype: pandas.DataFrame\n \"\"\"\n symbol_map = {\n \"月指数\": \"3\",\n \"季度指数\": \"4\",\n \"年度指数\": \"5\",\n }\n url = \"http://index.0256.cn/volume_query.action\"\n params = {\n \"type\": \"1\",\n \"marketId\": \"1\",\n \"expTypeId\": symbol_map[symbol],\n \"startDate1\": \"\",\n \"endDate1\": \"\",\n \"city\": \"\",\n \"startDate3\": \"\",\n \"endDate3\": \"\",\n }\n headers = {\n \"Origin\": \"http://index.0256.cn\",\n \"Referer\": \"http://index.0256.cn/expx.htm\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\",\n }\n r = requests.post(url, data=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(\n [\n data_json[\"chart1\"][\"xLebal\"],\n data_json[\"chart1\"][\"yLebal\"],\n data_json[\"chart2\"][\"yLebal\"],\n data_json[\"chart3\"][\"yLebal\"],\n ]\n ).T\n temp_df.columns = [\"日期\", \"定基指数\", \"环比指数\", \"同比指数\"]\n temp_df[\"日期\"] = pd.to_datetime(temp_df[\"日期\"]).dt.date\n temp_df[\"定基指数\"] = pd.to_numeric(temp_df[\"定基指数\"])\n temp_df[\"环比指数\"] = pd.to_numeric(temp_df[\"环比指数\"])\n temp_df[\"同比指数\"] = pd.to_numeric(temp_df[\"同比指数\"])\n return temp_df\n\n\nif __name__ == \"__main__\":\n index_cflp_price_df = index_cflp_price(symbol=\"周指数\")\n print(index_cflp_price_df)\n\n index_cflp_price_df = index_cflp_price(symbol=\"月指数\")\n print(index_cflp_price_df)\n\n index_cflp_price_df = index_cflp_price(symbol=\"季度指数\")\n print(index_cflp_price_df)\n\n index_cflp_price_df = index_cflp_price(symbol=\"年度指数\")\n print(index_cflp_price_df)\n\n index_cflp_volume_df = index_cflp_volume(symbol=\"月指数\")\n print(index_cflp_volume_df)\n\n index_cflp_volume_df = 
index_cflp_volume(symbol=\"季度指数\")\n print(index_cflp_volume_df)\n\n index_cflp_volume_df = index_cflp_volume(symbol=\"年度指数\")\n print(index_cflp_volume_df)\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
],
[
"pandas.to_numeric",
"pandas.DataFrame"
],
[
"pandas.to_numeric",
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
ZhangXiao96/RecommenderSystems4Python | [
"f125536436f83696e133e6b98c22430a47df287d"
] | [
"TraditionalRecommenderSystems/MatrixFactorization/MatrixFactorization.py"
] | [
"from lib.utils import top_k\nfrom TraditionalRecommenderSystems.MatrixFactorization.Models import BaseMF\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nimport torch.utils.data as data\nfrom tqdm import tqdm\n\n\nclass MatrixFactorization(object):\n def __init__(self, user_item_pairs, user_list, item_list, nb_factor=40, drop_rate=0.5, batch_size=32, lr=1e-1,\n optimizer=torch.optim.Adam, loss_func=nn.MSELoss(reduction='mean'), sparse=False,\n weight_decay=0., device='cuda', pro_process=None):\n \"\"\"\n Matrix Factorization based on Pytorch.\n :param user_item_pairs: list. [(user, item, rating)].\n :param user_list: list. The list of all the users (with no repeat).\n :param item_list: list. The list of all the items (with no repeat).\n :param nb_factor: int. The number of factors.\n :param drop_rate: float 0~1. Drop rate of the dropout layer.\n :param batch_size: int. Batch size of training\n :param lr: float. Learning rate.\n :param optimizer: torch.optim. Optimizer utilized to train the model.\n :param loss_func: torch.nn.*Loss. Loss function of training.\n :param sparse: boolean. The gradient requires to be sparse or not.\n :param weight_decay: float. L2 regularization.\n :param device: 'cpu' or 'cuda'.\n :param pro_process: nn.Module.\n \"\"\"\n self.user_item_pairs = pd.DataFrame(user_item_pairs)\n\n # build index-user, index-item\n self.index_2_user = np.array(user_list)\n self.index_2_item = np.array(item_list)\n assert len(self.index_2_user) == len(set(self.index_2_user))\n assert len(self.index_2_item) == len(set(self.index_2_item))\n self.user_2_index = {self.index_2_user[i]: i for i in range(len(self.index_2_user))}\n self.item_2_index = {self.index_2_item[i]: i for i in range(len(self.index_2_item))}\n self.nb_user, self.nb_item = len(user_list), len(item_list)\n\n # prepare training loader\n train_user_indices = torch.from_numpy(self.users_to_indices(self.user_item_pairs[0].values)).long()\n train_item_indices = torch.from_numpy(self.items_to_indices(self.user_item_pairs[1].values)).long()\n train_ratings = torch.from_numpy(self.user_item_pairs[2].values.reshape(-1, 1)).float()\n self.train_data_loader = data.DataLoader(data.TensorDataset(train_user_indices, train_item_indices,\n train_ratings), batch_size=batch_size, shuffle=True)\n\n # build model\n self.nb_factor = nb_factor\n self.lr = lr\n self.batch_size = batch_size\n self.loss_func = loss_func\n self.weight_decay = weight_decay\n self.device = device\n self.sparse = sparse\n self.process = pro_process\n self.model = BaseMF(self.nb_user, self.nb_item, nb_factor, drop_rate, sparse, pro_process=self.process).to(device)\n self.optimizer = optimizer(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n\n # build history rating matrix\n self.pred_rating_matrix = None\n self.history_rating_matrix = None\n self.update_history_rating_matrix()\n\n def train(self, epochs, test_data=None, test_epoch_step=1):\n \"\"\"\n Train the model.\n :param epochs: int. The epochs of training.\n :param test_data: [(user, item, rating)]. None if no validation is applied.\n :param test_epoch_step: int. 
The step of validation.\n :return: (list of training loss, list of test loss) if validation is applied, else only the list of training loss.\n \"\"\"\n hist_train_loss, hist_test_loss = [], []\n if test_data is not None:\n test_data = pd.DataFrame(test_data)\n for epoch in range(epochs):\n print('Epoch-{}/{}:'.format(epoch+1, epochs))\n self.model.train()\n train_loss = self.train_epoch()\n hist_train_loss.append(train_loss)\n if (test_data is not None) and (epoch % test_epoch_step == 0):\n self.model.eval()\n test_loss = self.eval(test_data.iloc[:, [0, 1]].values, ground_truth=test_data[2].values)\n hist_test_loss.append(test_loss)\n print('training loss = {}, test loss = {}'.format(train_loss, test_loss))\n else:\n print('training loss = {}'.format(train_loss))\n self.update_pred_rating_matrix()\n return hist_train_loss, hist_test_loss\n\n def train_epoch(self):\n \"\"\"\n :return: training loss.\n \"\"\"\n self.model.train()\n epoch_loss = 0.\n for id_user, id_item, id_rating in tqdm(self.train_data_loader):\n batch_loss = self.train_on_batch(id_user, id_item, id_rating)\n epoch_loss += batch_loss\n epoch_loss /= len(self.train_data_loader)\n return epoch_loss\n\n def train_on_batch(self, user_indices, item_indices, ratings):\n users, items, ratings = user_indices.to(self.device), item_indices.to(self.device), ratings.to(self.device)\n self.optimizer.zero_grad()\n outputs = self.model(users, items)\n loss = self.loss_func(outputs, ratings)\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n def eval(self, user_item_pairs, ground_truth, batch_size=100):\n \"\"\"\n Predict the ratings of the pairs of (user, item).\n :param user_item_pairs: list of (user, item).\n :param ground_truth: the ground truth rating.\n :param batch_size: batch_size of predicting.\n :return: ratings. size=[nb_pairs]\n \"\"\"\n self.model.eval()\n outputs = self.predict(user_item_pairs, batch_size=batch_size).ravel()\n loss = np.mean((outputs-ground_truth.ravel())**2)\n return loss\n\n def predict(self, user_item_pairs, batch_size=100):\n \"\"\"\n Predict the ratings of the pairs of (user, item).\n :param user_item_pairs: list of (user, item)\n :param batch_size: batch_size of predicting.\n :return: ratings. 
size=[nb_pairs]\n \"\"\"\n pairs = pd.DataFrame(user_item_pairs)\n user_indices = self.users_to_indices(pairs[0].values)\n item_indices = self.items_to_indices(pairs[1].values)\n self.model.eval()\n outputs = []\n with torch.no_grad():\n start_id = 0\n end_id = min(batch_size, len(pairs))\n while start_id < len(pairs):\n outputs.append(self.predict_on_batch(user_indices[start_id:end_id], item_indices[start_id:end_id]))\n start_id += batch_size\n end_id = min(start_id+batch_size, len(pairs))\n return np.concatenate(outputs, axis=0)\n\n def predict_on_batch(self, user_indices, item_indices):\n users = torch.from_numpy(user_indices).long().to(self.device)\n items = torch.from_numpy(item_indices).long().to(self.device)\n outputs = self.model(users, items)\n return outputs.data.cpu().numpy()\n\n def update_history_rating_matrix(self):\n \"\"\"\n Update history rating matrix.\n :return: self.\n \"\"\"\n self.history_rating_matrix = pd.DataFrame(index=self.index_2_user, columns=self.index_2_item)\n for i, j, k in self.user_item_pairs.values:\n if i and j and k:\n self.history_rating_matrix[j][i] = k\n return self\n\n def update_pred_rating_matrix(self):\n \"\"\"\n Update prediction rating matrix.\n :return: self.\n \"\"\"\n pred_matrix = self.model.get_rating_matrix().data.cpu().numpy()\n self.pred_rating_matrix = np.where(self.history_rating_matrix.isna(), pred_matrix, np.nan)\n return self\n\n # def get_single_rating(self, i, j):\n # return self.pred_rating_matrix[i][j] if not np.isnan(self.pred_rating_matrix[i][j])\\\n # else self.history_rating_matrix.values[i][j]\n #\n # def predict_ratings_with_matrix(self, user_item_pairs):\n # \"\"\"\n # Predict the ratings of the pairs of (user, item).\n # :param user_item_pairs: list of (user, item)\n # :return: ratings. size=[nb_pairs]\n # \"\"\"\n # pairs = pd.DataFrame(user_item_pairs)\n # users = self.users_to_indices(pairs[0])\n # items = self.items_to_indices(pairs[1])\n # return np.array([self.get_single_rating(users[i], items[i]) for i in range(len(user_item_pairs))])\n\n def predict_ratings(self, user_item_pairs):\n \"\"\"\n Predict the ratings of the pairs of (user, item).\n :param user_item_pairs: list of (user, item)\n :return: ratings. size=[nb_pairs]\n \"\"\"\n return self.predict(user_item_pairs).ravel()\n\n def recommend(self, users, nb_recommendation):\n \"\"\"\n return the recommendations and their corresponding ratings.\n :param users: array of users\n :param nb_recommendation: The number of items to be recommended.\n :return: Indices of recommended items and their corresponding scores.\n \"\"\"\n user_indices = self.users_to_indices(users)\n id_recommend, rating_recommend = top_k(np.where(np.isnan(self.pred_rating_matrix[user_indices, :]),\n -np.inf, self.pred_rating_matrix[user_indices, :]),\n k=nb_recommendation, axis=-1, reverse=True, sort=True)\n return id_recommend, rating_recommend\n\n def users_to_indices(self, users):\n return np.array([self.user_2_index[user] for user in users]).ravel()\n\n def indices_to_users(self, indices):\n return self.index_2_user[np.array(indices).ravel()]\n\n def items_to_indices(self, items):\n return np.array([self.item_2_index[item] for item in items]).ravel()\n\n def indices_to_items(self, indices):\n return self.index_2_item[np.array(indices).ravel()]\n"
] | [
[
"torch.nn.MSELoss",
"numpy.concatenate",
"pandas.DataFrame",
"torch.no_grad",
"torch.from_numpy",
"numpy.isnan",
"numpy.array",
"torch.utils.data.TensorDataset"
]
] |
cww97/Jordan | [
"00234927d5c33e2dd301c5dae57eb89cd5e54c79"
] | [
"brain/mcts_alphaZero.py"
] | [
"import numpy as np\nimport copy \n\n\ndef softmax(x):\n probs = np.exp(x - np.max(x))\n probs /= np.sum(probs)\n return probs\n\nclass TreeNode(object):\n \"\"\"A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and\n its visit-count-adjusted prior score u.\n \"\"\"\n\n def __init__(self, parent, prior_p):\n self._parent = parent\n self._children = {} # a map from action to TreeNode\n self._n_visits = 0\n self._Q = 0\n self._u = 0\n self._P = prior_p\n\n def expand(self, action_priors):\n \"\"\"Expand tree by creating new children.\n action_priors -- output from policy function - a list of tuples of actions\n and their prior probability according to the policy function.\n \"\"\"\n for action, prob in action_priors:\n if action not in self._children:\n self._children[action] = TreeNode(self, prob)\n\n def select(self, c_puct):\n \"\"\"Select action among children that gives maximum action value, Q plus bonus u(P).\n Returns:\n A tuple of (action, next_node)\n \"\"\"\n return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))\n\n def update(self, leaf_value):\n \"\"\"Update node values from leaf evaluation.\n Arguments:\n leaf_value -- the value of subtree evaluation from the current player's perspective. \n \"\"\"\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n def update_recursive(self, leaf_value):\n \"\"\"Like a call to update(), but applied recursively for all ancestors.\n \"\"\"\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)\n\n def get_value(self, c_puct):\n \"\"\"Calculate and return the value for this node: a combination of leaf evaluations, Q, and\n this node's prior adjusted for its visit count, u\n c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and\n prior probability, P, on this node's score.\n \"\"\"\n self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)\n return self._Q + self._u\n\n def is_leaf(self):\n \"\"\"Check if leaf node (i.e. no nodes below this have been expanded).\n \"\"\"\n return self._children == {}\n\n def is_root(self):\n return self._parent is None\n\n\nclass MCTS(object):\n \"\"\"A simple implementation of Monte Carlo Tree Search.\n \"\"\"\n\n def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):\n \"\"\"Arguments:\n policy_value_fn -- a function that takes in a board state and outputs a list of (action, probability)\n tuples and also a score in [-1, 1] (i.e. the expected value of the end game score from \n the current player's perspective) for the current player.\n c_puct -- a number in (0, inf) that controls how quickly exploration converges to the\n maximum-value policy, where a higher value means relying on the prior more\n \"\"\"\n self._root = TreeNode(None, 1.0)\n self._policy = policy_value_fn\n self._c_puct = c_puct\n self._n_playout = n_playout\n\n def _playout(self, state):\n \"\"\"Run a single playout from the root to the leaf, getting a value at the leaf and\n propagating it back through its parents. 
State is modified in-place, so a copy must be\n provided.\n Arguments:\n state -- a copy of the state.\n \"\"\"\n node = self._root\n while(1): \n if node.is_leaf():\n break \n # Greedily select next move.\n action, node = node.select(self._c_puct) \n state.do_move(action)\n\n # Evaluate the leaf using a network which outputs a list of (action, probability)\n # tuples p and also a score v in [-1, 1] for the current player.\n action_probs, leaf_value = self._policy(state)\n # Check for end of game.\n end, winner = state.game_end()\n if not end:\n node.expand(action_probs)\n else:\n # for end state,return the \"true\" leaf_value\n if winner == -1: # tie\n leaf_value = 0.0\n else:\n leaf_value = 1.0 if winner == state.get_current_player() else -1.0\n\n # Update value and visit count of nodes in this traversal.\n node.update_recursive(-leaf_value)\n\n def get_move_probs(self, state, temp=1e-3):\n \"\"\"Runs all playouts sequentially and returns the available actions and their corresponding probabilities \n Arguments:\n state -- the current state, including both game state and the current player.\n temp -- temperature parameter in (0, 1] that controls the level of exploration\n Returns:\n the available actions and the corresponding probabilities \n \"\"\" \n for n in range(self._n_playout):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n \n # calc the move probabilities based on the visit counts at the root node\n act_visits = [(act, node._n_visits) for act, node in self._root._children.items()]\n acts, visits = zip(*act_visits)\n act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10)) \n \n return acts, act_probs\n\n def update_with_move(self, last_move):\n \"\"\"Step forward in the tree, keeping everything we already know about the subtree.\n \"\"\"\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)\n\n def __str__(self):\n return \"MCTS\"\n \n\nclass MCTSPlayer(object):\n \"\"\"AI player based on MCTS\"\"\"\n def __init__(self, policy_value_function, c_puct=5, n_playout=2000, is_selfplay=0):\n self.mcts = MCTS(policy_value_function, c_puct, n_playout)\n self._is_selfplay = is_selfplay\n \n def set_player_ind(self, p):\n self.player = p\n\n def reset_player(self):\n self.mcts.update_with_move(-1) \n\n def get_action(self, board, temp=1e-3, return_prob=0):\n sensible_moves = board.availables\n move_probs = np.zeros(board.width*board.height) # the pi vector returned by MCTS as in the alphaGo Zero paper\n if len(sensible_moves) > 0:\n acts, probs = self.mcts.get_move_probs(board, temp)\n move_probs[list(acts)] = probs \n if self._is_selfplay:\n # add Dirichlet Noise for exploration (needed for self-play training)\n move = np.random.choice(acts, p=0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs)))) \n self.mcts.update_with_move(move) # update the root node and reuse the search tree\n else:\n # with the default temp=1e-3, this is almost equivalent to choosing the move with the highest prob\n move = np.random.choice(acts, p=probs) \n # reset the root node\n self.mcts.update_with_move(-1) \n# location = board.move_to_location(move)\n# print(\"AI move: %d,%d\\n\" % (location[0], location[1]))\n \n if return_prob:\n return move, move_probs\n else:\n return move\n else: \n print(\"WARNING: the board is full\")\n\n def __str__(self):\n return \"MCTS {}\".format(self.player) "
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.random.choice",
"numpy.max",
"numpy.sqrt",
"numpy.array"
]
] |
zqma/IIC | [
"9d4e30b51535c6ca381389d9c22ce45be4d11883"
] | [
"proj/archs/segmentation/baselines/net10a_doersch.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom proj.archs.cluster.vgg import VGGNet\nfrom proj.archs.segmentation.net10a import SegmentationNet10aTrunk, \\\n SegmentationNet10a\nfrom proj.utils.segmentation.baselines.general import get_patches\n\n__all__ = [\"SegmentationNet10aDoersch\"]\n\n\nclass DoerschHead(nn.Module):\n def __init__(self, config):\n super(DoerschHead, self).__init__()\n self.patch_side = config.doersch_patch_side\n\n self.siamese_branch = nn.Sequential(\n nn.Conv2d(in_channels=SegmentationNet10a.cfg[-1][0], out_channels=1024,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.ReLU(inplace=True)\n )\n\n self.joint = nn.Sequential(\n nn.Linear(2 * 1024 * self.patch_side * self.patch_side, 1024),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(1024, 9) # 9 gt positions, N, NE... NW.\n )\n\n def forward(self, patches1, patches2):\n patches1 = self.siamese_branch(patches1)\n patches2 = self.siamese_branch(patches2)\n\n ni, k, h, w = patches1.size()\n ni2, k2, h2, w2 = patches1.size()\n\n if not ((ni == ni2) and (k == k2) and (h == h2) and (w == w2) and \\\n (h == self.patch_side) and (w == self.patch_side)):\n print(ni, k, h, w)\n print(ni2, k2, h2, w2)\n assert (False)\n\n # flatten all but first dim\n patches1 = patches1.contiguous() # otherwise view may behave funny\n patches2 = patches2.contiguous()\n\n patches1 = patches1.view(patches1.size(0), -1)\n patches2 = patches2.view(patches2.size(0), -1)\n concatenated = torch.cat((patches1, patches2), dim=1)\n\n ni3, nf = concatenated.size()\n if not ((ni3 == ni) and (nf == (2 * 1024 * self.patch_side *\n self.patch_side))):\n print(ni, k, h, w)\n print(ni2, k2, h2, w2)\n print(patches1.size())\n print(patches2.size())\n print(ni3, nf)\n assert (False)\n\n return self.joint(concatenated)\n\n\nclass SegmentationNet10aDoersch(VGGNet):\n def __init__(self, config):\n super(SegmentationNet10aDoersch, self).__init__()\n\n self.patch_side = config.doersch_patch_side\n self.input_sz = config.input_sz\n self.features_sz = SegmentationNet10a.cfg[-1][0]\n\n print(\"SegmentationNet10aDoersch: %d %d %d\" % (self.patch_side,\n self.input_sz,\n self.features_sz))\n\n self.features = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)\n self.doersch_head = DoerschHead(config)\n\n self._initialize_weights()\n\n def forward(self, x, centre=None, other=None, penultimate=False):\n x = self.features(x)\n x = F.interpolate(x, size=self.input_sz, mode=\"bilinear\")\n\n if not penultimate:\n assert ((centre is not None) and (other is not None))\n patches1, patches2 = \\\n get_patches(x, centre, other, self.patch_side)\n\n # predicted position distribution, no softmax - using\n # torch.CrossEntropyLoss\n # shape: bn, 9\n x = self.doersch_head(patches1, patches2)\n\n return x\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.cat",
"torch.nn.functional.interpolate"
]
] |
sdc50/bokeh | [
"4f0a77c96f0045d380e5e9edb606a9f3c7832d9f"
] | [
"tests/unit/bokeh/core/test_properties.py"
] | [
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import annotations # isort:skip\n\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom bokeh._testing.util.api import verify_all\nfrom bokeh.core.has_props import HasProps\nfrom bokeh.core.properties import (\n Alias,\n Dict,\n Enum,\n Float,\n Instance,\n Int,\n List,\n Nullable,\n NumberSpec,\n Override,\n String,\n)\nfrom bokeh.models import Plot\n\n# Module under test\nimport bokeh.core.properties as bcp # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nALL = (\n 'Alias',\n 'Alpha',\n 'AlphaSpec',\n 'Angle',\n 'AngleSpec',\n 'Any',\n 'AnyRef',\n 'Array',\n 'Auto',\n 'Base64String',\n 'Bool',\n 'Byte',\n 'Color',\n 'ColorHex',\n 'ColorSpec',\n 'ColumnData',\n 'Complex',\n 'DashPattern',\n 'DataSpec',\n 'Date',\n 'Datetime',\n 'Dict',\n 'DistanceSpec',\n 'Either',\n 'Enum',\n 'Factor',\n 'FactorSeq',\n 'Float',\n 'FontSize',\n 'FontSizeSpec',\n 'HatchPatternSpec',\n 'HatchPatternType',\n 'Image',\n 'Include',\n 'Instance',\n 'Int',\n 'Interval',\n 'JSON',\n 'List',\n 'MarkerSpec',\n 'MarkerType',\n 'MathString',\n 'MinMaxBounds',\n 'NonNegativeInt',\n 'NonNullable',\n 'Null',\n 'NullStringSpec',\n 'Nullable',\n 'NumberSpec',\n 'Override',\n 'PandasDataFrame',\n 'PandasGroupBy',\n 'Percent',\n 'PositiveInt',\n 'RGB',\n 'Readonly',\n 'Regex',\n 'RelativeDelta',\n 'RestrictedDict',\n 'Seq',\n 'Size',\n 'SizeSpec',\n 'String',\n 'StringSpec',\n 'Struct',\n 'TimeDelta',\n 'TextLike',\n 'Tuple',\n 'UnitsSpec',\n 'expr',\n 'field',\n 'validate',\n 'value',\n 'without_property_validation'\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#----------------------------------------------------------------------------\n\n# TODO (bev) These tests should be moved to better places\n\n\nclass TestBasic:\n def test_simple_class(self) -> None:\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1, 2, 3])\n zz = Dict(String, Int)\n s = Nullable(String(None))\n\n f = Foo()\n assert f.x == 12\n assert f.y == \"hello\"\n assert np.array_equal(np.array([1, 2, 3]), f.z)\n assert f.s is None\n\n\n assert {\"x\", \"y\", \"z\", \"zz\", \"s\"} == f.properties()\n with_defaults = f.properties_with_values(include_defaults=True)\n assert dict(x=12, y=\"hello\", z=[1,2,3], zz={}, s=None) == with_defaults\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict() == without_defaults\n\n f.x = 18\n assert f.x == 18\n\n f.y = \"bar\"\n assert f.y == \"bar\"\n\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\") == without_defaults\n\n f.z[0] = 100\n\n without_defaults = 
f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\", z=[100,2,3]) == without_defaults\n\n f.zz = {'a': 10}\n\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\", z=[100,2,3], zz={'a': 10}) == without_defaults\n\n def test_enum(self) -> None:\n class Foo(HasProps):\n x = Enum(\"blue\", \"red\", \"green\") # the first item is the default\n y = Enum(\"small\", \"medium\", \"large\", default=\"large\")\n\n f = Foo()\n assert f.x == \"blue\"\n assert f.y == \"large\"\n\n f.x = \"red\"\n assert f.x == \"red\"\n\n with pytest.raises(ValueError):\n f.x = \"yellow\"\n\n f.y = \"small\"\n assert f.y == \"small\"\n\n with pytest.raises(ValueError):\n f.y = \"yellow\"\n\n def test_inheritance(self) -> None:\n class Base(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n class Child(Base):\n z = Float(3.14)\n\n c = Child()\n assert frozenset(['x', 'y', 'z']) == frozenset(c.properties())\n assert c.y == \"hello\"\n\n def test_set(self) -> None:\n class Foo(HasProps):\n x = Int(12)\n y = Enum(\"red\", \"blue\", \"green\")\n z = String(\"blah\")\n\n f = Foo()\n assert f.x == 12\n assert f.y == \"red\"\n assert f.z == \"blah\"\n f.update(**dict(x=20, y=\"green\", z=\"hello\"))\n assert f.x == 20\n assert f.y == \"green\"\n assert f.z == \"hello\"\n with pytest.raises(ValueError):\n f.update(y=\"orange\")\n\n def test_accurate_properties_sets(self) -> None:\n class Base(HasProps):\n num = Int(12)\n container = List(String)\n child = Instance(HasProps)\n\n class Mixin(HasProps):\n mixin_num = Int(12)\n mixin_container = List(String)\n mixin_child = Instance(HasProps)\n\n class Sub(Base, Mixin):\n sub_num = Int(12)\n sub_container = List(String)\n sub_child = Instance(HasProps)\n\n b = Base()\n assert {\"child\"} == set(b.properties_with_refs())\n assert {\"num\", \"container\", \"child\"} == b.properties()\n\n m = Mixin()\n assert set(m.properties_with_refs()) == {\"mixin_child\"}\n assert m.properties() == {\"mixin_num\", \"mixin_container\", \"mixin_child\"}\n\n s = Sub()\n assert set(s.properties_with_refs()) == {\"child\", \"sub_child\", \"mixin_child\"}\n assert s.properties() == {\"num\", \"container\", \"child\", \"mixin_num\", \"mixin_container\", \"mixin_child\", \"sub_num\", \"sub_container\", \"sub_child\"}\n\n # verify caching\n assert s.properties_with_refs() is s.properties_with_refs()\n assert s.properties() is s.properties()\n\n def test_accurate_dataspecs(self) -> None:\n class Base(HasProps):\n num = NumberSpec(12)\n not_a_dataspec = Float(10)\n\n class Mixin(HasProps):\n mixin_num = NumberSpec(14)\n\n class Sub(Base, Mixin):\n sub_num = NumberSpec(16)\n\n base = Base()\n mixin = Mixin()\n sub = Sub()\n\n assert {\"num\"} == set(base.dataspecs())\n assert {\"mixin_num\"} == set(mixin.dataspecs())\n assert {\"num\", \"mixin_num\", \"sub_num\"} == set(sub.dataspecs())\n\n def test_not_serialized(self) -> None:\n class NotSerialized(HasProps):\n x = Int(12, serialized=False)\n y = String(\"hello\")\n\n o = NotSerialized()\n assert o.x == 12\n assert o.y == 'hello'\n\n # non-serialized props are still in the list of props\n assert 'x' in o.properties()\n assert 'y' in o.properties()\n\n # but they aren't in the dict of props with values, since their\n # values are not important (already included in other values,\n # as with the _units properties)\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in 
o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n o.x = 42\n o.y = 'world'\n\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_readonly(self) -> None:\n class Readonly(HasProps):\n x = Int(12, readonly=True) # with default\n y = Nullable(Int(), readonly=True) # without default\n z = String(\"hello\")\n\n o = Readonly()\n assert o.x == 12\n assert o.y == None\n assert o.z == 'hello'\n\n # readonly props are still in the list of props\n assert 'x' in o.properties()\n assert 'y' in o.properties()\n assert 'z' in o.properties()\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'z' in o.properties_with_values(include_defaults=True)\n\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n assert 'z' not in o.properties_with_values(include_defaults=False)\n\n with pytest.raises(RuntimeError):\n o.x = 7\n with pytest.raises(RuntimeError):\n o.y = 7\n o.z = \"xyz\"\n\n assert o.x == 12\n assert o.y == None\n assert o.z == 'xyz'\n\n def test_include_defaults(self) -> None:\n class IncludeDefaultsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsTest()\n assert o.x == 12\n assert o.y == 'hello'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n o.x = 42\n o.y = 'world'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_include_defaults_with_kwargs(self) -> None:\n class IncludeDefaultsKwargsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsKwargsTest(x=14, y=\"world\")\n assert o.x == 14\n assert o.y == 'world'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_include_defaults_set_to_same(self) -> None:\n class IncludeDefaultsSetToSameTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsSetToSameTest()\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n # this should no-op\n o.x = 12\n o.y = \"hello\"\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n def test_override_defaults(self) -> None:\n class FooBase(HasProps):\n x = Int(12)\n\n class FooSub(FooBase):\n x = 
Override(default=14)\n\n def func_default():\n return 16\n\n class FooSubSub(FooBase):\n x = Override(default=func_default)\n\n f_base = FooBase()\n f_sub = FooSub()\n f_sub_sub = FooSubSub()\n\n assert f_base.x == 12\n assert f_sub.x == 14\n assert f_sub_sub.x == 16\n\n assert 12 == f_base.properties_with_values(include_defaults=True)['x']\n assert 14 == f_sub.properties_with_values(include_defaults=True)['x']\n assert 16 == f_sub_sub.properties_with_values(include_defaults=True)['x']\n\n assert 'x' not in f_base.properties_with_values(include_defaults=False)\n assert 'x' not in f_sub.properties_with_values(include_defaults=False)\n assert 'x' in f_sub_sub.properties_with_values(include_defaults=False)\n\n # def test_kwargs_init(self) -> None:\n # class Foo(HasProps):\n # x = String\n # y = Int\n # z = Float\n # f = Foo(x = \"hello\", y = 14)\n # assert f.x == \"hello\"\n # assert f.y == 14\n\n # with pytest.raises(TypeError):\n # # This should raise a TypeError: object.__init__() takes no parameters\n # g = Foo(z = 3.14, q = \"blah\")\n\nclass Foo(HasProps):\n pass\n\nclass Bar(HasProps):\n pass\n\nclass Baz(HasProps):\n pass\n\ndef test_HasProps_equals() -> None:\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n class FooUnrelated(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n v = Foo().equals(Foo())\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=1))\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=2))\n assert v is False\n\n v = Foo(x=1).equals(1)\n assert v is False\n\n v = Foo().equals(FooUnrelated())\n assert v is False\n\ndef test_HasProps_clone() -> None:\n p1 = Plot(width=1000)\n c1 = p1.properties_with_values(include_defaults=False)\n p2 = p1._clone()\n c2 = p2.properties_with_values(include_defaults=False)\n assert c1 == c2\n\ndef test_Alias() -> None:\n class Foo(HasProps):\n x = Int(12)\n ax = Alias('x')\n\n f = Foo(x=10)\n assert f.x == 10\n assert f.ax == 10\n\n f.x = 20\n assert f.x == 20\n assert f.ax == 20\n\n f.ax = 30\n assert f.x == 30\n assert f.ax == 30\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nTest___all__ = verify_all(bcp, ALL)\n"
] | [
[
"numpy.array"
]
] |
omshinde/dfc2019 | [
"2e48cc8442c2c33aef7e1a0de27041709ef160e8"
] | [
"track2/icnet/memory_saving_gradients.py"
] | [
"from toposort import toposort\nimport contextlib\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.graph_editor as ge\nimport time\nimport sys\nsys.setrecursionlimit(10000)\n# refers back to current module if we decide to split helpers out\nutil = sys.modules[__name__]\n\n# getting rid of \"WARNING:tensorflow:VARIABLES collection name is deprecated\"\nsetattr(tf.GraphKeys, \"VARIABLES\", \"variables\")\n\n# save original gradients since tf.gradient could be monkey-patched to point\n# to our version\nfrom tensorflow.python.ops import gradients as tf_gradients_lib\ntf_gradients = tf_gradients_lib.gradients\n\nMIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing\n\n# specific versions we can use to do process-wide replacement of tf.gradients\ndef gradients_speed(ys, xs, grad_ys=None, **kwargs):\n return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)\n\ndef gradients_memory(ys, xs, grad_ys=None, **kwargs):\n return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs)\n \ndef gradients_collection(ys, xs, grad_ys=None, **kwargs):\n return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)\n\ndef gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):\n '''\n Authors: Tim Salimans & Yaroslav Bulatov\n\n memory efficient gradient implementation inspired by \"Training Deep Nets with Sublinear Memory Cost\"\n by Chen et al. 2016 (https://arxiv.org/abs/1604.06174)\n\n ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients\n (https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients)\n\n 'checkpoints' can either be\n - a list consisting of tensors from the forward pass of the neural net\n that we should re-use when calculating the gradients in the backward pass\n all other tensors that do not appear in this list will be re-computed\n - a string specifying how this list should be determined. currently we support\n - 'speed': checkpoint all outputs of convolutions and matmuls. 
these ops are usually the most expensive,\n so checkpointing them maximizes the running speed\n (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory)\n - 'memory': try to minimize the memory usage\n (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint)\n - 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint\n '''\n\n # print(\"Calling memsaving gradients with\", checkpoints)\n if not isinstance(ys,list):\n ys = [ys]\n if not isinstance(xs,list):\n xs = [xs]\n\n bwd_ops = ge.get_backward_walk_ops([y.op for y in ys],\n inclusive=True)\n\n debug_print(\"bwd_ops: %s\", bwd_ops)\n \n # forward ops are all ops that are candidates for recomputation\n fwd_ops = ge.get_forward_walk_ops([x.op for x in xs],\n inclusive=True,\n within_ops=bwd_ops)\n debug_print(\"fwd_ops: %s\", fwd_ops)\n \n # exclude ops with no inputs\n fwd_ops = [op for op in fwd_ops if op.inputs]\n\n # don't recompute xs, remove variables\n xs_ops = _to_ops(xs)\n fwd_ops = [op for op in fwd_ops if not op in xs_ops]\n fwd_ops = [op for op in fwd_ops if not '/assign' in op.name]\n fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]\n fwd_ops = [op for op in fwd_ops if not '/read' in op.name]\n ts_all = ge.filter_ts(fwd_ops, True) # get the tensors\n ts_all = [t for t in ts_all if '/read' not in t.name]\n ts_all = set(ts_all) - set(xs) - set(ys)\n\n # construct list of tensors to checkpoint during forward pass, if not\n # given as input\n if type(checkpoints) is not list:\n if checkpoints == 'collection':\n checkpoints = tf.get_collection('checkpoints')\n \n elif checkpoints == 'speed':\n # checkpoint all expensive ops to maximize running speed\n checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')\n \n elif checkpoints == 'memory':\n\n # remove very small tensors and some weird ops\n def fixdims(t): # tf.Dimension values are not compatible with int, convert manually\n try:\n return [int(e if e.value is not None else 64) for e in t]\n except:\n return [0] # unknown shape\n ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE]\n ts_all = [t for t in ts_all if 'L2Loss' not in t.name]\n ts_all = [t for t in ts_all if 'entropy' not in t.name]\n ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name]\n ts_all = [t for t in ts_all if 'Switch' not in t.name]\n ts_all = [t for t in ts_all if 'dropout' not in t.name]\n # DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16\n ts_all = [t for t in ts_all if 'Cast' not in t.name]\n\n # filter out all tensors that are inputs of the backward graph\n with util.capture_ops() as bwd_ops:\n tf_gradients(ys, xs, grad_ys, **kwargs)\n\n bwd_inputs = [t for op in bwd_ops for t in op.inputs]\n # list of tensors in forward graph that is in input to bwd graph\n ts_filtered = list(set(bwd_inputs).intersection(ts_all))\n debug_print(\"Using tensors %s\", ts_filtered)\n\n # try two slightly different ways of getting bottlenecks tensors\n # to checkpoint\n for ts in [ts_filtered, ts_all]:\n\n # get all bottlenecks in the graph\n bottleneck_ts = []\n for t in ts:\n b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))\n f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))\n # check that there are not shortcuts\n b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)\n f_inp = set([inp for op in f for inp in 
op.inputs]).intersection(ts_all)\n if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):\n bottleneck_ts.append(t) # we have a bottleneck!\n else:\n debug_print(\"Rejected bottleneck candidate and ops %s\", [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))\n\n # success? or try again without filtering?\n if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found!\n break\n\n if not bottleneck_ts:\n raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints=\"speed\".')\n\n # sort the bottlenecks\n bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)\n sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]\n\n # save an approximately optimal number ~ sqrt(N)\n N = len(ts_filtered)\n if len(bottleneck_ts) <= np.ceil(np.sqrt(N)):\n checkpoints = sorted_bottlenecks\n else:\n step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N)))\n checkpoints = sorted_bottlenecks[step::step]\n \n else:\n raise Exception('%s is unsupported input for \"checkpoints\"' % (checkpoints,))\n\n checkpoints = list(set(checkpoints).intersection(ts_all))\n\n # at this point automatic selection happened and checkpoints is list of nodes\n assert isinstance(checkpoints, list)\n\n debug_print(\"Checkpoint nodes used: %s\", checkpoints)\n # better error handling of special cases\n # xs are already handled as checkpoint nodes, so no need to include them\n xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))\n if xs_intersect_checkpoints:\n debug_print(\"Warning, some input nodes are also checkpoint nodes: %s\",\n xs_intersect_checkpoints)\n ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))\n debug_print(\"ys: %s, checkpoints: %s, intersect: %s\", ys, checkpoints,\n ys_intersect_checkpoints)\n # saving an output node (ys) gives no benefit in memory while creating\n # new edge cases, exclude them\n if ys_intersect_checkpoints:\n debug_print(\"Warning, some output nodes are also checkpoints nodes: %s\",\n format_ops(ys_intersect_checkpoints))\n\n # remove initial and terminal nodes from checkpoints list if present\n checkpoints = list(set(checkpoints) - set(ys) - set(xs))\n \n # check that we have some nodes to checkpoint\n if not checkpoints:\n raise Exception('no checkpoints nodes found or given as input! 
')\n\n # disconnect dependencies between checkpointed tensors\n checkpoints_disconnected = {}\n for x in checkpoints:\n if x.op and x.op.name is not None:\n grad_node = tf.stop_gradient(x, name=x.op.name+\"_sg\")\n else:\n grad_node = tf.stop_gradient(x)\n checkpoints_disconnected[x] = grad_node\n\n # partial derivatives to the checkpointed tensors and xs\n ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys],\n stop_at_ts=checkpoints, within_ops=fwd_ops)\n debug_print(\"Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s\",\n len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)\n debug_print(\"ops_to_copy = %s\", ops_to_copy)\n debug_print(\"Processing list %s\", ys)\n copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})\n for origin_op, op in info._transformed_ops.items():\n op._set_device(origin_op.node_def.device)\n copied_ops = info._transformed_ops.values()\n debug_print(\"Copied %s to %s\", ops_to_copy, copied_ops)\n ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)\n debug_print(\"Rewired %s in place of %s restricted to %s\",\n checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)\n\n # get gradients with respect to current boundary + original x's\n copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]\n boundary = list(checkpoints_disconnected.values())\n dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs)\n debug_print(\"Got gradients %s\", dv)\n debug_print(\"for %s\", copied_ys)\n debug_print(\"with respect to %s\", boundary+xs)\n\n inputs_to_do_before = [y.op for y in ys]\n if grad_ys is not None:\n inputs_to_do_before += grad_ys\n wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]\n my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)\n\n # partial derivatives to the checkpointed nodes\n # dictionary of \"node: backprop\" for nodes in the boundary\n d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(),\n dv[:len(checkpoints_disconnected)])}\n # partial derivatives to xs (usually the params of the neural net)\n d_xs = dv[len(checkpoints_disconnected):]\n\n # incorporate derivatives flowing through the checkpointed nodes\n checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)\n for ts in checkpoints_sorted_lists[::-1]:\n debug_print(\"Processing list %s\", ts)\n checkpoints_other = [r for r in checkpoints if r not in ts]\n checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]\n\n # copy part of the graph below current checkpoint node, stopping at\n # other checkpoints nodes\n ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)\n debug_print(\"Found %s ops to copy within %s, seed %s, stop_at %s\",\n len(ops_to_copy), fwd_ops, [r.op for r in ts],\n checkpoints_other)\n debug_print(\"ops_to_copy = %s\", ops_to_copy)\n if not ops_to_copy: # we're done!\n break\n copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})\n for origin_op, op in info._transformed_ops.items():\n op._set_device(origin_op.node_def.device)\n copied_ops = info._transformed_ops.values()\n debug_print(\"Copied %s to %s\", ops_to_copy, copied_ops)\n ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)\n debug_print(\"Rewired %s in place of %s restricted to %s\",\n checkpoints_disconnected_other, checkpoints_other, copied_ops)\n\n # gradient 
flowing through the checkpointed node\n boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]\n substitute_backprops = [d_checkpoints[r] for r in ts]\n dv = tf_gradients(boundary,\n checkpoints_disconnected_other+xs,\n grad_ys=substitute_backprops, **kwargs)\n debug_print(\"Got gradients %s\", dv)\n debug_print(\"for %s\", boundary)\n debug_print(\"with respect to %s\", checkpoints_disconnected_other+xs)\n debug_print(\"with boundary backprop substitutions %s\", substitute_backprops)\n\n inputs_to_do_before = [d_checkpoints[r].op for r in ts]\n wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]\n my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)\n\n # partial derivatives to the checkpointed nodes\n for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]):\n if dr is not None:\n if d_checkpoints[r] is None:\n d_checkpoints[r] = dr\n else:\n d_checkpoints[r] += dr\n def _unsparsify(x):\n if not isinstance(x, tf.IndexedSlices):\n return x\n assert x.dense_shape is not None, \"memory_saving_gradients encountered sparse gradients of unknown shape\"\n indices = x.indices\n while indices.shape.ndims < x.values.shape.ndims:\n indices = tf.expand_dims(indices, -1)\n return tf.scatter_nd(indices, x.values, x.dense_shape)\n\n # partial derivatives to xs (usually the params of the neural net)\n d_xs_new = dv[len(checkpoints_other):]\n for j in range(len(xs)):\n if d_xs_new[j] is not None:\n if d_xs[j] is None:\n d_xs[j] = _unsparsify(d_xs_new[j])\n else:\n d_xs[j] += _unsparsify(d_xs_new[j])\n\n\n return d_xs\n\ndef tf_toposort(ts, within_ops=None):\n all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)\n\n deps = {}\n for op in all_ops:\n for o in op.outputs:\n deps[o] = set(op.inputs)\n sorted_ts = toposort(deps)\n\n # only keep the tensors from our original list\n ts_sorted_lists = []\n for l in sorted_ts:\n keep = list(set(l).intersection(ts))\n if keep:\n ts_sorted_lists.append(keep)\n\n return ts_sorted_lists\n\ndef fast_backward_ops(within_ops, seed_ops, stop_at_ts):\n bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts))\n ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts])\n return list(ops)\n\n@contextlib.contextmanager\ndef capture_ops():\n \"\"\"Decorator to capture ops created in the block.\n with capture_ops() as ops:\n # create some ops\n print(ops) # => prints ops created.\n \"\"\"\n\n micros = int(time.time()*10**6)\n scope_name = str(micros)\n op_list = []\n with tf.name_scope(scope_name):\n yield op_list\n\n g = tf.get_default_graph()\n op_list.extend(ge.select_ops(scope_name+\"/.*\", graph=g))\n\ndef _to_op(tensor_or_op):\n if hasattr(tensor_or_op, \"op\"):\n return tensor_or_op.op\n return tensor_or_op\n\ndef _to_ops(iterable):\n if not _is_iterable(iterable):\n return iterable\n return [_to_op(i) for i in iterable]\n\ndef _is_iterable(o):\n try:\n _ = iter(o)\n except Exception:\n return False\n return True\n\nDEBUG_LOGGING=False\ndef debug_print(s, *args):\n \"\"\"Like logger.log, but also replaces all TensorFlow ops/tensors with their\n names. Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug\n\n Usage:\n debug_print(\"see tensors %s for %s\", tensorlist, [1,2,3])\n \"\"\"\n\n if DEBUG_LOGGING:\n formatted_args = [format_ops(arg) for arg in args]\n print(\"DEBUG \"+s % tuple(formatted_args))\n\ndef format_ops(ops, sort_outputs=True):\n \"\"\"Helper method for printing ops. 
Converts Tensor/Operation op to op.name,\n rest to str(op).\"\"\"\n \n if hasattr(ops, '__iter__') and not isinstance(ops, str):\n l = [(op.name if hasattr(op, \"name\") else str(op)) for op in ops]\n if sort_outputs:\n return sorted(l)\n return l\n else:\n return ops.name if hasattr(ops, \"name\") else str(ops)\n\ndef my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):\n for op in wait_to_do_ops:\n ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs]\n ge.add_control_inputs(op, ci)\n"
] | [
[
"numpy.sqrt",
"tensorflow.contrib.graph_editor.sgv",
"tensorflow.scatter_nd",
"tensorflow.contrib.graph_editor.filter_ts",
"tensorflow.get_collection",
"tensorflow.contrib.graph_editor.get_forward_walk_ops",
"tensorflow.contrib.graph_editor.reroute_ts",
"tensorflow.expand_dims",
"tensorflow.stop_gradient",
"tensorflow.contrib.graph_editor.filter_ts_from_regex",
"tensorflow.contrib.graph_editor.get_backward_walk_ops",
"tensorflow.name_scope",
"tensorflow.get_default_graph",
"tensorflow.contrib.graph_editor.add_control_inputs",
"tensorflow.contrib.graph_editor.select_ops"
]
] |
katekaseth/Project_One | [
"0eae5928b92ff99cc27815b73acc751d0348fca8"
] | [
"server/db/Data/data_cleaner.py"
] | [
"import pandas as pd\nimport re\n\ndata = pd.read_csv(\"BIPMetadata_current.csv\")\n\ndef format_date(date_column):\n # formatting the date data to display as yyyy-mm-dd\n new_dates = []\n for date in date_column:\n month = date[0:date.find('/')]\n date = date[date.find('/')+1:]\n day = date[0:date.find('/')]\n year = date[date.find('/')+1:]\n\n if (len(month) == 1):\n month = \"0\" + month\n if (len(day) == 1):\n day = \"0\" + day\n if (len(year) == 2):\n year = \"20\" + year\n newDate = year + \"-\" + month + \"-\" + day\n \n print(newDate)\n new_dates.append(newDate)\n return new_dates\n\n\ndef truncate(column, length):\n # truncates given column to given length and returns new column\n new_d = []\n for d in column:\n if (len(d) > length):\n d = d[0:length]\n new_d.append(d)\n return new_d\n\n\n# source: https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string\ndef cleanhtml(column):\n new_desc = []\n for d in column:\n cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n cleantext = re.sub(cleanr, '', d)\n new_desc.append(' '.join(cleantext.split()))\n return new_desc\n\n\ndef remove_spaces(column):\n new_sql = []\n for d in column:\n new_sql.append(' '.join(d.split()))\n return new_sql\n\n\nnew_created = format_date(data[\"created\"])\nprint(\"UPDATAED\")\nnew_updated = format_date(data[\"updated\"])\nnew_query = remove_spaces(data[\"sql_query\"])\nnew_query = truncate(new_query, 5000)\nnew_description = truncate(data[\"description\"], 500)\nnew_description = cleanhtml(new_description)\n\n\ndata[\"created\"] = new_created\ndata[\"updated\"] = new_updated\ndata[\"sql_query\"] = new_query\ndata[\"description\"] = new_description\n\n\ndata.to_csv(\"BIPMetadata_cleaned.csv\", index=False)"
] | [
[
"pandas.read_csv"
]
] |
kirtanp/MAMO-fair | [
"fd0fc39383f11a9e1ec401233b89c2399860fb94"
] | [
"utils/utilities.py"
] | [
"#!/usr/bin/env python\nimport numpy as np\nfrom collections import defaultdict\nimport itertools\nfrom sklearn.metrics import confusion_matrix\n\ndef print_data_stats(sens_attr, class_labels):\n \"\"\"Print a few numbers about the data: Total number of points, number of\n protected examples and unprotected examples, and number of protected points\n in positive class, and number of unprotected points in positive class.\n\n Parameters\n -----------\n sens_attr: numpy array\n The sensitive attribute of shape=(number_points,).\n class_labels: nunmp\n The class labels of shape=(number_points,).\n \"\"\"\n non_prot_all = sum(sens_attr == 1.0) # non-protected group\n prot_all = len(sens_attr) - non_prot_all # protected group\n non_prot_pos = sum(class_labels[sens_attr == 1.0] == 1.0) # non_protected in positive class\n prot_pos = sum(class_labels == 1.0) - non_prot_pos # protected in positive class\n frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)\n frac_prot_pos = float(prot_pos) / float(prot_all)\n print\n print(\"Total data points: %d\" % len(sens_attr))\n print(\"# non-protected examples: %d\" % non_prot_all)\n print(\"# protected examples: %d\" % prot_all)\n print(\"# non-protected examples in positive class: %d (%0.1f%%)\" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))\n print(\"# protected examples in positive class: %d (%0.1f%%)\" % (prot_pos, prot_pos * 100.0 / prot_all))\n\ndef get_positive_rate(y_predicted, y_true):\n \"\"\"Compute the positive rate for given predictions of the class label.\n\n Parameters\n ----------\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n y_true: numpy array\n The true class labels of shape=(number_points,).\n\n Returns\n ---------\n pr: float\n The positive rate.\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()\n pr = (tp+fp) / (tp+fp+tn+fn)\n return pr\n\ndef get_true_positive_rate(y_predicted, y_true):\n \"\"\"Compute the true positive rate for given predictions of the class label.\n\n Parameters\n ----------\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n y_true: numpy array\n The true class labels of shape=(number_points,).\n\n Returns\n ---------\n tpr: float\n The true positive rate.\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()\n tpr = tp / (tp+fn)\n return tpr\n\ndef compute_fairness_measures(y_predicted, y_true, sens_attr):\n \"\"\"Compute value of demographic parity and equality of opportunity for given predictions.\n\n Parameters\n ----------\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n y_true: numpy array\n The true class labels of shape=(number_points,).\n sens_attr: numpy array\n The sensitive labels of shape=(number_points,).\n\n Returns\n ----------\n DDP: float\n The difference of demographic parity.\n DEO: float\n The difference of equality of opportunity.\n \"\"\"\n positive_rate_prot = get_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])\n positive_rate_unprot = get_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])\n true_positive_rate_prot = get_true_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])\n true_positive_rate_unprot = get_true_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])\n DDP = positive_rate_unprot - positive_rate_prot\n DEO = true_positive_rate_unprot - true_positive_rate_prot\n rates = [positive_rate_unprot, positive_rate_prot]\n DP = np.min(rates)/(np.max(rates) + 1e-5)\n\n 
return DDP, DEO, DP\n\ndef get_accuracy(y_true, y_predicted):\n \"\"\"Compute the accuracy for given predicted class labels.\n\n Parameters\n ----------\n y_true: numpy array\n The true class labels of shape=(number_points,).\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n\n Returns\n ---------\n accuracy: float\n The accuracy of the predictions.\n \"\"\"\n correct_answers = (y_predicted == y_true).astype(int) # will have 1 when the prediction and the actual label match\n accuracy = float(sum(correct_answers)) / float(len(correct_answers))\n return accuracy\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"numpy.max",
"numpy.min"
]
] |
changwoolee/gradient-rescaling-attention-model | [
"2f1d819e8cee03a9d06312e700a5c474bed48c70"
] | [
"util.py"
] | [
"import tensorflow as tf\n\nfrom contextlib import contextmanager\nfrom PIL import Image\n\nfrom keras import backend as K\nfrom keras.utils.data_utils import OrderedEnqueuer\n\ndef heteroscedastic_loss(attention=False, \n\t\t\t\t\t\t\t\t\t\t\t\t block_attention_gradient=False, \n\t\t\t\t\t\t\t\t\t\t\t\t mode='l2'):\n\t''' Heteroscedastic loss.'''\n\n\tdef het_loss(y_true, y_pred):\n\t\ty_mean = y_pred[:,:,:,:3]\n\t\ty_logvar = y_pred[:,:,:,3:]\n\t\ty_logvar = K.clip(y_logvar, -10, 10)\n\t\tif mode == 'l2':\n\t\t\teuclidian_loss = K.square(y_true/127.5 - y_mean/127.5)\n\t\telif mode == 'l1':\n\t\t\teuclidian_loss = K.abs(y_true/127.5 - y_mean/127.5)\n\n\t\tloss = tf.exp(-y_logvar)*euclidian_loss + y_logvar\n\t\tloss *= 127.5\n\t\tif mode == 'l2':\n\t\t\tloss *= 127.5\n\t\t\t\n\n\t\tif attention:\n\t\t\tattention_mask = K.sigmoid(y_logvar) \n\n\t\t\tif block_attention_gradient:\n\t\t\t\tattention_mask = K.stop_gradient(attention_mask)\n\n\t\t\tloss = attention_mask * loss\n\t\treturn K.mean(loss, axis=-1)\n\n\treturn het_loss\n\n\n\n\n\n\n\n\n@contextmanager\ndef concurrent_generator(sequence, num_workers=8, max_queue_size=32, use_multiprocessing=False):\n\tenqueuer = OrderedEnqueuer(sequence, use_multiprocessing=use_multiprocessing)\n\ttry:\n\t\tenqueuer.start(workers=num_workers, max_queue_size=max_queue_size)\n\t\tyield enqueuer.get()\n\tfinally:\n\t\tenqueuer.stop()\n\n\ndef init_session(gpu_memory_fraction):\n\tK.tensorflow_backend.set_session(tensorflow_session(gpu_memory_fraction=gpu_memory_fraction))\n\n\ndef reset_session(gpu_memory_fraction):\n\tK.clear_session()\n\tinit_session(gpu_memory_fraction)\n\n\ndef tensorflow_session(gpu_memory_fraction):\n\tconfig = tf.ConfigProto()\n\tconfig.gpu_options.allow_growth = True\n\tconfig.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction\n\treturn tf.Session(config=config)\n\n\ndef load_image(path):\n\timg = Image.open(path)\n\tif img.mode != 'RGB':\n\t\timg = img.convert('RGB')\n\treturn img\n"
] | [
[
"tensorflow.exp",
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] |
jie311/vega | [
"1bba6100ead802697e691403b951e6652a99ccae"
] | [
"vega/algorithms/nas/fis/autogate_s2_trainer_callback.py"
] | [
"# -*- coding: utf-8 -*-\r\n\r\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the MIT License.\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# MIT License for more details.\r\n\"\"\"AutoGate top-k version Stage2 TrainerCallback.\"\"\"\r\n\r\nimport logging\r\nimport pandas as pd\r\nfrom vega.common import ClassFactory, ClassType\r\nfrom vega.common import FileOps\r\nfrom vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback\r\nfrom vega.core.pipeline.conf import ModelConfig\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@ClassFactory.register(ClassType.CALLBACK)\r\nclass AutoGateS2TrainerCallback(CtrTrainerCallback):\r\n \"\"\"AutoGateS2TrainerCallback module.\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Construct AutoGateS2TrainerCallback class.\"\"\"\r\n super(CtrTrainerCallback, self).__init__()\r\n self.sieve_board = pd.DataFrame(\r\n columns=['selected_feature_pairs', 'score'])\r\n self.selected_pairs = list()\r\n\r\n logging.info(\"init autogate s2 trainer callback\")\r\n\r\n def before_train(self, logs=None):\r\n \"\"\"Call before_train of the managed callbacks.\"\"\"\r\n super().before_train(logs)\r\n\r\n \"\"\"Be called before the training process.\"\"\"\r\n hpo_result = FileOps.load_pickle(FileOps.join_path(\r\n self.trainer.local_output_path, 'best_config.pickle'))\r\n logging.info(\"loading stage1_hpo_result \\n{}\".format(hpo_result))\r\n\r\n feature_interaction_score = hpo_result['feature_interaction_score']\r\n print('feature_interaction_score:', feature_interaction_score)\r\n sorted_pairs = sorted(feature_interaction_score.items(),\r\n key=lambda x: abs(x[1]), reverse=True)\r\n\r\n if ModelConfig.model_desc:\r\n fis_ratio = ModelConfig.model_desc[\"custom\"][\"fis_ratio\"]\r\n else:\r\n fis_ratio = 1.0\r\n top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\r\n self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\r\n\r\n # add selected_pairs\r\n setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)\r\n\r\n def after_train(self, logs=None):\r\n \"\"\"Call after_train of the managed callbacks.\"\"\"\r\n curr_auc = float(self.trainer.valid_metrics.results['auc'])\r\n\r\n self.sieve_board = self.sieve_board.append(\r\n {\r\n 'selected_feature_pairs': self.selected_pairs,\r\n 'score': curr_auc\r\n }, ignore_index=True)\r\n result_file = FileOps.join_path(\r\n self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\r\n\r\n self.sieve_board.to_csv(result_file, sep='\\t')\r\n"
] | [
[
"pandas.DataFrame"
]
] |
aauss/DSND_Term2 | [
"ff1ff8edc208652c29bfc25f18c610a02dc9d299"
] | [
"lessons/CRISP_DM/RemovingData.py"
] | [
"import pandas as pd\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport RemovingDataSolns as s\r\n\r\n# Question 1\r\ndef prop_sals_test(prop_sals):\r\n '''\r\n INPUT prop_sals - a float as the percent of missing values in the salary column\r\n\r\n Prints statement related to the correctness of the solution of the proportion\r\n '''\r\n if np.allclose(prop_sals, s.prop_sals):\r\n print(\"Nice job! That looks right!\")\r\n else:\r\n print(\"Oops! Make sure your value is for the proportion of nan values in only the Salary column.\")\r\n\r\n\r\n# Question 2\r\ndef sal_rm_test(sal_rm):\r\n '''\r\n INPUT sal_rm - a pandas dataframe with all rows that are missing a value the salary column removed. The dataframe should only have the columns of num_vars (quant variables)\r\n\r\n Prints statement related to the correctness of the solution of the dataframe\r\n '''\r\n if sal_rm.equals(s.sal_rm):\r\n print(\"Nice job! That looks right!\")\r\n else:\r\n print(\"That wasn't quite as expected. Try again, this should be the num_vars dataframe with salary removed.\")\r\n\r\n# Question 3\r\ndef question3_check(question3_solution):\r\n '''\r\n INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happend when fitting your model.\r\n\r\n Prints statement related to the correctness of the letter chosen.\r\n '''\r\n if question3_solution == s.question3_solution:\r\n print(\"Nice job! That's right! Those missing values in the X matrix will still not allow us to predict the response.\")\r\n else:\r\n print(\"Oops! That wasn't what we were expecting. Your solution should be either a, b, or c for the string that best relates to what happened.\")\r\n\r\n\r\n# Question 4\r\ndef all_rm_test(all_rm):\r\n '''\r\n INPUT all_rm - a pandas dataframe with all rows that are missing a value in any column removed from num_vars (only the numeric columns)\r\n\r\n Prints statement related to the correctness of the solution of the dataframe\r\n '''\r\n if all_rm.equals(s.all_rm):\r\n print(\"Nice job! That looks right. The default is to drop any row with a missing value in any column, so we didn't need to specify any arguments in this case.\")\r\n else:\r\n print(\"Oops! That doesn't look like what we were expecting. Make sure you are working with only the numeric columns, and you have dropped any rows with missing values.\")\r\n\r\n\r\n# Question 5\r\ndef question5_check(question5_solution):\r\n '''\r\n INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happend when fitting your model.\r\n\r\n Prints statement related to the correctness of the letter chosen.\r\n '''\r\n if question5_solution == s.question5_solution:\r\n print(\"Nice job! That's right! Python isn't exactly magic, but sometimes it feels like it is!\")\r\n else:\r\n print(\"Oops! Your solution should have worked. In which case, no output should have printed. This solution should follow just as in the screencast.\")\r\n\r\n\r\n# Question 6\r\ndef r2_test_check(r2_test):\r\n '''\r\n INPUT r2_test - the rsquared value from fitting a model with all nan values dropped and only using quantitative variables.\r\n\r\n Prints statement related to the correctness rsquared matching solution.\r\n '''\r\n if r2_test == s.r2_test:\r\n print(\"Nice job! That's right! Your rsquared matches the solution.\")\r\n else:\r\n print(\"Oops! That wasn't the value that was expected. 
You should fit your model using the training data, predict on the X_test data, and then score comparing the y_test and your predicted values.\")\r\n\r\n# Question 7\r\ndef question7_check(question7_solution):\r\n '''\r\n INPUT question7_solution - a dictionary with statements of takeaways from the rest of the notebook. The values should be the variables a, b, c, d, e, f, or g\r\n\r\n Prints statement related to the correctness of the solution of the dictionary\r\n '''\r\n if question7_solution == s.question7_solution:\r\n print(\"Nice job! That looks right to me! We would really like to predict for anyone who provides a salary, but our model right now definitely has some limitations.\")\r\n elif question7_solution['The number of reported salaries in the original dataset'] != s.question7_solution['The number of reported salaries in the original dataset']:\r\n print(\"The number of reported salaries in the original dataset doesn't look quite right.\")\r\n elif question7_solution['The number of test salaries predicted using our model'] != s.question7_solution['The number of test salaries predicted using our model']:\r\n print(\"The number of salaries predicted using our model doesn't look quite right.\")\r\n elif question7_solution['If an individual does not rate stackoverflow, but has a salary'] != s.question7_solution['If an individual does not rate stackoverflow, but has a salary']:\r\n print(\"Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.\")\r\n elif question7_solution['If an individual does not have a a job satisfaction, but has a salary'] != s.question7_solution['If an individual does not have a a job satisfaction, but has a salary']:\r\n print(\"Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.\")\r\n elif question7_solution['Our model predicts salaries for the two individuals described above.'] != s.question7_solution['Our model predicts salaries for the two individuals described above.']:\r\n print(\"Unfortunately, our current model will not predict for anyone who has missing values in any column - even if they do have a salary!\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"numpy.allclose"
]
] |
pengshuang/allennlp | [
"91d0fa1a51485c4118e48426d76328acd8049587"
] | [
"allennlp/interpret/saliency_interpreters/simple_gradient.py"
] | [
"import math\n\nfrom typing import List\nimport numpy\n\nfrom allennlp.common.util import JsonDict, sanitize\nfrom allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter\nfrom allennlp.nn import util\n\n\n@SaliencyInterpreter.register(\"simple-gradient\")\nclass SimpleGradient(SaliencyInterpreter):\n \"\"\"\n Registered as a `SaliencyInterpreter` with name \"simple-gradient\".\n \"\"\"\n\n def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:\n \"\"\"\n Interprets the model's prediction for inputs. Gets the gradients of the loss with respect\n to the input and returns those gradients normalized and sanitized.\n \"\"\"\n labeled_instances = self.predictor.json_to_labeled_instances(inputs)\n\n # List of embedding inputs, used for multiplying gradient by the input for normalization\n embeddings_list: List[numpy.ndarray] = []\n\n instances_with_grads = dict()\n for idx, instance in enumerate(labeled_instances):\n # Hook used for saving embeddings\n handle = self._register_forward_hook(embeddings_list)\n grads = self.predictor.get_gradients([instance])[0]\n handle.remove()\n\n # Gradients come back in the reverse order that they were sent into the network\n embeddings_list.reverse()\n for key, grad in grads.items():\n # Get number at the end of every gradient key (they look like grad_input_[int],\n # we're getting this [int] part and subtracting 1 for zero-based indexing).\n # This is then used as an index into the reversed input array to match up the\n # gradient and its respective embedding.\n input_idx = int(key[-1]) - 1\n # The [0] here is undo-ing the batching that happens in get_gradients.\n emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx], axis=1)\n norm = numpy.linalg.norm(emb_grad, ord=1)\n normalized_grad = [math.fabs(e) / norm for e in emb_grad]\n grads[key] = normalized_grad\n\n instances_with_grads[\"instance_\" + str(idx + 1)] = grads\n return sanitize(instances_with_grads)\n\n def _register_forward_hook(self, embeddings_list: List):\n \"\"\"\n Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()\n is called, embeddings_list is filled with the embedding values. This is necessary because\n our normalization scheme multiplies the gradient by the embedding value.\n \"\"\"\n\n def forward_hook(module, inputs, output):\n embeddings_list.append(output.squeeze(0).clone().detach().numpy())\n\n embedding_layer = util.find_embedding_layer(self.predictor._model)\n handle = embedding_layer.register_forward_hook(forward_hook)\n\n return handle\n"
] | [
[
"numpy.sum",
"numpy.linalg.norm"
]
] |
webdeveloper0012/Tensor2tensor | [
"48bce065278eba461c8a2840e4132becbc822c7c",
"48bce065278eba461c8a2840e4132becbc822c7c"
] | [
"tensor2tensor/data_generators/problem.py",
"tensor2tensor/layers/common_hparams.py"
] | [
"# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for problem/dataset definitions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport collections\nimport os\nimport random\n# Dependency imports\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\n\n\nclass SpaceID(object):\n \"\"\"Input and target space ids. Add more as needed.\"\"\"\n # Generic / unknown output space (default)\n GENERIC = 0\n # Image labels\n IMAGE_LABEL = 1\n # English characters\n EN_CHR = 2\n # English tokens\n EN_TOK = 3\n # English bpe tokens\n EN_BPE_TOK = 4\n # French characters\n FR_CHR = 5\n # French tokens\n FR_TOK = 6\n # German characters\n DE_CHR = 7\n # German tokens\n DE_TOK = 8\n # German bpe tokens\n DE_BPE_TOK = 9\n # Digit cipher lexicon 0\n DIGIT_0 = 10\n # Digit cipher lexicon 1\n DIGIT_1 = 11\n # Audio waveform domain\n AUDIO_WAV = 12\n # Audio spectral domain\n AUDIO_SPECTRAL = 13\n # Parse characters\n PARSE_CHR = 14\n # Parse tokens\n PARSE_TOK = 15\n # Chinese tokens\n ZH_TOK = 16\n # Icelandic characters\n ICE_CHAR = 17\n # Icelandic tokens\n ICE_TOK = 18\n # Icelandic parse tokens\n ICE_PARSE_TOK = 19\n # Macedonian tokens\n MK_TOK = 20\n # Czech tokens\n CS_TOK = 21\n # Czech characters\n CS_CHR = 22\n # Genetic bases (ACTG)\n DNA = 23\n # Real numbers\n REAL = 24\n # Images\n IMAGE = 25\n # Peptide\n PEPTIDE = 26\n # Python\n PY_TOK = 27\n # C++\n CPP_TOK = 28\n # Strokes\n STROKES = 29\n # Pickled Python\n PICKLED_PYTHON = 30\n\n\ndef default_model_hparams():\n return tf.contrib.training.HParams(\n max_input_seq_length=0,\n max_target_seq_length=0,\n prepend_mode=\"none\",\n data_dir=None)\n\n\ndef preprocess_example_common(example, hparams, mode):\n \"\"\"Preprocessing steps common to all models.\"\"\"\n if hparams.max_input_seq_length > 0:\n example[\"inputs\"] = example[\"inputs\"][:hparams.max_input_seq_length]\n if hparams.max_target_seq_length > 0:\n example[\"targets\"] = example[\"targets\"][:hparams.max_target_seq_length]\n if hparams.prepend_mode != \"none\":\n if mode == tf.estimator.ModeKeys.PREDICT:\n example[\"partial_targets\"] = tf.concat([example[\"inputs\"], [0]], 0)\n else:\n example[\"targets\"] = tf.concat(\n [example[\"inputs\"], [0], example[\"targets\"]], 0)\n return example\n\n\nclass Problem(object):\n \"\"\"Problem base class. Specifies a T2T problem.\n\n Problems unify the specification of a problem for data generation, training,\n and inference.\n\n New problems are specified by the following methods:\n\n Data generation:\n * generate_data(data_dir, tmp_dir)\n - Generate training and dev datasets into data_dir.\n - Additional files, e.g. vocabulary files, should also be written to\n data_dir. 
Vocab files are newline-separated files with each line\n containing a token. The standard convention for the filename is to\n set it to be\n ${Problem.vocab_name}.${Problem.targeted_vocab_size}\n - Downloads and other files can be written to tmp_dir\n - If you have a training and dev generator, you can generate the\n training and dev datasets with\n generator_utils.generate_dataset_and_shuffle.\n - Use the self.training_filepaths and self.dev_filepaths functions to\n get sharded filenames. If shuffled=False, the filenames will contain\n an \"unshuffled\" suffix; you should then shuffle the data\n shard-by-shard with generator_utils.shuffle_dataset.\n - Allows to specify the number of shards, optionally (can be omitted).\n - Subclasses must override\n * dataset_filename()\n - Base filename for problem.\n - Defaults to registered name (self.name).\n\n Training:\n * hparams(defaults, model_hparams)\n - Specify the problem hyperparameters (see _default_hparams)\n - Mutate defaults as needed\n * example_reading_spec\n - Specify the names and types of the features on disk.\n - Specify tf.contrib.slim.tfexample_decoder\n * preprocess_example(example, mode)\n - Preprocess the example feature dict from feature name to Tensor or\n SparseTensor.\n - Used in training, eval, and inference (specified by mode).\n\n Eval:\n * eval_metrics\n - Specify the set of evaluation metrics for this problem.\n\n Inference:\n * feature_encoders(data_dir)\n - Return a dict of <feature name, TextEncoder> for encoding and decoding\n inference input/output.\n - Defaults to TextEncoder for inputs and targets.\n \"\"\"\n\n # ============================================================================\n # BEGIN SUBCLASS INTERFACE\n # ============================================================================\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n raise NotImplementedError()\n\n def hparams(self, defaults, model_hparams):\n pass\n\n def dataset_filename(self):\n return self.name\n\n def feature_encoders(self, data_dir):\n del data_dir\n return {\n \"inputs\": text_encoder.TextEncoder(),\n \"targets\": text_encoder.TextEncoder()\n }\n\n def example_reading_spec(self):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def preprocess_example(self, example, mode, hparams):\n return preprocess_example_common(example, hparams, mode)\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY\n ]\n\n # ============================================================================\n # END SUBCLASS INTERFACE\n # ============================================================================\n\n def training_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.train_data_filenames(file_basename, data_dir,\n num_shards)\n\n def dev_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.dev_data_filenames(file_basename, data_dir,\n num_shards)\n\n def test_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return 
generator_utils.test_data_filenames(file_basename, data_dir,\n num_shards)\n\n def filepattern(self, data_dir, mode, shard=None):\n \"\"\"Get filepattern for data files for mode.\n\n Matches mode to a suffix.\n * TRAIN: train\n * EVAL: dev\n * PREDICT: dev\n * test: test\n\n Args:\n data_dir: str, data directory.\n mode: tf.estimator.ModeKeys or \"test\".\n shard: int, if provided, will only read data from the specified shard.\n\n Returns:\n filepattern str\n \"\"\"\n path = os.path.join(data_dir, self.dataset_filename())\n shard_str = \"-%05d\" % shard if shard is not None else \"\"\n if mode == tf.estimator.ModeKeys.TRAIN:\n suffix = \"train\"\n elif mode in [tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT]:\n suffix = \"dev\"\n else:\n assert mode == \"test\"\n suffix = \"test\"\n\n return \"%s-%s%s*\" % (path, suffix, shard_str)\n\n def __init__(self, was_reversed=False, was_copy=False):\n \"\"\"Create a Problem.\n\n Args:\n was_reversed: bool, whether to reverse inputs and targets.\n was_copy: bool, whether to copy inputs to targets. Can be composed with\n was_reversed so that if both are true, the targets become the inputs,\n which are then copied to targets so that the task is targets->targets.\n \"\"\"\n self._was_reversed = was_reversed\n self._was_copy = was_copy\n self._encoders = None\n self._hparams = None\n self._feature_info = None\n\n def get_feature_encoders(self, data_dir=None):\n if self._encoders is None:\n self._encoders = self.feature_encoders(data_dir)\n return self._encoders\n\n def get_hparams(self, model_hparams=None):\n \"\"\"Returns problem_hparams.\"\"\"\n if self._hparams is not None:\n return self._hparams\n\n if self._encoders is None:\n data_dir = (model_hparams and model_hparams.data_dir) or None\n self.get_feature_encoders(data_dir)\n\n hp = _default_hparams()\n ret = self.hparams(hp, model_hparams)\n if ret is not None:\n raise ValueError(\"The Problem subclass hparams function should mutate \"\n \"the defaults passed in and return None.\")\n\n hp.add_hparam(\"vocabulary\", self._encoders)\n hp.add_hparam(\"was_reversed\", self._was_reversed)\n hp.add_hparam(\"was_copy\", self._was_copy)\n\n if self._was_reversed:\n _reverse_problem_hparams(hp)\n if self._was_copy:\n _copy_problem_hparams(hp)\n\n self._hparams = hp\n return self._hparams\n\n def maybe_reverse_features(self, feature_map):\n if not self._was_reversed:\n return\n inputs, targets = feature_map[\"inputs\"], feature_map[\"targets\"]\n feature_map[\"inputs\"], feature_map[\"targets\"] = targets, inputs\n\n def maybe_copy_features(self, feature_map):\n if not self._was_copy:\n return\n feature_map[\"targets\"] = feature_map[\"inputs\"]\n\n def dataset(self,\n mode,\n data_dir=None,\n num_threads=None,\n output_buffer_size=None,\n shuffle_files=None,\n hparams=None,\n preprocess=True,\n dataset_split=None,\n shard=None):\n \"\"\"Build a Dataset for this problem.\n\n Args:\n mode: tf.estimator.ModeKeys; determines which files to read from.\n data_dir: directory that contains data files.\n num_threads: int, number of threads to use for decode and preprocess\n Dataset.map calls.\n output_buffer_size: int, how many elements to prefetch in Dataset.map\n calls.\n shuffle_files: whether to shuffle input files. Default behavior (i.e. when\n shuffle_files=None) is to shuffle if mode == TRAIN.\n hparams: tf.contrib.training.HParams; hparams to be passed to\n Problem.preprocess_example and Problem.hparams. 
If None, will use a\n default set that is a no-op.\n preprocess: bool, whether to map the Dataset through\n Problem.preprocess_example.\n dataset_split: tf.estimator.ModeKeys + [\"test\"], which split to read data\n from (TRAIN:\"-train\", EVAL:\"-dev\", \"test\":\"-test\"). Defaults to mode.\n shard: int, if provided, will only read data from the specified shard.\n\n Returns:\n Dataset containing dict<feature name, Tensor>.\n \"\"\"\n dataset_split = dataset_split or mode\n assert data_dir\n\n if hparams is None:\n hparams = default_model_hparams()\n\n if not hasattr(hparams, \"data_dir\"):\n hparams.add_hparam(\"data_dir\", data_dir)\n if not hparams.data_dir:\n hparams.data_dir = data_dir\n # Construct the Problem's hparams so that items within it are accessible\n _ = self.get_hparams(hparams)\n\n data_fields, data_items_to_decoders = self.example_reading_spec()\n if data_items_to_decoders is None:\n data_items_to_decoders = {\n field: tf.contrib.slim.tfexample_decoder.Tensor(field)\n for field in data_fields\n }\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)\n tf.logging.info(\"Reading data files from %s\", data_filepattern)\n data_files = tf.contrib.slim.parallel_reader.get_data_files(\n data_filepattern)\n if shuffle_files or shuffle_files is None and is_training:\n random.shuffle(data_files)\n dataset = tf.contrib.data.TFRecordDataset(data_files)\n\n def decode_record(record):\n \"\"\"Serialized Example to dict of <feature name, Tensor>.\"\"\"\n decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(\n data_fields, data_items_to_decoders)\n\n decode_items = list(data_items_to_decoders)\n decoded = decoder.decode(record, items=decode_items)\n return dict(zip(decode_items, decoded))\n\n def _preprocess(example):\n example = self.preprocess_example(example, mode, hparams)\n self.maybe_reverse_features(example)\n self.maybe_copy_features(example)\n return example\n\n dataset = dataset.map(decode_record, num_threads=num_threads)\n\n if preprocess:\n dataset = dataset.map(\n _preprocess,\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n\n return dataset\n\n @property\n def has_inputs(self):\n return \"inputs\" in self.get_feature_encoders()\n\n @property\n def feature_info(self):\n \"\"\"Retrieve dict<feature name, FeatureInfo>.\n\n Must first call Problem.get_hparams or Problem.dataset to have the problem's\n internal hparams already constructed.\n\n Returns:\n dict<feature name, FeatureInfo>\n \"\"\"\n if self._feature_info is not None:\n return self._feature_info\n\n assert self._hparams is not None\n\n hp = self.get_hparams()\n input_mods = hp.input_modality\n target_mod = hp.target_modality\n vocabs = hp.vocabulary\n if self.has_inputs:\n in_id = hp.input_space_id\n out_id = hp.target_space_id\n\n features = collections.defaultdict(FeatureInfo)\n\n for name, mod_spec in six.iteritems(input_mods):\n mod, vocab_size = mod_spec\n finfo = features[name]\n finfo.modality = mod\n finfo.vocab_size = vocab_size\n\n mod, vocab_size = target_mod\n features[\"targets\"].modality = mod\n features[\"targets\"].vocab_size = vocab_size\n\n for name, encoder in six.iteritems(vocabs):\n features[name].encoder = encoder\n\n if self.has_inputs:\n features[\"inputs\"].space_id = in_id\n features[\"targets\"].space_id = out_id\n\n self._feature_info = features\n return features\n\n\nclass FeatureInfo(object):\n\n def __init__(self,\n encoder=None,\n modality=None,\n vocab_size=None,\n 
space_id=None):\n self.encoder = encoder\n self.modality = modality\n self.vocab_size = vocab_size\n self.space_id = space_id\n\n\ndef _copy_problem_hparams(p_hparams):\n \"\"\"Use input modality, vocab, and space id for target.\"\"\"\n p = p_hparams\n # Duplicate input modality.\n p.target_modality = p.input_modality[\"inputs\"]\n # Duplicate input vocabulary.\n p.vocabulary[\"targets\"] = p.vocabulary[\"inputs\"]\n # Duplicate input space ids.\n p.target_space_id = p.input_space_id\n # Mark that p was copied.\n p.was_copy = True\n\n\ndef _reverse_problem_hparams(p_hparams):\n \"\"\"Swap input/output modalities, vocab, and space ids.\"\"\"\n p = p_hparams\n\n # Swap modalities.\n input_modality = p.input_modality[\"inputs\"]\n target_modality = p.target_modality\n p.input_modality[\"inputs\"] = target_modality\n p.target_modality = input_modality\n\n # Swap vocabularies.\n input_vocabulary = p.vocabulary[\"inputs\"]\n target_vocabulary = p.vocabulary[\"targets\"]\n p.vocabulary[\"inputs\"] = target_vocabulary\n p.vocabulary[\"targets\"] = input_vocabulary\n\n # Swap input/target space ids.\n input_space_id = p.input_space_id\n target_space_id = p.target_space_id\n p.input_space_id = target_space_id\n p.target_space_id = input_space_id\n\n # Mark that p was reversed.\n p.was_reversed = True\n\n\ndef _default_hparams():\n \"\"\"A set of basic model hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n # Use this parameter to get comparable perplexity numbers with different\n # tokenizations. This value should be set to the ratio of the number of\n # tokens in the test set according to the tokenization used to the number\n # of tokens in the test set in the \"official\" tokenization. For\n # example, if we are using a word-piece based model and we want to\n # compute per-word perplexity, then we set loss_multiplier to the number\n # of wordpieces per word in the test set.\n loss_multiplier=1.0,\n\n # Use this parameter to allow for larger sequences in the batch. Without\n # the use of this parameter, the size of the inner two dimensions will\n # be used to judge the sequence length.\n batch_size_multiplier=1,\n\n # To make queues of the right capacity, it's good to know the maximal\n # expected batch size, as it can vary a lot. It only affects performance\n # of input readers and memory use. The defaults should be safe and fast,\n # but decrease if your reader uses a lot of memory and increase if slow.\n max_expected_batch_size_per_shard=64,\n\n # During inference for autoregressive problems, if the batch_size is 1,\n # the inference will stop when the model predicts a text_encoder.EOS_ID\n # token.\n stop_at_eos=False,\n\n # Modalities used to map from input features to a space compatible with\n # chosen model architecture. One modality spec (which is a 2-tuple,\n # (modality_full_name, vocab_size)) per feature key. modality_full_name\n # is a string type:name, e.g. class_label:class_label_2d. Leaving off\n # the name uses the default modality for that type (e.g. class_label ==\n # class_label:default).\n input_modality={},\n\n # Modality used to map from hidden representation to the target space.\n # Specified as a modality spec, a 2-tuple described above.\n target_modality=None,\n\n # Identifiers used to tell the model which input/target space will be\n # expected. For example, it can tell that we expect French as characters\n # as output, or Spanish as sound.
Spaces defined as constants in SpaceID\n # class.\n input_space_id=SpaceID.GENERIC,\n target_space_id=SpaceID.GENERIC)\n\n\nclass Text2TextProblem(Problem):\n \"\"\"Base class for text-to-text problems.\"\"\"\n\n @property\n def is_character_level(self):\n \"\"\"Whether the inputs and targets are sequences of characters.\"\"\"\n raise NotImplementedError()\n\n @property\n def targeted_vocab_size(self):\n raise NotImplementedError() # Not needed if self.is_character_level.\n\n def generator(self, data_dir, tmp_dir, is_training):\n \"\"\"Generator for the training and evaluation data.\n\n Args:\n data_dir: The directory in which to write assets, e.g. the vocab file.\n tmp_dir: A scratch directory (if needed).\n is_training: A boolean indicating if we should generate training data\n (True) or dev set data (False).\n\n Yields:\n dicts with keys \"inputs\" and \"targets\", with values being lists of token\n ids.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def use_train_shards_for_dev(self):\n \"\"\"If true, we only generate training data and hold out shards for dev.\"\"\"\n return False\n\n @property\n def input_space_id(self):\n raise NotImplementedError()\n\n @property\n def target_space_id(self):\n raise NotImplementedError()\n\n @property\n def num_shards(self):\n raise NotImplementedError()\n\n @property\n def num_dev_shards(self):\n return 1\n\n @property\n def vocab_name(self):\n raise NotImplementedError()\n\n @property\n def vocab_file(self):\n return \"%s.%d\" % (self.vocab_name, self.targeted_vocab_size)\n\n @property\n def use_subword_tokenizer(self):\n raise NotImplementedError()\n\n @property\n def has_inputs(self):\n return True # Set to False for language models.\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n train_paths = self.training_filepaths(\n data_dir, self.num_shards, shuffled=False)\n dev_paths = self.dev_filepaths(\n data_dir, self.num_dev_shards, shuffled=False)\n if self.use_train_shards_for_dev:\n all_paths = train_paths + dev_paths\n generator_utils.generate_files(\n self.generator(data_dir, tmp_dir, True), all_paths)\n generator_utils.shuffle_dataset(all_paths)\n else:\n generator_utils.generate_dataset_and_shuffle(\n self.generator(data_dir, tmp_dir, True), train_paths,\n self.generator(data_dir, tmp_dir, False), dev_paths)\n\n def feature_encoders(self, data_dir):\n if self.is_character_level:\n encoder = text_encoder.ByteTextEncoder()\n elif self.use_subword_tokenizer:\n vocab_filename = os.path.join(data_dir, self.vocab_file)\n encoder = text_encoder.SubwordTextEncoder(vocab_filename)\n else:\n vocab_filename = os.path.join(data_dir, self.vocab_file)\n encoder = text_encoder.TokenTextEncoder(vocab_filename)\n if self.has_inputs:\n return {\"inputs\": encoder, \"targets\": encoder}\n return {\"targets\": encoder}\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.stop_at_eos = int(True)\n\n if self.has_inputs:\n source_vocab_size = self._encoders[\"inputs\"].vocab_size\n p.input_modality = {\n \"inputs\": (registry.Modalities.SYMBOL, source_vocab_size)\n }\n target_vocab_size = self._encoders[\"targets\"].vocab_size\n p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)\n if self.has_inputs:\n p.input_space_id = self.input_space_id\n p.target_space_id = self.target_space_id\n if self.is_character_level:\n p.loss_multiplier = 2.0\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,\n
metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,\n metrics.Metrics.ROUGE_L_F\n ]\n",
"# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hyperparameters and ranges common to multiple models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\n@registry.register_hparams(\"basic_1\")\ndef basic_params1():\n \"\"\"A set of basic hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n batch_size=4096, # in tokens per batch per gpu\n # Fixed batch size turns off bucketing during training mode\n # and uses batch_size as minibatch size (use small batch_size<=32)\n use_fixed_batch_size=False,\n num_hidden_layers=4,\n kernel_height=3,\n kernel_width=1,\n hidden_size=64,\n compress_steps=0,\n # All hyperparameters ending in \"dropout\" are automatically set to 0.0\n # when not in training mode.\n dropout=0.2,\n clip_grad_norm=2.0,\n grad_noise_scale=0.0,\n summarize_grads=False,\n initializer=\"orthogonal\",\n initializer_gain=1.5,\n label_smoothing=0.1,\n optimizer=\"Adam\",\n optimizer_adam_epsilon=1e-6,\n optimizer_adam_beta1=0.85,\n optimizer_adam_beta2=0.997,\n optimizer_momentum_momentum=0.9,\n weight_decay=0.1,\n weight_noise=0.0,\n learning_rate_decay_scheme=\"none\",\n learning_rate_warmup_steps=100,\n learning_rate_cosine_cycle_steps=250000,\n learning_rate=0.1,\n sampling_method=\"argmax\", # \"argmax\" or \"random\"\n sampling_temp=1.0, # temperature for sampling\n problem_choice=\"adaptive\", # \"uniform\", \"adaptive\", \"distributed\"\n # expand the logits a piece at a time - saves memory.\n factored_logits=False,\n multiply_embedding_mode=\"sqrt_depth\",\n # Parameters related to mixtures of experts.\n moe_hidden_sizes=\"2048\", # hidden layer sizes (comma-separated)\n moe_num_experts=64, # number of experts per layer\n moe_k=2, # how many experts to use for each batch element\n moe_loss_coef=1e-2,\n # Sequences of operations to perform on layer input and layer output.\n # Used by common_layers.layer_preprocess, common_layers.layer_postprocess\n # Each character repsesnts an operation:\n # none: no preprocessing\n # d: apply dropout\n # n: apply normalization (see norm_type and norm_epsilon)\n # a: add layer input (residual connection - only during postprocess)\n # The special string \"none\" is used instead of the empty string\n # to indicate no pre/postprocesisng, since the empty string causes\n # trouble for hyperparameter tuning.\n # TODO(noam): The current settings (\"\", \"dan\") are the published version\n # of the transformer. 
(\"n\", \"da\") seems better for harder-to-learn\n # models, so it should probably be the default.\n layer_preprocess_sequence=\"none\",\n layer_postprocess_sequence=\"dan\",\n # dropout rate to use during layer_preprocess and layer_postprocess\n layer_prepostprocess_dropout=0.1,\n # What type of normalization to use\n norm_type=\"layer\", # \"batch\", layer\", \"noam\", \"none\".\n # epsilon parameter to normalization function\n norm_epsilon=1e-6,\n symbol_modality_num_shards=16,\n # During training, we drop sequences whose inputs and targets are shorter\n # than min_length\n min_length=0,\n # During training, we drop sequences whose inputs or targets are longer\n # than max_length.\n # If max_length==0, we use hparams.batch_size instead.\n max_length=0,\n # Maximum length in the smallest length bucket. Setting this\n # flag too high will result in wasteful padding of short\n # sequences. Due to some (hopefully) temporary hacks in the\n # data reading and batching code, setting this flag too low\n # results in a very long batch-shuffling queue.\n # TODO(noam): change this once the Datasets API changes.\n min_length_bucket=8,\n # This flag controls the number of length buckets in the data\n # reader. The buckets have maximum lengths from\n # min_bucket_length to (max_length or batch_size), increasing\n # (approximately) by factors of length_bucket_step.\n length_bucket_step=1.1,\n # If set to True, drop sequences longer than max_length during eval.\n # This affects the validity of the evaluation metrics.\n eval_drop_long_sequences=False,\n # TODO(lukaszkaiser): these parameters should probably be set elsewhere.\n # in SymbolModality, share the output embeddings and the softmax\n # variables.\n # You can also share the input embeddings with the output embeddings\n # by using a problem_hparams that uses the same modality object for\n # the input_modality and target_modality.\n shared_embedding_and_softmax_weights=False,\n # In SymbolModality, skip the top layer, assume we're providing logits.\n symbol_modality_skip_top=False,\n # For each feature for which you want to override the default input\n # modality, add an entry to this semicolon-separated string. Entries are\n # formatted \"feature_name:modality_type:modality_name\", e.g.\n # \"inputs:symbol:default;other_inputs:audio:identity\".\n input_modalities=\"default\", # We don't use empty string in params.\n # To override the default target modality, specify\n # \"modality_type:modality_name\", e.g. \"symbol:ctc\".\n target_modality=\"default\",\n # The maximum length of \"input\" sequence.\n # Sequences longer than this value will be truncated. 0 or negative values\n # mean there is no maximum or truncation.\n # You can change this behavior by overridding preprocess_example() method\n # in your problem class.\n max_input_seq_length=0,\n # The maximum length of \"target\" sequence.\n # Sequences longer than this value will be truncated. 0 or negative values\n # mean there is no maximum or truncation.\n # You can change this behavior by overridding preprocess_example() method\n # in your problem class.\n max_target_seq_length=0,\n # This flag allows us to optionally treat a seq-to-seq problem\n # as a language model. Legal values are:\n #\n # \"none\" - Do not prepend the inputs to the targets.\n # \"prepend_inputs_masked_attention\"\n # replace \"targets\" in preprocessing with\n # tf.concat([inputs, [0], targets], axis=1)\n # i.e. we prepend the inputs to the targets with a single\n # padding token in between. 
Use masked self-attention on the\n # entire resulting sequence. During training, we compute losses on\n # the combined sequence. During eval, we compute the metrics\n # on only the targets portion.\n # \"prepend_inputs_full_attention\"\n # similar to the previous option except that each\n # position in the inputs portion can see the\n # entire inputs portion. This removes the challenge of\n # autoregressively predicting the inputs portion.\n prepend_mode=\"none\",\n # Scheduled sampling is interesting for auto-regressive models.\n # It runs an additional step using the generated output as autoregressive\n # targets, which can improve the model's inference results later. The\n # parameter scheduled_sampling_prob determines with what probability\n # such an additional step will be run. It's turned off (0.0) by default.\n # This probability will exponentially warm up for the number of\n # steps determined by scheduled_sampling_warmup_steps.\n # The tensor used for the second step will consist of outputs from\n # the first step mixed with gold truth, with the proportion of gold\n # determined by scheduled_sampling_gold_mixin_prob.\n scheduled_sampling_prob=0.0,\n scheduled_sampling_warmup_steps=50000,\n scheduled_sampling_gold_mixin_prob=0.5,\n # This is the actual batch size, *not* tokens per batch (i.e. for\n # language models this is the number of sentences in the batch)\n tpu_batch_size_per_shard=24,\n )\n\n\nclass RangedHParams(object):\n \"\"\"Defines parameter ranges for tuning.\"\"\"\n\n # From ParameterConfig proto\n LINEAR_SCALE = 1\n LOG_SCALE = 2\n REVERSE_LOG_SCALE = 3\n\n def __init__(self):\n self._categorical_params = {}\n self._discrete_params = {}\n self._discrete_float_params = {}\n self._float_params = {}\n self._int_params = {}\n\n def _check_reset_and_type_change(self, name, orig_ctr):\n \"\"\"Check if name is in orig_ctr or in one of the other type containers.\"\"\"\n # Resetting a hyperparameter\n if name in orig_ctr:\n tf.logging.warning(\"Overwriting hparam %s\", name)\n\n ctr_names = [(self._categorical_params,\n \"categorical\"), (self._discrete_params, \"discrete\"),\n (self._float_params, \"float\"), (self._int_params, \"int\"),\n (self._discrete_float_params, \"discrete_float\")]\n ctrs, names = list(zip(*ctr_names))\n orig_name = names[ctrs.index(orig_ctr)]\n\n for ctr, ctr_name in ctr_names:\n if ctr is orig_ctr:\n continue\n\n # Using a different type for the same hyperparameter name\n if name in ctr:\n raise ValueError(\"Setting hyperparameter %s as type %s, but a \"\n \"hyperparameter of the same name was originally \"\n \"registered as type %s\" % (name, ctr_name, orig_name))\n\n def set_categorical(self, name, categories, length=None):\n self._check_reset_and_type_change(name, self._categorical_params)\n self._categorical_params[name] = (name, categories, length)\n\n def set_discrete(self, name, feasible_points, scale=None, length=None):\n self._check_reset_and_type_change(name, self._discrete_params)\n self._discrete_params[name] = (name, feasible_points, scale, length)\n\n def set_float(self, name, min_val, max_val, scale=None, length=None):\n if name in self._discrete_float_params:\n del self._discrete_float_params[name]\n self._check_reset_and_type_change(name, self._float_params)\n self._float_params[name] = (name, min_val, max_val, scale, length)\n\n def set_discrete_float(self, name, val):\n self._check_reset_and_type_change(name, self._discrete_float_params)\n self._discrete_float_params[name] = (name, [val])\n\n def set_int(self, name, min_val,
max_val, scale=None, length=None):\n self._check_reset_and_type_change(name, self._int_params)\n self._int_params[name] = (name, min_val, max_val, scale, length)\n\n def fix_select_params(self, hp):\n ctrs = [\n self._categorical_params, self._discrete_params,\n self._discrete_float_params, self._float_params, self._int_params\n ]\n for key, val in six.iteritems(hp.values()):\n for ctr in ctrs:\n if key in ctr:\n del ctr[key]\n self.set_discrete(key, [val])\n\n\ndef fill_ranged_hparams_from_hparams(hparams, ranged_hparams):\n \"\"\"Fill ranged_hparams with singleton values from hparams.\n\n HParams are placed in RangedHParams with the following functions, according to\n type:\n * int: set_discrete\n * bool: set_discrete\n * float: set_discrete_float\n * str: set_categorical\n\n Args:\n hparams: tf.contrib.training.HParams; contains the hyperparameters to copy\n over to ranged_hparams.\n ranged_hparams: RangedHParams; will have hparams values copied to it.\n\n Raises:\n ValueError: if hparams contains a hyperparameter not of type\n {int, float, str, bool}.\n \"\"\"\n for name, (hp_type, is_multivalent) in six.iteritems(hparams._hparam_types): # pylint: disable=protected-access\n\n if is_multivalent:\n raise ValueError(\"Multivalent hparams not supported in RangedHParams. \"\n \"Hyperparameter %s is multivalent.\" % name)\n val = getattr(hparams, name)\n if hp_type == int:\n ranged_hparams.set_discrete(name, [val])\n elif hp_type == bool:\n ranged_hparams.set_discrete(name, [int(val)])\n elif hp_type == float:\n ranged_hparams.set_discrete_float(name, val)\n elif hp_type == str:\n ranged_hparams.set_categorical(name, [val])\n else:\n raise ValueError(\"Unsupported type %s for param %s\" % (hp_type, name))\n\n\n@registry.register_ranged_hparams(\"basic1\")\ndef basic_range1(ranged_hparams):\n \"\"\"A basic range of hyperparameters.\"\"\"\n rhp = ranged_hparams\n\n hparams = basic_params1()\n fill_ranged_hparams_from_hparams(hparams, rhp)\n\n rhp.set_discrete(\"batch_size\", [1024, 2048, 4096])\n rhp.set_discrete(\"num_hidden_layers\", [1, 2, 3, 4, 5, 6])\n rhp.set_discrete(\"hidden_size\", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)\n rhp.set_discrete(\"kernel_height\", [1, 3, 5, 7])\n rhp.set_discrete(\"kernel_width\", [1, 3, 5, 7])\n rhp.set_discrete(\"compress_steps\", [0, 1, 2])\n rhp.set_float(\"dropout\", 0.0, 0.5)\n rhp.set_float(\"weight_decay\", 1e-4, 10.0, scale=rhp.LOG_SCALE)\n rhp.set_float(\"label_smoothing\", 0.0, 0.2)\n rhp.set_float(\"clip_grad_norm\", 0.01, 50.0, scale=rhp.LOG_SCALE)\n rhp.set_float(\"learning_rate\", 0.005, 2.0, scale=rhp.LOG_SCALE)\n rhp.set_categorical(\"initializer\",\n [\"uniform\", \"orthogonal\", \"uniform_unit_scaling\"])\n rhp.set_float(\"initializer_gain\", 0.5, 3.5)\n rhp.set_categorical(\"learning_rate_decay_scheme\",\n [\"none\", \"sqrt\", \"noam\", \"exp10k\"])\n rhp.set_float(\"optimizer_adam_epsilon\", 1e-7, 1e-2, scale=rhp.LOG_SCALE)\n rhp.set_float(\"optimizer_adam_beta1\", 0.8, 0.9)\n rhp.set_float(\"optimizer_adam_beta2\", 0.995, 0.999)\n rhp.set_categorical(\n \"optimizer\",\n [\"Adam\", \"Adagrad\", \"Momentum\", \"RMSProp\", \"SGD\", \"YellowFin\"])\n"
] | [
[
"tensorflow.VarLenFeature",
"tensorflow.logging.info",
"tensorflow.contrib.slim.tfexample_decoder.Tensor",
"tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder",
"tensorflow.concat",
"tensorflow.contrib.data.TFRecordDataset",
"tensorflow.contrib.training.HParams",
"tensorflow.contrib.slim.parallel_reader.get_data_files"
],
[
"tensorflow.contrib.training.HParams",
"tensorflow.logging.warning"
]
] |
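The `problem.py` string in the row above documents the full subclass interface (generate_data, generator, vocab and space-id properties). As a quick illustration, here is a minimal sketch of a `Text2TextProblem` subclass built only against that interface; the class name `TinyCopyProblem` and the toy sentences are hypothetical, while every overridden method, property, and constant comes from the embedded source.

```python
from tensor2tensor.data_generators import problem, text_encoder
from tensor2tensor.utils import registry


@registry.register_problem
class TinyCopyProblem(problem.Text2TextProblem):
  """Toy character-level problem: copy the input string to the target."""

  @property
  def is_character_level(self):
    return True  # feature_encoders() then uses ByteTextEncoder, no vocab file.

  @property
  def use_subword_tokenizer(self):
    return False

  @property
  def input_space_id(self):
    return problem.SpaceID.EN_CHR

  @property
  def target_space_id(self):
    return problem.SpaceID.EN_CHR

  @property
  def num_shards(self):
    return 1

  @property
  def vocab_name(self):
    return "vocab.tiny_copy"  # unused at character level

  def generator(self, data_dir, tmp_dir, is_training):
    # Yield dicts of token-id lists, as the generator() docstring requires.
    encoder = text_encoder.ByteTextEncoder()
    for s in ["hello", "world"]:
      ids = encoder.encode(s) + [text_encoder.EOS_ID]
      yield {"inputs": ids, "targets": ids}
```

Calling `generate_data(data_dir, tmp_dir)` on an instance would then write sharded (initially "unshuffled") TFRecord files through `generator_utils`, and `Problem.dataset(mode, data_dir)` reads them back as a dict of Tensors.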
A-Charvin/cv-tricks.com | [
"3c6da9c62665abefa6114e0b7f0c39a0a012f496"
] | [
"Tensorflow-tutorials/tutorial-2-image-classifier/predict2.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport os,glob,cv2\nimport sys,argparse\n\n\n# First, pass the path of the image\ndir_path = os.path.dirname(os.path.realpath(__file__))\nimage_path=sys.argv[1] \nfilename = dir_path +'/' +image_path\nimage_size=128\nnum_channels=3\nimages = []\n# Reading the image using OpenCV\nimage = cv2.imread(filename)\n# Resizing the image to our desired size and preprocessing will be done exactly as done during training\nimage = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)\nimages.append(image)\nimages = np.array(images, dtype=np.uint8)\nimages = images.astype('float32')\nimages = np.multiply(images, 1.0/255.0) \n#The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.\nx_batch = images.reshape(1, image_size,image_size,num_channels)\n\n## Let us restore the saved model \nsess = tf.Session()\n# Step-1: Recreate the network graph. At this step only graph is created.\nsaver = tf.train.import_meta_graph('ore-mine-model.meta')\n# Step-2: Now let's load the weights saved using the restore method.\nsaver.restore(sess, tf.train.latest_checkpoint('./'))\n\n# Accessing the default graph which we have restored\ngraph = tf.get_default_graph()\n\n# Now, let's get hold of the op that we can be processed to get the output.\n# In the original network y_pred is the tensor that is the prediction of the network\ny_pred = graph.get_tensor_by_name(\"y_pred:0\")\n\n## Let's feed the images to the input placeholders\nx= graph.get_tensor_by_name(\"x:0\") \ny_true = graph.get_tensor_by_name(\"y_true:0\") \ny_test_images = np.zeros((1, len(os.listdir('training_data')))) \n\n\n### Creating the feed_dict that is required to be fed to calculate y_pred \nfeed_dict_testing = {x: x_batch, y_true: y_test_images}\nresult=sess.run(y_pred, feed_dict=feed_dict_testing)\n# result is of this format [probabiliy_of_rose probability_of_sunflower]\nprint(result)\n"
] | [
[
"numpy.multiply",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_default_graph",
"tensorflow.Session",
"numpy.array",
"tensorflow.train.import_meta_graph"
]
] |
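The `predict2.py` row above prints the raw probability row produced by `y_pred`. A small follow-up helper (not part of the row) can map that row back to a readable label; it assumes, as the script itself implicitly does, that `training_data` holds one subfolder per class and that the training script assigned labels in sorted-folder order.

```python
import os
import numpy as np

def top_class(result, data_dir='training_data'):
    # result comes from sess.run(y_pred, ...) and has shape (1, num_classes).
    classes = sorted(os.listdir(data_dir))  # hypothetical label order
    probs = result[0]
    idx = int(np.argmax(probs))
    return classes[idx], float(probs[idx])

# label, confidence = top_class(result)
```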
87003697/Segmentation | [
"5973a64768632fc52c55f9ffc9f0b43746699b37"
] | [
"utils/losses.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom sklearn.utils import class_weight \nfrom utils.lovasz_losses import lovasz_softmax\nimport pdb\n\ndef make_one_hot(labels, classes):\n one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)\n target = one_hot.scatter_(1, labels.data, 1)\n return target\n\ndef get_weights(target):\n t_np = target.view(-1).data.cpu().numpy()\n\n classes, counts = np.unique(t_np, return_counts=True)\n cls_w = np.median(counts) / counts\n #cls_w = class_weight.compute_class_weight('balanced', classes, t_np)\n\n weights = np.ones(7)\n weights[classes] = cls_w\n return torch.from_numpy(weights).float().cuda()\n\nclass CrossEntropyLoss2d(nn.Module):\n def __init__(self, weight=None, ignore_index=255, reduction='mean'):\n super(CrossEntropyLoss2d, self).__init__()\n self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)\n\n def forward(self, output, target):\n loss = self.CE(output, target)\n return loss\n\nclass DiceLoss(nn.Module):\n def __init__(self, smooth=1., ignore_index=255):\n super(DiceLoss, self).__init__()\n self.ignore_index = ignore_index\n self.smooth = smooth\n\n def forward(self, output, target):\n if self.ignore_index not in range(target.min(), target.max()):\n if (target == self.ignore_index).sum() > 0:\n target[target == self.ignore_index] = target.min()\n target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])\n output = F.softmax(output, dim=1)\n output_flat = output.contiguous().view(-1)\n target_flat = target.contiguous().view(-1)\n intersection = (output_flat * target_flat).sum()\n loss = 1 - ((2. * intersection + self.smooth) /\n (output_flat.sum() + target_flat.sum() + self.smooth))\n return loss\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.size_average = size_average\n self.CE_loss = nn.CrossEntropyLoss(reduce=False, ignore_index=ignore_index, weight=alpha)\n\n def forward(self, output, target):\n logpt = self.CE_loss(output, target)\n pt = torch.exp(-logpt)\n loss = ((1-pt)**self.gamma) * logpt\n if self.size_average:\n return loss.mean()\n return loss.sum()\n\nclass CE_DiceLoss(nn.Module):\n def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):\n super(CE_DiceLoss, self).__init__()\n self.smooth = smooth\n self.dice = DiceLoss()\n self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)\n \n def forward(self, output, target):\n CE_loss = self.cross_entropy(output, target)\n dice_loss = self.dice(output, target)\n return CE_loss + dice_loss\n\nclass LovaszSoftmax(nn.Module):\n def __init__(self, classes='present', per_image=False, ignore_index=255):\n super(LovaszSoftmax, self).__init__()\n self.smooth = classes\n self.per_image = per_image\n self.ignore_index = ignore_index\n \n def forward(self, output, target):\n logits = F.softmax(output, dim=1)\n loss = lovasz_softmax(logits, target, ignore=self.ignore_index)\n return loss\n"
] | [
[
"numpy.ones",
"torch.nn.functional.softmax",
"numpy.median",
"torch.nn.CrossEntropyLoss",
"torch.exp",
"torch.from_numpy",
"numpy.unique"
]
] |
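For the losses row above, a short usage sketch (not part of the dataset row) shows the expected tensor shapes; it assumes the module is importable as `utils.losses`, matching the row's `file_path`, and follows the `(N, C, H, W)` logits / `(N, H, W)` integer-target convention that `nn.CrossEntropyLoss` and `make_one_hot` rely on.

```python
import torch
from utils.losses import CE_DiceLoss, FocalLoss

torch.manual_seed(0)
logits = torch.randn(2, 7, 32, 32)         # N=2 images, C=7 classes
target = torch.randint(0, 7, (2, 32, 32))  # ground-truth class map

print(CE_DiceLoss()(logits, target))       # cross-entropy + Dice, scalar
print(FocalLoss(gamma=2)(logits, target))  # focal-weighted CE, scalar
```

`get_weights` and `LovaszSoftmax` are left out of this CPU-only sketch: the former calls `.cuda()` unconditionally and the latter depends on `utils.lovasz_losses`.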
meeseeksmachine/pandas | [
"27ebb3e1e40513ad5f8919a5bbc7298e2e070a39"
] | [
"pandas/core/sparse/frame.py"
] | [
"\"\"\"\nData structures for sparse float data. Life is made simpler by dealing only\nwith float64 data\n\"\"\"\nfrom __future__ import division\n# pylint: disable=E1101,E1103,W0231,E0202\n\nimport warnings\nfrom pandas.compat import lmap\nfrom pandas import compat\nimport numpy as np\n\nfrom pandas.core.dtypes.missing import isna, notna\nfrom pandas.core.dtypes.cast import maybe_upcast, find_common_type\nfrom pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse\n\nfrom pandas.compat.numpy import function as nv\nfrom pandas.core.index import Index, MultiIndex, ensure_index\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame, extract_index, _prep_ndarray\nimport pandas.core.algorithms as algos\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays)\nimport pandas.core.generic as generic\nfrom pandas.core.sparse.series import SparseSeries, SparseArray\nfrom pandas._libs.sparse import BlockIndex, get_blocks\nfrom pandas.util._decorators import Appender\nimport pandas.core.ops as ops\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\n\n_shared_doc_kwargs = dict(klass='SparseDataFrame')\n\n\nclass SparseDataFrame(DataFrame):\n \"\"\"\n DataFrame containing sparse floating point data in the form of SparseSeries\n objects\n\n Parameters\n ----------\n data : same types as can be passed to DataFrame or scipy.sparse.spmatrix\n .. versionchanged :: 0.23.0\n If data is a dict, argument order is maintained for Python 3.6\n and later.\n\n index : array-like, optional\n column : array-like, optional\n default_kind : {'block', 'integer'}, default 'block'\n Default sparse kind for converting Series to SparseSeries. Will not\n override SparseSeries passed into constructor\n default_fill_value : float\n Default fill_value for converting Series to SparseSeries\n (default: nan). 
Will not override SparseSeries passed in.\n \"\"\"\n _subtyp = 'sparse_frame'\n\n def __init__(self, data=None, index=None, columns=None, default_kind=None,\n default_fill_value=None, dtype=None, copy=False):\n\n # pick up the defaults from the Sparse structures\n if isinstance(data, SparseDataFrame):\n if index is None:\n index = data.index\n if columns is None:\n columns = data.columns\n if default_fill_value is None:\n default_fill_value = data.default_fill_value\n if default_kind is None:\n default_kind = data.default_kind\n elif isinstance(data, (SparseSeries, SparseArray)):\n if index is None:\n index = data.index\n if default_fill_value is None:\n default_fill_value = data.fill_value\n if columns is None and hasattr(data, 'name'):\n columns = [data.name]\n if columns is None:\n raise Exception(\"cannot pass a series w/o a name or columns\")\n data = {columns[0]: data}\n\n if default_fill_value is None:\n default_fill_value = np.nan\n if default_kind is None:\n default_kind = 'block'\n\n self._default_kind = default_kind\n self._default_fill_value = default_fill_value\n\n if is_scipy_sparse(data):\n mgr = self._init_spmatrix(data, index, columns, dtype=dtype,\n fill_value=default_fill_value)\n elif isinstance(data, dict):\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, index, columns, dtype=dtype)\n elif isinstance(data, SparseDataFrame):\n mgr = self._init_mgr(data._data,\n dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif isinstance(data, DataFrame):\n mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)\n elif isinstance(data, Series):\n mgr = self._init_dict(data.to_frame(), data.index,\n columns=None, dtype=dtype)\n elif isinstance(data, BlockManager):\n mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif data is None:\n data = DataFrame()\n\n if index is None:\n index = Index([])\n else:\n index = ensure_index(index)\n\n if columns is None:\n columns = Index([])\n else:\n for c in columns:\n data[c] = SparseArray(np.nan, index=index,\n kind=self._default_kind,\n fill_value=self._default_fill_value)\n mgr = to_manager(data, columns, index)\n if dtype is not None:\n mgr = mgr.astype(dtype)\n else:\n msg = ('SparseDataFrame called with unknown type \"{data_type}\" '\n 'for data argument')\n raise TypeError(msg.format(data_type=type(data).__name__))\n\n generic.NDFrame.__init__(self, mgr)\n\n @property\n def _constructor(self):\n return SparseDataFrame\n\n _constructor_sliced = SparseSeries\n\n def _init_dict(self, data, index, columns, dtype=None):\n # pre-filter out columns if we passed it\n if columns is not None:\n columns = ensure_index(columns)\n data = {k: v for k, v in compat.iteritems(data) if k in columns}\n else:\n keys = com._dict_keys_to_ordered_list(data)\n columns = Index(keys)\n\n if index is None:\n index = extract_index(list(data.values()))\n\n def sp_maker(x):\n return SparseArray(x, kind=self._default_kind,\n fill_value=self._default_fill_value,\n copy=True, dtype=dtype)\n sdict = {}\n for k, v in compat.iteritems(data):\n if isinstance(v, Series):\n # Force alignment, no copy necessary\n if not v.index.equals(index):\n v = v.reindex(index)\n\n if not isinstance(v, SparseSeries):\n v = sp_maker(v.values)\n elif isinstance(v, SparseArray):\n v = v.copy()\n else:\n if isinstance(v, dict):\n v = [v.get(i, np.nan) for i in index]\n\n v = sp_maker(v)\n sdict[k] = v\n\n # TODO: figure out how to 
handle this case, all nan's?\n # add in any other columns we want to have (completeness)\n nan_arr = np.empty(len(index), dtype='float64')\n nan_arr.fill(np.nan)\n nan_arr = sp_maker(nan_arr)\n sdict.update((c, nan_arr) for c in columns if c not in sdict)\n\n return to_manager(sdict, columns, index)\n\n def _init_matrix(self, data, index, columns, dtype=None):\n \"\"\" Init self from ndarray or list of lists \"\"\"\n data = _prep_ndarray(data, copy=False)\n index, columns = self._prep_index(data, index, columns)\n data = {idx: data[:, i] for i, idx in enumerate(columns)}\n return self._init_dict(data, index, columns, dtype)\n\n def _init_spmatrix(self, data, index, columns, dtype=None,\n fill_value=None):\n \"\"\" Init self from scipy.sparse matrix \"\"\"\n index, columns = self._prep_index(data, index, columns)\n data = data.tocoo()\n N = len(index)\n\n # Construct a dict of SparseSeries\n sdict = {}\n values = Series(data.data, index=data.row, copy=False)\n for col, rowvals in values.groupby(data.col):\n # get_blocks expects int32 row indices in sorted order\n rowvals = rowvals.sort_index()\n rows = rowvals.index.values.astype(np.int32)\n blocs, blens = get_blocks(rows)\n\n sdict[columns[col]] = SparseSeries(\n rowvals.values, index=index,\n fill_value=fill_value,\n sparse_index=BlockIndex(N, blocs, blens))\n\n # Add any columns that were empty and thus not grouped on above\n sdict.update({column: SparseSeries(index=index,\n fill_value=fill_value,\n sparse_index=BlockIndex(N, [], []))\n for column in columns\n if column not in sdict})\n\n return self._init_dict(sdict, index, columns, dtype)\n\n def _prep_index(self, data, index, columns):\n N, K = data.shape\n if index is None:\n index = ibase.default_index(N)\n if columns is None:\n columns = ibase.default_index(K)\n\n if len(columns) != K:\n raise ValueError('Column length mismatch: {columns} vs. {K}'\n .format(columns=len(columns), K=K))\n if len(index) != N:\n raise ValueError('Index length mismatch: {index} vs. {N}'\n .format(index=len(index), N=N))\n return index, columns\n\n def to_coo(self):\n \"\"\"\n Return the contents of the frame as a sparse SciPy COO matrix.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n coo_matrix : scipy.sparse.spmatrix\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n Notes\n -----\n The dtype will be the lowest-common-denominator type (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. 
By numpy.find_common_type convention, mixing int64 and\n and uint64 will result in a float64 dtype.\n \"\"\"\n try:\n from scipy.sparse import coo_matrix\n except ImportError:\n raise ImportError('Scipy is not installed')\n\n dtype = find_common_type(self.dtypes)\n cols, rows, datas = [], [], []\n for col, name in enumerate(self):\n s = self[name]\n row = s.sp_index.to_int_index().indices\n cols.append(np.repeat(col, len(row)))\n rows.append(row)\n datas.append(s.sp_values.astype(dtype, copy=False))\n\n cols = np.concatenate(cols)\n rows = np.concatenate(rows)\n datas = np.concatenate(datas)\n return coo_matrix((datas, (rows, cols)), shape=self.shape)\n\n def __array_wrap__(self, result):\n return self._constructor(\n result, index=self.index, columns=self.columns,\n default_kind=self._default_kind,\n default_fill_value=self._default_fill_value).__finalize__(self)\n\n def __getstate__(self):\n # pickling\n return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,\n _default_fill_value=self._default_fill_value,\n _default_kind=self._default_kind)\n\n def _unpickle_sparse_frame_compat(self, state):\n \"\"\" original pickle format \"\"\"\n series, cols, idx, fv, kind = state\n\n if not isinstance(cols, Index): # pragma: no cover\n from pandas.io.pickle import _unpickle_array\n columns = _unpickle_array(cols)\n else:\n columns = cols\n\n if not isinstance(idx, Index): # pragma: no cover\n from pandas.io.pickle import _unpickle_array\n index = _unpickle_array(idx)\n else:\n index = idx\n\n series_dict = DataFrame()\n for col, (sp_index, sp_values) in compat.iteritems(series):\n series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,\n fill_value=fv)\n\n self._data = to_manager(series_dict, columns, index)\n self._default_fill_value = fv\n self._default_kind = kind\n\n def to_dense(self):\n \"\"\"\n Convert to dense DataFrame\n\n Returns\n -------\n df : DataFrame\n \"\"\"\n data = {k: v.to_dense() for k, v in compat.iteritems(self)}\n return DataFrame(data, index=self.index, columns=self.columns)\n\n def _apply_columns(self, func):\n \"\"\" get new SparseDataFrame applying func to each columns \"\"\"\n\n new_data = {}\n for col, series in compat.iteritems(self):\n new_data[col] = func(series)\n\n return self._constructor(\n data=new_data, index=self.index, columns=self.columns,\n default_fill_value=self.default_fill_value).__finalize__(self)\n\n def astype(self, dtype):\n return self._apply_columns(lambda x: x.astype(dtype))\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this SparseDataFrame\n \"\"\"\n result = super(SparseDataFrame, self).copy(deep=deep)\n result._default_fill_value = self._default_fill_value\n result._default_kind = self._default_kind\n return result\n\n @property\n def default_fill_value(self):\n return self._default_fill_value\n\n @property\n def default_kind(self):\n return self._default_kind\n\n @property\n def density(self):\n \"\"\"\n Ratio of non-sparse points to total (dense) data points\n represented in the frame\n \"\"\"\n tot_nonsparse = sum(ser.sp_index.npoints\n for _, ser in compat.iteritems(self))\n tot = len(self.index) * len(self.columns)\n return tot_nonsparse / float(tot)\n\n def fillna(self, value=None, method=None, axis=0, inplace=False,\n limit=None, downcast=None):\n new_self = super(SparseDataFrame,\n self).fillna(value=value, method=method, axis=axis,\n inplace=inplace, limit=limit,\n downcast=downcast)\n if not inplace:\n self = new_self\n\n # set the fill value if we are filling as a scalar with nothing special\n # 
going on\n if (value is not None and value == value and method is None and\n limit is None):\n self._default_fill_value = value\n\n if not inplace:\n return self\n\n # ----------------------------------------------------------------------\n # Support different internal representation of SparseDataFrame\n\n def _sanitize_column(self, key, value, **kwargs):\n \"\"\"\n Creates a new SparseArray from the input value.\n\n Parameters\n ----------\n key : object\n value : scalar, Series, or array-like\n kwargs : dict\n\n Returns\n -------\n sanitized_column : SparseArray\n\n \"\"\"\n def sp_maker(x, index=None):\n return SparseArray(x, index=index,\n fill_value=self._default_fill_value,\n kind=self._default_kind)\n if isinstance(value, SparseSeries):\n clean = value.reindex(self.index).as_sparse_array(\n fill_value=self._default_fill_value, kind=self._default_kind)\n\n elif isinstance(value, SparseArray):\n if len(value) != len(self.index):\n raise AssertionError('Length of values does not match '\n 'length of index')\n clean = value\n\n elif hasattr(value, '__iter__'):\n if isinstance(value, Series):\n clean = value.reindex(self.index)\n if not isinstance(value, SparseSeries):\n clean = sp_maker(clean)\n else:\n if len(value) != len(self.index):\n raise AssertionError('Length of values does not match '\n 'length of index')\n clean = sp_maker(value)\n\n # Scalar\n else:\n clean = sp_maker(value, self.index)\n\n # always return a SparseArray!\n return clean\n\n def get_value(self, index, col, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed column and index\n\n .. deprecated:: 0.21.0\n\n Please use .at[] or .iat[] accessors.\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n warnings.warn(\"get_value is deprecated and will be removed \"\n \"in a future release. Please use \"\n \".at[] or .iat[] accessors instead\", FutureWarning,\n stacklevel=2)\n return self._get_value(index, col, takeable=takeable)\n\n def _get_value(self, index, col, takeable=False):\n if takeable is True:\n series = self._iget_item_cache(col)\n else:\n series = self._get_item_cache(col)\n\n return series._get_value(index, takeable=takeable)\n _get_value.__doc__ = get_value.__doc__\n\n def set_value(self, index, col, value, takeable=False):\n \"\"\"\n Put single value at passed column and index\n\n .. deprecated:: 0.21.0\n\n Please use .at[] or .iat[] accessors.\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar value\n takeable : interpret the index/col as indexers, default False\n\n Notes\n -----\n This method *always* returns a new object. It is currently not\n particularly efficient (and potentially very expensive) but is provided\n for API compatibility with DataFrame\n\n Returns\n -------\n frame : DataFrame\n \"\"\"\n warnings.warn(\"set_value is deprecated and will be removed \"\n \"in a future release. 
Please use \"\n \".at[] or .iat[] accessors instead\", FutureWarning,\n stacklevel=2)\n return self._set_value(index, col, value, takeable=takeable)\n\n def _set_value(self, index, col, value, takeable=False):\n dense = self.to_dense()._set_value(\n index, col, value, takeable=takeable)\n return dense.to_sparse(kind=self._default_kind,\n fill_value=self._default_fill_value)\n _set_value.__doc__ = set_value.__doc__\n\n def _slice(self, slobj, axis=0, kind=None):\n if axis == 0:\n new_index = self.index[slobj]\n new_columns = self.columns\n else:\n new_index = self.index\n new_columns = self.columns[slobj]\n\n return self.reindex(index=new_index, columns=new_columns)\n\n def xs(self, key, axis=0, copy=False):\n \"\"\"\n Returns a row (cross-section) from the SparseDataFrame as a Series\n object.\n\n Parameters\n ----------\n key : some index contained in the index\n\n Returns\n -------\n xs : Series\n \"\"\"\n if axis == 1:\n data = self[key]\n return data\n\n i = self.index.get_loc(key)\n data = self.take([i]).get_values()[0]\n return Series(data, index=self.columns)\n\n # ----------------------------------------------------------------------\n # Arithmetic-related methods\n\n def _combine_frame(self, other, func, fill_value=None, level=None):\n this, other = self.align(other, join='outer', level=level, copy=False)\n new_index, new_columns = this.index, this.columns\n\n if level is not None:\n raise NotImplementedError(\"'level' argument is not supported\")\n\n if self.empty and other.empty:\n return self._constructor(index=new_index).__finalize__(self)\n\n new_data = {}\n if fill_value is not None:\n # TODO: be a bit more intelligent here\n for col in new_columns:\n if col in this and col in other:\n dleft = this[col].to_dense()\n dright = other[col].to_dense()\n result = dleft._binop(dright, func, fill_value=fill_value)\n result = result.to_sparse(fill_value=this[col].fill_value)\n new_data[col] = result\n else:\n\n for col in new_columns:\n if col in this and col in other:\n new_data[col] = func(this[col], other[col])\n\n # if the fill values are the same use them? 
or use a valid one\n new_fill_value = None\n other_fill_value = getattr(other, 'default_fill_value', np.nan)\n if self.default_fill_value == other_fill_value:\n new_fill_value = self.default_fill_value\n elif np.isnan(self.default_fill_value) and not np.isnan(\n other_fill_value):\n new_fill_value = other_fill_value\n elif not np.isnan(self.default_fill_value) and np.isnan(\n other_fill_value):\n new_fill_value = self.default_fill_value\n\n return self._constructor(data=new_data, index=new_index,\n columns=new_columns,\n default_fill_value=new_fill_value\n ).__finalize__(self)\n\n def _combine_match_index(self, other, func, level=None):\n new_data = {}\n\n if level is not None:\n raise NotImplementedError(\"'level' argument is not supported\")\n\n new_index = self.index.union(other.index)\n this = self\n if self.index is not new_index:\n this = self.reindex(new_index)\n\n if other.index is not new_index:\n other = other.reindex(new_index)\n\n for col, series in compat.iteritems(this):\n new_data[col] = func(series.values, other.values)\n\n # fill_value is a function of our operator\n fill_value = None\n if isna(other.fill_value) or isna(self.default_fill_value):\n fill_value = np.nan\n else:\n fill_value = func(np.float64(self.default_fill_value),\n np.float64(other.fill_value))\n\n return self._constructor(\n new_data, index=new_index, columns=self.columns,\n default_fill_value=fill_value).__finalize__(self)\n\n def _combine_match_columns(self, other, func, level=None, try_cast=True):\n # patched version of DataFrame._combine_match_columns to account for\n # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,\n # where 3.0 is numpy.float64 and series is a SparseSeries. Still\n # possible for this to happen, which is bothersome\n\n if level is not None:\n raise NotImplementedError(\"'level' argument is not supported\")\n\n new_data = {}\n\n union = intersection = self.columns\n\n if not union.equals(other.index):\n union = other.index.union(self.columns)\n intersection = other.index.intersection(self.columns)\n\n for col in intersection:\n new_data[col] = func(self[col], float(other[col]))\n\n return self._constructor(\n new_data, index=self.index, columns=union,\n default_fill_value=self.default_fill_value).__finalize__(self)\n\n def _combine_const(self, other, func, errors='raise', try_cast=True):\n return self._apply_columns(lambda x: func(x, other))\n\n def _reindex_index(self, index, method, copy, level, fill_value=np.nan,\n limit=None, takeable=False):\n if level is not None:\n raise TypeError('Reindex by level not supported for sparse')\n\n if self.index.equals(index):\n if copy:\n return self.copy()\n else:\n return self\n\n if len(self.index) == 0:\n return self._constructor(\n index=index, columns=self.columns).__finalize__(self)\n\n indexer = self.index.get_indexer(index, method, limit=limit)\n indexer = ensure_platform_int(indexer)\n mask = indexer == -1\n need_mask = mask.any()\n\n new_series = {}\n for col, series in self.iteritems():\n if mask.all():\n continue\n\n values = series.values\n # .take returns SparseArray\n new = values.take(indexer)\n if need_mask:\n new = new.values\n # convert integer to float if necessary. 
need to do a lot\n # more than that, handle boolean etc also\n new, fill_value = maybe_upcast(new, fill_value=fill_value)\n np.putmask(new, mask, fill_value)\n\n new_series[col] = new\n\n return self._constructor(\n new_series, index=index, columns=self.columns,\n default_fill_value=self._default_fill_value).__finalize__(self)\n\n def _reindex_columns(self, columns, method, copy, level, fill_value=None,\n limit=None, takeable=False):\n if level is not None:\n raise TypeError('Reindex by level not supported for sparse')\n\n if notna(fill_value):\n raise NotImplementedError(\"'fill_value' argument is not supported\")\n\n if limit:\n raise NotImplementedError(\"'limit' argument is not supported\")\n\n if method is not None:\n raise NotImplementedError(\"'method' argument is not supported\")\n\n # TODO: fill value handling\n sdict = {k: v for k, v in compat.iteritems(self) if k in columns}\n return self._constructor(\n sdict, index=self.index, columns=columns,\n default_fill_value=self._default_fill_value).__finalize__(self)\n\n def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,\n limit=None, copy=False, allow_dups=False):\n\n if method is not None or limit is not None:\n raise NotImplementedError(\"cannot reindex with a method or limit \"\n \"with sparse\")\n\n if fill_value is None:\n fill_value = np.nan\n\n reindexers = {self._get_axis_number(a): val\n for (a, val) in compat.iteritems(reindexers)}\n\n index, row_indexer = reindexers.get(0, (None, None))\n columns, col_indexer = reindexers.get(1, (None, None))\n\n if columns is None:\n columns = self.columns\n\n new_arrays = {}\n for col in columns:\n if col not in self:\n continue\n if row_indexer is not None:\n new_arrays[col] = algos.take_1d(self[col].get_values(),\n row_indexer,\n fill_value=fill_value)\n else:\n new_arrays[col] = self[col]\n\n return self._constructor(new_arrays, index=index,\n columns=columns).__finalize__(self)\n\n def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n if on is not None:\n raise NotImplementedError(\"'on' keyword parameter is not yet \"\n \"implemented\")\n return self._join_index(other, how, lsuffix, rsuffix)\n\n def _join_index(self, other, how, lsuffix, rsuffix):\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError('Other Series must have a name')\n\n other = SparseDataFrame(\n {other.name: other},\n default_fill_value=self._default_fill_value)\n\n join_index = self.index.join(other.index, how=how)\n\n this = self.reindex(join_index)\n other = other.reindex(join_index)\n\n this, other = this._maybe_rename_join(other, lsuffix, rsuffix)\n\n from pandas import concat\n return concat([this, other], axis=1, verify_integrity=True)\n\n def _maybe_rename_join(self, other, lsuffix, rsuffix):\n to_rename = self.columns.intersection(other.columns)\n if len(to_rename) > 0:\n if not lsuffix and not rsuffix:\n raise ValueError('columns overlap but no suffix specified: '\n '{to_rename}'.format(to_rename=to_rename))\n\n def lrenamer(x):\n if x in to_rename:\n return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)\n return x\n\n def rrenamer(x):\n if x in to_rename:\n return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)\n return x\n\n this = self.rename(columns=lrenamer)\n other = other.rename(columns=rrenamer)\n else:\n this = self\n\n return this, other\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Returns a DataFrame with the rows/columns switched.\n \"\"\"\n nv.validate_transpose(args, kwargs)\n return self._constructor(\n 
self.values.T, index=self.columns, columns=self.index,\n default_fill_value=self._default_fill_value,\n default_kind=self._default_kind).__finalize__(self)\n\n T = property(transpose)\n\n @Appender(DataFrame.count.__doc__)\n def count(self, axis=0, **kwds):\n if axis is None:\n axis = self._stat_axis_number\n\n return self.apply(lambda x: x.count(), axis=axis)\n\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"\n Return SparseDataFrame of cumulative sums over requested axis.\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n nv.validate_cumsum(args, kwargs)\n\n if axis is None:\n axis = self._stat_axis_number\n\n return self.apply(lambda x: x.cumsum(), axis=axis)\n\n @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)\n def isna(self):\n return self._apply_columns(lambda x: x.isna())\n isnull = isna\n\n @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)\n def notna(self):\n return self._apply_columns(lambda x: x.notna())\n notnull = notna\n\n def apply(self, func, axis=0, broadcast=None, reduce=None,\n result_type=None):\n \"\"\"\n Analogous to DataFrame.apply, for SparseDataFrame\n\n Parameters\n ----------\n func : function\n Function to apply to each column\n axis : {0, 1, 'index', 'columns'}\n broadcast : bool, default False\n For aggregation functions, return object of same size with values\n propagated\n\n .. deprecated:: 0.23.0\n This argument will be removed in a future version, replaced\n by result_type='broadcast'.\n\n reduce : boolean or None, default None\n Try to apply reduction procedures. If the DataFrame is empty,\n apply will use reduce to determine whether the result should be a\n Series or a DataFrame. If reduce is None (the default), apply's\n return value will be guessed by calling func an empty Series (note:\n while guessing, exceptions raised by func will be ignored). If\n reduce is True a Series will always be returned, and if False a\n DataFrame will always be returned.\n\n .. deprecated:: 0.23.0\n This argument will be removed in a future version, replaced\n by result_type='reduce'.\n\n result_type : {'expand', 'reduce', 'broadcast, None}\n These only act when axis=1 {columns}:\n\n * 'expand' : list-like results will be turned into columns.\n * 'reduce' : return a Series if possible rather than expanding\n list-like results. This is the opposite to 'expand'.\n * 'broadcast' : results will be broadcast to the original shape\n of the frame, the original index & columns will be retained.\n\n The default behaviour (None) depends on the return value of the\n applied function: list-like results will be returned as a Series\n of those. However if the apply function returns a Series these\n are expanded to columns.\n\n .. 
versionadded:: 0.23.0\n\n Returns\n -------\n applied : Series or SparseDataFrame\n \"\"\"\n if not len(self.columns):\n return self\n axis = self._get_axis_number(axis)\n\n if isinstance(func, np.ufunc):\n new_series = {}\n for k, v in compat.iteritems(self):\n applied = func(v)\n applied.fill_value = func(v.fill_value)\n new_series[k] = applied\n return self._constructor(\n new_series, index=self.index, columns=self.columns,\n default_fill_value=self._default_fill_value,\n default_kind=self._default_kind).__finalize__(self)\n\n from pandas.core.apply import frame_apply\n op = frame_apply(self,\n func=func,\n axis=axis,\n reduce=reduce,\n broadcast=broadcast,\n result_type=result_type)\n return op.get_result()\n\n def applymap(self, func):\n \"\"\"\n Apply a function to a DataFrame that is intended to operate\n elementwise, i.e. like doing map(func, series) for each series in the\n DataFrame\n\n Parameters\n ----------\n func : function\n Python function, returns a single value from a single value\n\n Returns\n -------\n applied : DataFrame\n \"\"\"\n return self.apply(lambda x: lmap(func, x))\n\n\ndef to_manager(sdf, columns, index):\n \"\"\" create and return the block manager from a dataframe of series,\n columns, index\n \"\"\"\n\n # from BlockManager perspective\n axes = [ensure_index(columns), ensure_index(index)]\n\n return create_block_manager_from_arrays(\n [sdf[c] for c in columns], columns, axes)\n\n\ndef stack_sparse_frame(frame):\n \"\"\"\n Only makes sense when fill_value is NaN\n \"\"\"\n lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]\n nobs = sum(lengths)\n\n # this is pretty fast\n minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)\n\n inds_to_concat = []\n vals_to_concat = []\n # TODO: Figure out whether this can be reached.\n # I think this currently can't be reached because you can't build a\n # SparseDataFrame with a non-np.NaN fill value (fails earlier).\n for _, series in compat.iteritems(frame):\n if not np.isnan(series.fill_value):\n raise TypeError('This routine assumes NaN fill value')\n\n int_index = series.sp_index.to_int_index()\n inds_to_concat.append(int_index.indices)\n vals_to_concat.append(series.sp_values)\n\n major_labels = np.concatenate(inds_to_concat)\n stacked_values = np.concatenate(vals_to_concat)\n index = MultiIndex(levels=[frame.index, frame.columns],\n labels=[major_labels, minor_labels],\n verify_integrity=False)\n\n lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,\n columns=['foo'])\n return lp.sort_index(level=0)\n\n\ndef homogenize(series_dict):\n \"\"\"\n Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex\n corresponding to the locations where they all have data\n\n Parameters\n ----------\n series_dict : dict or DataFrame\n\n Notes\n -----\n Using the dumbest algorithm I could think of. 
Should put some more thought\n into this\n\n Returns\n -------\n homogenized : dict of SparseSeries\n \"\"\"\n index = None\n\n need_reindex = False\n\n for _, series in compat.iteritems(series_dict):\n if not np.isnan(series.fill_value):\n raise TypeError('this method is only valid with NaN fill values')\n\n if index is None:\n index = series.sp_index\n elif not series.sp_index.equals(index):\n need_reindex = True\n index = index.intersect(series.sp_index)\n\n if need_reindex:\n output = {}\n for name, series in compat.iteritems(series_dict):\n if not series.sp_index.equals(index):\n series = series.sparse_reindex(index)\n\n output[name] = series\n else:\n output = series_dict\n\n return output\n\n\n# use unaccelerated ops for sparse objects\nops.add_flex_arithmetic_methods(SparseDataFrame)\nops.add_special_arithmetic_methods(SparseDataFrame)\n"
] | [
[
"pandas.compat.iteritems",
"pandas.core.index.ensure_index",
"pandas.core.internals.create_block_manager_from_arrays",
"pandas.core.frame._prep_ndarray",
"pandas.core.sparse.series.SparseArray",
"pandas.core.dtypes.common.is_scipy_sparse",
"numpy.float64",
"numpy.concatenate",
"pandas.core.index.MultiIndex",
"pandas.io.pickle._unpickle_array",
"pandas.core.dtypes.missing.notna",
"numpy.isnan",
"pandas.core.indexes.base.default_index",
"pandas.core.generic.NDFrame.__init__",
"pandas.compat.numpy.function.validate_cumsum",
"pandas.core.apply.frame_apply",
"pandas.core.ops.add_special_arithmetic_methods",
"pandas._libs.sparse.BlockIndex",
"pandas.compat.numpy.function.validate_transpose",
"pandas.core.common._dict_keys_to_ordered_list",
"scipy.sparse.coo_matrix",
"pandas.concat",
"numpy.putmask",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.dtypes.missing.isna",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.util._decorators.Appender",
"pandas.core.series.Series",
"pandas.compat.lmap",
"pandas.core.index.Index",
"pandas.core.dtypes.cast.maybe_upcast",
"pandas._libs.sparse.get_blocks",
"pandas.core.sparse.series.SparseSeries",
"pandas.core.frame.DataFrame"
]
] |
jamesdu0504/760GroupProject | [
"dd870b3af7958fb2088c627ab02c781412b2a20f"
] | [
"dataset_characteristics.py"
] | [
"import datasets.import_datasets as im\nimport pandas as pd\n\n#Takes a very long time to run, probably not worth running when the output \n\ndatasets = [\"BMS1\", \n \"BMS2\", \n \"toydata\"\n \"uci_retail\",\n \"mushroom\", \n \"Belgian_retail\",\n \"chess\", \n \"connect\", \n \"mushroom\", \n \"pumsb\", \n \"pumsb_star\", \n \"T40I10D100K\", \n \"T10I4D100K\", \n \"accidents\", \n \"instacart\"]\n\ndef main(datasets):\n df = pd.DataFrame(columns=['Dataset Name',\n 'Number of transactions',\n 'Number of Unique items',\n 'Minimum Transaction Length',\n 'Maximum Transaction Length',\n 'Average Transaction Length'])\n\n for dataset_name in datasets:\n print(\"Analysing\", dataset_name)\n data = im.import_dataset(dataset_name)\n \n data = data.astype('bool')\n\n average = 0\n minimum = 100000\n maximum = 0\n for _, row in data.iterrows():\n transaction_len = sum(row)\n #Minimum transaction length\n if minimum > transaction_len:\n minimum = transaction_len\n\n #Maximum transaction length\n if maximum < transaction_len:\n maximum = transaction_len\n \n #Average transaction length\n average += transaction_len\n\n new_row = {'Dataset Name':dataset_name,\n 'Number of transactions':data.shape[0],\n 'Number of Unique items':data.shape[1],\n 'Minimum Transaction Length':minimum,\n 'Maximum Transaction Length':maximum,\n 'Average Transaction Length':average/data.shape[0]\n }\n\n df = df.append(new_row, ignore_index=True)\n\n print(df)\n return df\n\nmain(datasets).to_csv('Dataset_details.csv')"
] | [
[
"pandas.DataFrame"
]
] |
gaozhangyang/DecST | [
"116ce9efa28a07793900d09345abab4cb512db98"
] | [
"ex_ablation/exp_conv.py"
] | [
"\nimport sys; sys.path.append('..')\nfrom API.tools import EarlyStopping\nfrom API.exp_basic import Exp_Basic\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom ex_ablation.model import ConvUnet\nfrom API.dataloader import load_data\nimport json\n\nimport os\nimport time\nimport logging\nfrom tqdm import tqdm\nfrom API.metrics import metric\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport nni\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nclass Exp_Traffic(Exp_Basic):\n def __init__(self, args):\n super(Exp_Traffic, self).__init__(args)\n self.path = args.res_dir+'/{}'.format(args.ex_name)\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n self.checkpoints_path = os.path.join(self.path, 'checkpoints')\n if not os.path.exists(self.checkpoints_path):\n os.makedirs(self.checkpoints_path)\n\n sv_param = os.path.join(self.path, 'model_param.json')\n with open(sv_param, 'w') as file_obj:\n json.dump(args.__dict__, file_obj)\n \n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(level=logging.INFO,#控制台打印的日志级别\n filename=self.path+'/log.log',#'log/{}_{}_{}.log'.format(args.gcn_type,args.graph_type,args.order_list)\n filemode='a',##模式,有w和a,w就是写模式,每次都会重新写日志,覆盖之前的日志\n #a是追加模式,默认如果不写的话,就是追加模式\n format='%(asctime)s - %(message)s'#日志格式\n )\n \n self._get_data()\n\n self._select_optimizer()\n if self.args.epoch_s>0:\n self._load(self.args.epoch_s-1)\n \n def _build_model(self):\n from ast import literal_eval as make_tuple\n in_shape = tuple(self.args.in_shape)\n # logging.info('{}'.format(self.args.in_shape))\n model = ConvUnet(self.args,self.args.dataname,in_shape,self.args.hidC,self.args.hidT)\n return model\n\n def _get_data(self):\n config = self.args.__dict__\n\n self.train_loader, self.vali_loader, self.test_loader, self.data_mean, self.data_std = load_data(config['dataname'],config['batch_size'], config['val_batch_size'], config['data_root'],require_back=True)\n if self.vali_loader is None:\n self.vali_loader = self.test_loader\n\n def _select_optimizer(self):\n self.model_optim = optim.Adam(self.model.parameters(), lr=self.args.lr)\n self.scheduler = ReduceLROnPlateau(self.model_optim, mode='min', patience=3,factor=0.8,verbose=True)\n return self.model_optim\n \n def _adjust_learning_rate(self,optimizer,epoch,args):\n lr_adjust = {epoch: args.lr * (0.5 ** ((epoch-1) // 2))}\n\n if epoch in lr_adjust.keys():\n lr = lr_adjust[epoch]\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print('Updating learning rate to {}'.format(lr))\n \n def _select_criterion(self):\n criterion = nn.MSELoss()\n return criterion\n \n def _save(self,epoch):\n torch.save(self.model.state_dict(), os.path.join(self.checkpoints_path, str(epoch) + '.pth'))\n state=self.scheduler.state_dict()\n with open(os.path.join(self.checkpoints_path, str(epoch) + '.json'), 'w') as file_obj:\n json.dump(state, file_obj)\n \n def _load(self,epoch):\n self.model.load_state_dict(torch.load(os.path.join(self.checkpoints_path, str(epoch) + '.pth')))\n state = json.load(open(self.checkpoints_path+'/'+str(epoch) + '.json','r'))\n self.scheduler.load_state_dict(state)\n\n def vali(self, vali_loader, criterion, name,epoch):\n self.model.eval()\n preds=[]\n trues=[]\n total_loss = []\n vali_pbar = tqdm(vali_loader)\n for i, (batch_x,batch_y,background) in enumerate(vali_pbar):\n batch_x = batch_x.to(self.device)\n batch_y = batch_y\n background = background.float().to(self.device)\n\n # pred_y, 
pred_b = self.model(batch_x,background)\n pred_y = self.model(batch_x,background)\n true = batch_y.detach().cpu()\n pred_y = pred_y.detach().cpu()\n loss = criterion(pred_y, true)\n vali_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))\n total_loss.append(loss)\n\n preds.append(pred_y.numpy())\n trues.append(true.numpy())\n if i*batch_x.shape[0]>500:\n break\n\n total_loss = np.average(total_loss)\n\n preds = np.concatenate(preds,axis=0)\n trues = np.concatenate(trues,axis=0)\n mae, mse, rmse, mape, mspe = metric(preds, trues,vali_loader.dataset.mean,vali_loader.dataset.std)\n print('{}\\tmse:{}, mae:{}, rmse:{}, mape:{} ,mspe:{}'.format(name,mse, mae, rmse, mape, mspe ))\n logging.info('{}\\tmse:{}, mae:{}, rmse:{}, mape:{} ,mspe:{}'.format(name,mse, mae, rmse, mape, mspe ))\n self.model.train()\n\n if name == 'vali':\n nni.report_intermediate_result(mse)\n\n return total_loss\n \n\n def train(self, args):\n config = args.__dict__\n time_now = time.time()\n \n train_steps = len(self.train_loader)\n early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n \n model_optim = self._select_optimizer()\n criterion = self._select_criterion()\n\n for epoch in range(config['epoch_s'], config['epoch_e']):\n iter_count = 0\n train_loss = []\n \n self.model.train()\n train_pbar = tqdm(self.train_loader)\n i=0\n for batch_x,batch_y,background in train_pbar:\n iter_count += 1\n \n model_optim.zero_grad()\n batch_x = batch_x.to(self.device) # [32,12,3,32,64]\n batch_y = batch_y.to(self.device) # [32,12,3,32,64]\n background = background.float().to(self.device)\n\n # pred_y, pred_b = self.model(batch_x,background)\n # loss = criterion(pred_y, batch_y)+criterion(pred_b, background)\n\n pred_y = self.model(batch_x,background)\n loss = criterion(pred_y, batch_y)\n train_loss.append(loss.item())\n train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))\n \n loss.backward()\n model_optim.step()\n i+=1\n\n train_loss = np.average(train_loss)\n if epoch % args.log_step == 0:\n self._save(epoch)\n vali_loss = self.vali(self.vali_loader, criterion,'vali',epoch)\n test_loss = self.vali(self.test_loader, criterion,'test',epoch)\n self.scheduler.step(test_loss)\n # nni.report_intermediate_result(test_loss)\n\n\n print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\\n\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n logging.info(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\\n\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n early_stopping(vali_loss, self.model, self.path)\n\n if early_stopping.early_stop:\n print(\"Early stopping\")\n logging.info(\"Early stopping\")\n break\n \n best_model_path = self.path+'/'+'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n return self.model\n\n def test(self,args):\n self.model.eval()\n preds = []\n trues = []\n \n for batch_x,batch_y,background in self.test_loader:\n batch_x = batch_x.to(self.device)\n batch_y = batch_y\n background = background.to(self.device)\n\n pred_y = self.model(batch_x,background)#.squeeze()\n pred_y = pred_y.detach().cpu()\n true = batch_y.detach().cpu().numpy()#.squeeze()\n \n preds.append(pred_y)\n trues.append(true)\n\n preds = np.concatenate(preds,axis=0)\n trues = np.concatenate(trues,axis=0)\n print('test shape:', preds.shape, trues.shape)\n logging.info('test shape:{}-{}'.format(preds.shape, trues.shape))\n\n # result save\n folder_path = 
self.path+'/results/{}/sv/'.format(args.ex_name)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n mae, mse, rmse, mape, mspe = metric(preds, trues,self.test_loader.dataset.mean,self.test_loader.dataset.std)\n print('mse:{}, mae:{}'.format(mse, mae))\n logging.info('mse:{}, mae:{}'.format(mse, mae))\n\n np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n np.save(folder_path+'pred.npy', preds)\n np.save(folder_path+'true.npy', trues)\n return mse"
] | [
[
"numpy.save",
"torch.nn.MSELoss",
"torch.load",
"numpy.array",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.concatenate",
"numpy.average"
]
] |
cvitolo/DataScienceVM | [
"97e1b780de572266dcdab89d443af55d5b930f42"
] | [
"Tutorials/MLADS-spring-2018/CNTK_distributed/CNTK_distributed.py"
] | [
"import numpy as np\nimport os\nimport sys\nimport cntk\nfrom cntk.layers import Convolution2D, MaxPooling, Dense, Dropout\nfrom utils import *\nimport argparse\nfrom cntk.train.distributed import Communicator, mpi_communicator\n\n# Hyperparams\nEPOCHS = 1\nBATCHSIZE = 64 * 4\nLR = 0.01\nMOMENTUM = 0.9\nN_CLASSES = 10\n\ndef create_basic_model(input, out_dims):\n with cntk.layers.default_options(init=cntk.glorot_uniform(), activation=cntk.relu):\n net = cntk.layers.Convolution((5,5), 32, pad=True)(input)\n net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)\n\n net = cntk.layers.Convolution((5,5), 32, pad=True)(net)\n net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)\n\n net = cntk.layers.Convolution((5,5), 64, pad=True)(net)\n net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)\n \n net = cntk.layers.Dense(64)(net)\n net = cntk.layers.Dense(out_dims, activation=None)(net)\n \n return net\n \ndef init_model(m):\n progress_writers = [cntk.logging.ProgressPrinter(\n freq=int(BATCHSIZE / 2),\n rank=cntk.train.distributed.Communicator.rank(),\n num_epochs=EPOCHS)]\n\n # Loss (dense labels); check if support for sparse labels\n loss = cntk.cross_entropy_with_softmax(m, labels)\n # Momentum SGD\n # https://github.com/Microsoft/CNTK/blob/master/Manual/Manual_How_to_use_learners.ipynb\n # unit_gain=False: momentum_direction = momentum*old_momentum_direction + gradient\n # if unit_gain=True then ...(1-momentum)*gradient\n local_learner = cntk.momentum_sgd(m.parameters,\n lr=cntk.learning_rate_schedule(LR, cntk.UnitType.minibatch) ,\n momentum=cntk.momentum_schedule(MOMENTUM),\n unit_gain=False)\n\n distributed_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner)\n\n trainer = cntk.Trainer(m, (loss, cntk.classification_error(m, labels)), [distributed_learner], progress_writers)\n\n return trainer, distributed_learner\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input_dir')\n#parser.add_argument('--output_dir')\n\nprint(sys.argv)\n\nargs = parser.parse_args()\n\n# Data into format for library\nx_train, x_test, y_train, y_test = cifar_for_library(download_dir=args.input_dir, channel_first=True, one_hot=True)\n# CNTK format\ny_train = y_train.astype(np.float32)\ny_test = y_test.astype(np.float32)\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\nprint(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)\n\n# Placeholders\nfeatures = cntk.input_variable((3, 32, 32), np.float32)\nlabels = cntk.input_variable(N_CLASSES, np.float32)\n# Load symbol\nsym = create_basic_model(features, N_CLASSES)\n\ndef save_model(model, learner, file_name):\n if learner.communicator().is_main():\n model.save(file_name)\n\ntrainer, learner = init_model(sym)\n\nfor j in range(EPOCHS):\n for data, label in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True):\n trainer.train_minibatch({features: data, labels: label})\n # Log (this is just last batch in epoch, not average of batches)\n eval_error = trainer.previous_minibatch_evaluation_average\n print(\"Epoch %d | Accuracy: %.6f\" % (j+1, (1-eval_error)))\n \nz = cntk.softmax(sym)\n\nsave_model(sym, learner, \"{}/cifar_final.model\".format(args.input_dir))\n\nn_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE\ny_guess = np.zeros(n_samples, dtype=np.int)\ny_truth = np.argmax(y_test[:n_samples], axis=-1)\nc = 0\nfor data, label in yield_mb(x_test, y_test, BATCHSIZE):\n predicted_label_probs = z.eval({features : data})\n y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = np.argmax(predicted_label_probs, axis=-1)\n 
c += 1\n\nprint(\"Accuracy: \", sum(y_guess == y_truth)/len(y_guess))\n\ncntk.train.distributed.Communicator.finalize()\n"
] | [
[
"numpy.argmax",
"numpy.zeros"
]
] |
jjjjohnson/OpenTransformer | [
"9a6371095ee83896d886addf55bda3a42c3918f6"
] | [
"otrans/encoder/transformer.py"
] | [
"# File : transformer.py\n# Author : Zhengkun Tian\n# Email : zhengkun.tian@outlook.com\n\nimport logging\nimport torch\nimport torch.nn as nn\nfrom otrans.module.pos import MixedPositionalEncoding, RelPositionalEncoding\nfrom otrans.module.ffn import PositionwiseFeedForward\nfrom otrans.module.attention import MultiHeadedSelfAttention, MultiHeadedSelfAttentionWithRelPos\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout, residual_dropout,\n normalize_before=False, concat_after=False, relative_positional=False, activation='relu'):\n super(TransformerEncoderLayer, self).__init__()\n\n self.relative_positional = relative_positional\n\n if self.relative_positional:\n self.slf_attn = MultiHeadedSelfAttentionWithRelPos(n_heads, d_model, slf_attn_dropout)\n else:\n self.slf_attn = MultiHeadedSelfAttention(n_heads, d_model, slf_attn_dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout, activation)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n\n self.dropout1 = nn.Dropout(residual_dropout)\n self.dropout2 = nn.Dropout(residual_dropout)\n\n self.normalize_before = normalize_before\n self.concat_after = concat_after\n\n if self.concat_after:\n self.concat_linear = nn.Linear(d_model * 2, d_model)\n\n def forward(self, x, mask, pos=None):\n if self.normalize_before:\n x = self.norm1(x)\n residual = x\n\n if self.relative_positional:\n slf_attn_out, slf_attn_weights = self.slf_attn(x, mask, pos)\n else:\n slf_attn_out, slf_attn_weights = self.slf_attn(x, mask)\n \n if self.concat_after:\n x = residual + self.concat_linear(torch.cat((x, slf_attn_out), dim=-1))\n else:\n x = residual + self.dropout1(slf_attn_out)\n if not self.normalize_before:\n x = self.norm1(x)\n\n if self.normalize_before:\n x = self.norm2(x)\n residual = x\n x = residual + self.dropout2(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm2(x)\n\n return x, {'slf_attn_weights': slf_attn_weights}\n\n def inference(self, x, mask, pos=None, cache=None):\n if self.normalize_before:\n x = self.norm1(x)\n residual = x\n if self.relative_positional:\n slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache, pos)\n else:\n slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache)\n\n if self.concat_after:\n x = residual + self.concat_linear(torch.cat((x, slf_attn_out), dim=-1))\n else:\n x = residual + slf_attn_out\n if not self.normalize_before:\n x = self.norm1(x)\n\n if self.normalize_before:\n x = self.norm2(x)\n residual = x\n x = residual + self.feed_forward(x)\n if not self.normalize_before:\n x = self.norm2(x)\n\n return x, new_cache, {'slf_attn_weights': slf_attn_weights}\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6, pos_dropout=0.0, \n slf_attn_dropout=0.0, ffn_dropout=0.0, residual_dropout=0.1, normalize_before=False,\n concat_after=False, relative_positional=False, activation='relu'):\n super(TransformerEncoder, self).__init__()\n\n self.normalize_before = normalize_before\n self.relative_positional = relative_positional\n\n if self.relative_positional:\n self.pos_emb = RelPositionalEncoding(d_model, pos_dropout)\n else:\n self.pos_emb = MixedPositionalEncoding(d_model, pos_dropout)\n\n self.blocks = nn.ModuleList([\n TransformerEncoderLayer(\n n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout,\n 
residual_dropout=residual_dropout, normalize_before=normalize_before,\n concat_after=concat_after, relative_positional=relative_positional, activation=activation) for _ in range(n_blocks)\n ])\n\n if self.normalize_before:\n self.norm = nn.LayerNorm(d_model)\n\n def forward(self, inputs, mask):\n \n enc_output, pos = self.pos_emb(inputs)\n\n enc_output.masked_fill_(~mask.unsqueeze(2), 0.0)\n\n attn_weights = {}\n for i, block in enumerate(self.blocks):\n enc_output, attn_weight = block(enc_output, mask.unsqueeze(1), pos)\n attn_weights['enc_block_%d' % i] = attn_weight\n\n if self.normalize_before:\n enc_output = self.norm(enc_output)\n\n return enc_output, mask, attn_weights\n\n\n def inference(self, inputs, mask, cache=None):\n \n enc_output, pos = self.pos_emb.inference(inputs)\n\n enc_output.masked_fill_(~mask.unsqueeze(2), 0.0)\n\n attn_weights = {}\n new_caches = []\n for i, block in enumerate(self.blocks):\n enc_output, new_cache, attn_weight = block.inference(enc_output, mask.unsqueeze(1), pos, cache)\n attn_weights['enc_block_%d' % i] = attn_weight\n new_caches.append(new_cache)\n\n if self.normalize_before:\n enc_output = self.norm(enc_output)\n\n return enc_output, mask, new_caches, attn_weights\n\n"
] | [
[
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat"
]
] |
haruiz/models | [
"4dfcf48f7e15646dca2089a0e9f583d24661924c"
] | [
"research/object_detection/utils/visualization_utils.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A set of functions that are used for visualization.\n\nThese functions often receive an image, perform some visualization on the image.\nThe functions do not return a value, instead they modify the image itself.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\n# Set headless-friendly backend.\n#import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements\nimport matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top\nimport numpy as np\nimport PIL.Image as Image\nimport PIL.ImageColor as ImageColor\nimport PIL.ImageDraw as ImageDraw\nimport PIL.ImageFont as ImageFont\nimport six\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import shape_utils\n\n_TITLE_LEFT_MARGIN = 10\n_TITLE_TOP_MARGIN = 10\nSTANDARD_COLORS = [\n 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',\n 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',\n 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',\n 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',\n 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',\n 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',\n 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',\n 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',\n 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',\n 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',\n 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',\n 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',\n 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',\n 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',\n 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',\n 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',\n 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',\n 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',\n 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',\n 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',\n 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',\n 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',\n 'WhiteSmoke', 'Yellow', 'YellowGreen'\n]\n\n\ndef _get_multiplier_for_color_randomness():\n \"\"\"Returns a multiplier to get semi-random colors from 
successive indices.\n\n This function computes a prime number, p, in the range [2, 17] that:\n - is closest to len(STANDARD_COLORS) / 10\n - does not divide len(STANDARD_COLORS)\n\n If no prime numbers in that range satisfy the constraints, p is returned as 1.\n\n Once p is established, it can be used as a multiplier to select\n non-consecutive colors from STANDARD_COLORS:\n colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]\n \"\"\"\n num_colors = len(STANDARD_COLORS)\n prime_candidates = [5, 7, 11, 13, 17]\n\n # Remove all prime candidates that divide the number of colors.\n prime_candidates = [p for p in prime_candidates if num_colors % p]\n if not prime_candidates:\n return 1\n\n # Return the closest prime number to num_colors / 10.\n abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]\n num_candidates = len(abs_distance)\n inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]\n return prime_candidates[inds[0]]\n\n\ndef save_image_array_as_png(image, output_path):\n \"\"\"Saves an image (represented as a numpy array) to PNG.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n output_path: path to which image should be written.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n with tf.gfile.Open(output_path, 'w') as fid:\n image_pil.save(fid, 'PNG')\n\n\ndef encode_image_array_as_png_str(image):\n \"\"\"Encodes a numpy array into a PNG string.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n\n Returns:\n PNG encoded image string.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image))\n output = six.BytesIO()\n image_pil.save(output, format='PNG')\n png_string = output.getvalue()\n output.close()\n return png_string\n\n\ndef draw_bounding_box_on_image_array(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n \"\"\"Adds a bounding box to an image (numpy array).\n\n Bounding box coordinates can be specified in either absolute (pixel) or\n normalized coordinates by setting the use_normalized_coordinates argument.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n ymin: ymin of bounding box.\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. 
Otherwise treat\n coordinates as absolute.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,\n thickness, display_str_list,\n use_normalized_coordinates)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_bounding_box_on_image(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n \"\"\"Adds a bounding box to an image.\n\n Bounding box coordinates can be specified in either absolute (pixel) or\n normalized coordinates by setting the use_normalized_coordinates argument.\n\n Each string in display_str_list is displayed on a separate line above the\n bounding box in black text on a rectangle filled with the input 'color'.\n If the top of the bounding box extends to the edge of the image, the strings\n are displayed below the bounding box.\n\n Args:\n image: a PIL.Image object.\n ymin: ymin of bounding box.\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. Otherwise treat\n coordinates as absolute.\n \"\"\"\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n if thickness > 0:\n draw.line([(left, top), (left, bottom), (right, bottom), (right, top),\n (left, top)],\n width=thickness,\n fill=color)\n try:\n font = ImageFont.truetype('arial.ttf', 24)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin), (left + text_width,\n text_bottom)],\n fill=color)\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n text_bottom -= text_height - 2 * margin\n\n\ndef draw_bounding_boxes_on_image_array(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n \"\"\"Draws bounding boxes on image (numpy array).\n\n Args:\n image: a numpy array object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized format between [0, 1].\n color: color to draw bounding box. Default is red.\n thickness: line thickness. 
Default value is 4.\n display_str_list_list: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n\n Raises:\n ValueError: if boxes is not a [N, 4] array\n \"\"\"\n image_pil = Image.fromarray(image)\n draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,\n display_str_list_list)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_bounding_boxes_on_image(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n \"\"\"Draws bounding boxes on image.\n\n Args:\n image: a PIL.Image object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized format between [0, 1].\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list_list: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n\n Raises:\n ValueError: if boxes is not a [N, 4] array\n \"\"\"\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n raise ValueError('Input must be of size [N, 4]')\n for i in range(boxes_shape[0]):\n display_str_list = ()\n if display_str_list_list:\n display_str_list = display_str_list_list[i]\n draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],\n boxes[i, 3], color, thickness, display_str_list)\n\n\ndef create_visualization_fn(category_index,\n include_masks=False,\n include_keypoints=False,\n include_keypoint_scores=False,\n include_track_ids=False,\n **kwargs):\n \"\"\"Constructs a visualization function that can be wrapped in a py_func.\n\n py_funcs only accept positional arguments. This function returns a suitable\n function with the correct positional argument mapping. The positional\n arguments in order are:\n 0: image\n 1: boxes\n 2: classes\n 3: scores\n [4]: masks (optional)\n [4-5]: keypoints (optional)\n [4-6]: keypoint_scores (optional)\n [4-7]: track_ids (optional)\n\n -- Example 1 --\n vis_only_masks_fn = create_visualization_fn(category_index,\n include_masks=True, include_keypoints=False, include_track_ids=False,\n **kwargs)\n image = tf.py_func(vis_only_masks_fn,\n inp=[image, boxes, classes, scores, masks],\n Tout=tf.uint8)\n\n -- Example 2 --\n vis_masks_and_track_ids_fn = create_visualization_fn(category_index,\n include_masks=True, include_keypoints=False, include_track_ids=True,\n **kwargs)\n image = tf.py_func(vis_masks_and_track_ids_fn,\n inp=[image, boxes, classes, scores, masks, track_ids],\n Tout=tf.uint8)\n\n Args:\n category_index: a dict that maps integer ids to category dicts. 
e.g.\n {1: {1: 'dog'}, 2: {2: 'cat'}, ...}\n include_masks: Whether masks should be expected as a positional argument in\n the returned function.\n include_keypoints: Whether keypoints should be expected as a positional\n argument in the returned function.\n include_keypoint_scores: Whether keypoint scores should be expected as a\n positional argument in the returned function.\n include_track_ids: Whether track ids should be expected as a positional\n argument in the returned function.\n **kwargs: Additional kwargs that will be passed to\n visualize_boxes_and_labels_on_image_array.\n\n Returns:\n Returns a function that only takes tensors as positional arguments.\n \"\"\"\n\n def visualization_py_func_fn(*args):\n \"\"\"Visualization function that can be wrapped in a tf.py_func.\n\n Args:\n *args: First 4 positional arguments must be:\n image - uint8 numpy array with shape (img_height, img_width, 3).\n boxes - a numpy array of shape [N, 4].\n classes - a numpy array of shape [N].\n scores - a numpy array of shape [N] or None.\n -- Optional positional arguments --\n instance_masks - a numpy array of shape [N, image_height, image_width].\n keypoints - a numpy array of shape [N, num_keypoints, 2].\n keypoint_scores - a numpy array of shape [N, num_keypoints].\n track_ids - a numpy array of shape [N] with unique track ids.\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid\n boxes.\n \"\"\"\n image = args[0]\n boxes = args[1]\n classes = args[2]\n scores = args[3]\n masks = keypoints = keypoint_scores = track_ids = None\n pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).\n if include_masks:\n masks = args[pos_arg_ptr]\n pos_arg_ptr += 1\n if include_keypoints:\n keypoints = args[pos_arg_ptr]\n pos_arg_ptr += 1\n if include_keypoint_scores:\n keypoint_scores = args[pos_arg_ptr]\n pos_arg_ptr += 1\n if include_track_ids:\n track_ids = args[pos_arg_ptr]\n\n return visualize_boxes_and_labels_on_image_array(\n image,\n boxes,\n classes,\n scores,\n category_index=category_index,\n instance_masks=masks,\n keypoints=keypoints,\n keypoint_scores=keypoint_scores,\n track_ids=track_ids,\n **kwargs)\n return visualization_py_func_fn\n\n\ndef draw_heatmaps_on_image(image, heatmaps):\n \"\"\"Draws heatmaps on an image.\n\n The heatmaps are handled channel by channel and different colors are used to\n paint different heatmap channels.\n\n Args:\n image: a PIL.Image object.\n heatmaps: a numpy array with shape [image_height, image_width, channel].\n Note that the image_height and image_width should match the size of input\n image.\n \"\"\"\n draw = ImageDraw.Draw(image)\n channel = heatmaps.shape[2]\n for c in range(channel):\n heatmap = heatmaps[:, :, c] * 255\n heatmap = heatmap.astype('uint8')\n bitmap = Image.fromarray(heatmap, 'L')\n bitmap.convert('1')\n draw.bitmap(\n xy=[(0, 0)],\n bitmap=bitmap,\n fill=STANDARD_COLORS[c])\n\n\ndef draw_heatmaps_on_image_array(image, heatmaps):\n \"\"\"Overlays heatmaps to an image (numpy array).\n\n The function overlays the heatmaps on top of image. The heatmap values will be\n painted with different colors depending on the channels. 
Similar to\n \"draw_heatmaps_on_image_array\" function except the inputs are numpy arrays.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n heatmaps: a numpy array with shape [height, width, channel].\n\n Returns:\n An uint8 numpy array representing the input image painted with heatmap\n colors.\n \"\"\"\n if not isinstance(image, np.ndarray):\n image = image.numpy()\n if not isinstance(heatmaps, np.ndarray):\n heatmaps = heatmaps.numpy()\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_heatmaps_on_image(image_pil, heatmaps)\n return np.array(image_pil)\n\n\ndef draw_heatmaps_on_image_tensors(images,\n heatmaps,\n apply_sigmoid=False):\n \"\"\"Draws heatmaps on batch of image tensors.\n\n Args:\n images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional\n channels will be ignored. If C = 1, then we convert the images to RGB\n images.\n heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the\n heatmaps will be resized to match the input image size before overlaying\n the heatmaps with input images. Theoretically the heatmap height width\n should have the same aspect ratio as the input image to avoid potential\n misalignment introduced by the image resize.\n apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If\n the heatmaps come directly from the prediction logits, then we should\n apply the sigmoid layer to make sure the values are in between [0.0, 1.0].\n\n Returns:\n 4D image tensor of type uint8, with heatmaps overlaid on top.\n \"\"\"\n # Additional channels are being ignored.\n if images.shape[3] > 3:\n images = images[:, :, :, 0:3]\n elif images.shape[3] == 1:\n images = tf.image.grayscale_to_rgb(images)\n\n _, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)\n if apply_sigmoid:\n heatmaps = tf.math.sigmoid(heatmaps)\n resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])\n\n elems = [images, resized_heatmaps]\n\n def draw_heatmaps(image_and_heatmaps):\n \"\"\"Draws heatmaps on image.\"\"\"\n image_with_heatmaps = tf.py_function(\n draw_heatmaps_on_image_array,\n image_and_heatmaps,\n tf.uint8)\n return image_with_heatmaps\n images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)\n return images\n\n\ndef _resize_original_image(image, image_shape):\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_images(\n image,\n image_shape,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=True)\n return tf.cast(tf.squeeze(image, 0), tf.uint8)\n\n\ndef draw_bounding_boxes_on_image_tensors(images,\n boxes,\n classes,\n scores,\n category_index,\n original_image_spatial_shape=None,\n true_image_shape=None,\n instance_masks=None,\n keypoints=None,\n keypoint_scores=None,\n keypoint_edges=None,\n track_ids=None,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True):\n \"\"\"Draws bounding boxes, masks, and keypoints on batch of image tensors.\n\n Args:\n images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional\n channels will be ignored. If C = 1, then we convert the images to RGB\n images.\n boxes: [N, max_detections, 4] float32 tensor of detection boxes.\n classes: [N, max_detections] int tensor of detection classes. Note that\n classes are 1-indexed.\n scores: [N, max_detections] float32 tensor of detection scores.\n category_index: a dict that maps integer ids to category dicts. 
e.g.\n {1: {1: 'dog'}, 2: {2: 'cat'}, ...}\n original_image_spatial_shape: [N, 2] tensor containing the spatial size of\n the original image.\n true_image_shape: [N, 3] tensor containing the spatial size of unpadded\n original_image.\n instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with\n instance masks.\n keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]\n with keypoints.\n keypoint_scores: A 3D float32 tensor of shape [N, max_detection,\n num_keypoints] with keypoint scores.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.\n instance ids for each object). If provided, the color-coding of boxes is\n dictated by these ids, and not classes.\n max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.\n min_score_thresh: Minimum score threshold for visualization. Default 0.2.\n use_normalized_coordinates: Whether to assume boxes and kepoints are in\n normalized coordinates (as opposed to absolute coordiantes).\n Default is True.\n\n Returns:\n 4D image tensor of type uint8, with boxes drawn on top.\n \"\"\"\n # Additional channels are being ignored.\n if images.shape[3] > 3:\n images = images[:, :, :, 0:3]\n elif images.shape[3] == 1:\n images = tf.image.grayscale_to_rgb(images)\n visualization_keyword_args = {\n 'use_normalized_coordinates': use_normalized_coordinates,\n 'max_boxes_to_draw': max_boxes_to_draw,\n 'min_score_thresh': min_score_thresh,\n 'agnostic_mode': False,\n 'line_thickness': 4,\n 'keypoint_edges': keypoint_edges\n }\n if true_image_shape is None:\n true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])\n else:\n true_shapes = true_image_shape\n if original_image_spatial_shape is None:\n original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])\n else:\n original_shapes = original_image_spatial_shape\n\n visualize_boxes_fn = create_visualization_fn(\n category_index,\n include_masks=instance_masks is not None,\n include_keypoints=keypoints is not None,\n include_keypoint_scores=keypoint_scores is not None,\n include_track_ids=track_ids is not None,\n **visualization_keyword_args)\n\n elems = [true_shapes, original_shapes, images, boxes, classes, scores]\n if instance_masks is not None:\n elems.append(instance_masks)\n if keypoints is not None:\n elems.append(keypoints)\n if keypoint_scores is not None:\n elems.append(keypoint_scores)\n if track_ids is not None:\n elems.append(track_ids)\n\n def draw_boxes(image_and_detections):\n \"\"\"Draws boxes on image.\"\"\"\n true_shape = image_and_detections[0]\n original_shape = image_and_detections[1]\n if true_image_shape is not None:\n image = shape_utils.pad_or_clip_nd(image_and_detections[2],\n [true_shape[0], true_shape[1], 3])\n if original_image_spatial_shape is not None:\n image_and_detections[2] = _resize_original_image(image, original_shape)\n\n image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],\n tf.uint8)\n return image_with_boxes\n\n images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)\n return images\n\n\ndef draw_side_by_side_evaluation_image(eval_dict,\n category_index,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True,\n keypoint_edges=None):\n \"\"\"Creates a side-by-side image with detections and groundtruth.\n\n Bounding 
boxes (and instance masks, if available) are visualized on both\n subimages.\n\n Args:\n eval_dict: The evaluation dictionary returned by\n eval_util.result_dict_for_batched_example() or\n eval_util.result_dict_for_single_example().\n category_index: A category index (dictionary) produced from a labelmap.\n max_boxes_to_draw: The maximum number of boxes to draw for detections.\n min_score_thresh: The minimum score threshold for showing detections.\n use_normalized_coordinates: Whether to assume boxes and keypoints are in\n normalized coordinates (as opposed to absolute coordinates).\n Default is True.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n\n Returns:\n A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left\n corresponds to detections, while the subimage on the right corresponds to\n groundtruth.\n \"\"\"\n detection_fields = fields.DetectionResultFields()\n input_data_fields = fields.InputDataFields()\n\n images_with_detections_list = []\n\n # Add the batch dimension if the eval_dict is for single example.\n if len(eval_dict[detection_fields.detection_classes].shape) == 1:\n for key in eval_dict:\n if (key != input_data_fields.original_image and\n key != input_data_fields.image_additional_channels):\n eval_dict[key] = tf.expand_dims(eval_dict[key], 0)\n\n for indx in range(eval_dict[input_data_fields.original_image].shape[0]):\n instance_masks = None\n if detection_fields.detection_masks in eval_dict:\n instance_masks = tf.cast(\n tf.expand_dims(\n eval_dict[detection_fields.detection_masks][indx], axis=0),\n tf.uint8)\n keypoints = None\n keypoint_scores = None\n if detection_fields.detection_keypoints in eval_dict:\n keypoints = tf.expand_dims(\n eval_dict[detection_fields.detection_keypoints][indx], axis=0)\n if detection_fields.detection_keypoint_scores in eval_dict:\n keypoint_scores = tf.expand_dims(\n eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)\n else:\n keypoint_scores = tf.cast(keypoint_ops.set_keypoint_visibilities(\n keypoints), dtype=tf.float32)\n\n groundtruth_instance_masks = None\n if input_data_fields.groundtruth_instance_masks in eval_dict:\n groundtruth_instance_masks = tf.cast(\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_instance_masks][indx],\n axis=0), tf.uint8)\n groundtruth_keypoints = None\n groundtruth_keypoint_scores = None\n gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities\n if input_data_fields.groundtruth_keypoints in eval_dict:\n groundtruth_keypoints = tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)\n if gt_kpt_vis_fld in eval_dict:\n groundtruth_keypoint_scores = tf.expand_dims(\n tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)\n else:\n groundtruth_keypoint_scores = tf.cast(\n keypoint_ops.set_keypoint_visibilities(\n groundtruth_keypoints), dtype=tf.float32)\n\n images_with_detections = draw_bounding_boxes_on_image_tensors(\n tf.expand_dims(\n eval_dict[input_data_fields.original_image][indx], axis=0),\n tf.expand_dims(\n eval_dict[detection_fields.detection_boxes][indx], axis=0),\n tf.expand_dims(\n eval_dict[detection_fields.detection_classes][indx], axis=0),\n tf.expand_dims(\n eval_dict[detection_fields.detection_scores][indx], axis=0),\n category_index,\n original_image_spatial_shape=tf.expand_dims(\n 
eval_dict[input_data_fields.original_image_spatial_shape][indx],\n axis=0),\n true_image_shape=tf.expand_dims(\n eval_dict[input_data_fields.true_image_shape][indx], axis=0),\n instance_masks=instance_masks,\n keypoints=keypoints,\n keypoint_scores=keypoint_scores,\n keypoint_edges=keypoint_edges,\n max_boxes_to_draw=max_boxes_to_draw,\n min_score_thresh=min_score_thresh,\n use_normalized_coordinates=use_normalized_coordinates)\n images_with_groundtruth = draw_bounding_boxes_on_image_tensors(\n tf.expand_dims(\n eval_dict[input_data_fields.original_image][indx], axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),\n tf.expand_dims(\n tf.ones_like(\n eval_dict[input_data_fields.groundtruth_classes][indx],\n dtype=tf.float32),\n axis=0),\n category_index,\n original_image_spatial_shape=tf.expand_dims(\n eval_dict[input_data_fields.original_image_spatial_shape][indx],\n axis=0),\n true_image_shape=tf.expand_dims(\n eval_dict[input_data_fields.true_image_shape][indx], axis=0),\n instance_masks=groundtruth_instance_masks,\n keypoints=groundtruth_keypoints,\n keypoint_scores=groundtruth_keypoint_scores,\n keypoint_edges=keypoint_edges,\n max_boxes_to_draw=None,\n min_score_thresh=0.0,\n use_normalized_coordinates=use_normalized_coordinates)\n images_to_visualize = tf.concat([images_with_detections,\n images_with_groundtruth], axis=2)\n\n if input_data_fields.image_additional_channels in eval_dict:\n images_with_additional_channels_groundtruth = (\n draw_bounding_boxes_on_image_tensors(\n tf.expand_dims(\n eval_dict[input_data_fields.image_additional_channels][indx],\n axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_classes][indx],\n axis=0),\n tf.expand_dims(\n tf.ones_like(\n eval_dict[input_data_fields.groundtruth_classes][indx],\n dtype=tf.float32),\n axis=0),\n category_index,\n original_image_spatial_shape=tf.expand_dims(\n eval_dict[input_data_fields.original_image_spatial_shape]\n [indx],\n axis=0),\n true_image_shape=tf.expand_dims(\n eval_dict[input_data_fields.true_image_shape][indx], axis=0),\n instance_masks=groundtruth_instance_masks,\n keypoints=None,\n keypoint_edges=None,\n max_boxes_to_draw=None,\n min_score_thresh=0.0,\n use_normalized_coordinates=use_normalized_coordinates))\n images_to_visualize = tf.concat(\n [images_to_visualize, images_with_additional_channels_groundtruth],\n axis=2)\n images_with_detections_list.append(images_to_visualize)\n\n return images_with_detections_list\n\n\ndef draw_keypoints_on_image_array(image,\n keypoints,\n keypoint_scores=None,\n min_score_thresh=0.5,\n color='red',\n radius=2,\n use_normalized_coordinates=True,\n keypoint_edges=None,\n keypoint_edge_color='green',\n keypoint_edge_width=2):\n \"\"\"Draws keypoints on an image (numpy array).\n\n Args:\n image: a numpy array with shape [height, width, 3].\n keypoints: a numpy array with shape [num_keypoints, 2].\n keypoint_scores: a numpy array with shape [num_keypoints]. If provided, only\n those keypoints with a score above score_threshold will be visualized.\n min_score_thresh: A scalar indicating the minimum keypoint score required\n for a keypoint to be visualized. Note that keypoint_scores must be\n provided for this threshold to take effect.\n color: color to draw the keypoints with. Default is red.\n radius: keypoint radius. 
Default value is 2.\n use_normalized_coordinates: if True (default), treat keypoint values as\n relative to the image. Otherwise treat them as absolute.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n keypoint_edge_color: color to draw the keypoint edges with. Default is red.\n keypoint_edge_width: width of the edges drawn between keypoints. Default\n value is 2.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_keypoints_on_image(image_pil,\n keypoints,\n keypoint_scores=keypoint_scores,\n min_score_thresh=min_score_thresh,\n color=color,\n radius=radius,\n use_normalized_coordinates=use_normalized_coordinates,\n keypoint_edges=keypoint_edges,\n keypoint_edge_color=keypoint_edge_color,\n keypoint_edge_width=keypoint_edge_width)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_keypoints_on_image(image,\n keypoints,\n keypoint_scores=None,\n min_score_thresh=0.5,\n color='red',\n radius=2,\n use_normalized_coordinates=True,\n keypoint_edges=None,\n keypoint_edge_color='green',\n keypoint_edge_width=2):\n \"\"\"Draws keypoints on an image.\n\n Args:\n image: a PIL.Image object.\n keypoints: a numpy array with shape [num_keypoints, 2].\n keypoint_scores: a numpy array with shape [num_keypoints].\n min_score_thresh: a score threshold for visualizing keypoints. Only used if\n keypoint_scores is provided.\n color: color to draw the keypoints with. Default is red.\n radius: keypoint radius. Default value is 2.\n use_normalized_coordinates: if True (default), treat keypoint values as\n relative to the image. Otherwise treat them as absolute.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n keypoint_edge_color: color to draw the keypoint edges with. Default is red.\n keypoint_edge_width: width of the edges drawn between keypoints. 
Default\n      value is 2.\n  \"\"\"\n  draw = ImageDraw.Draw(image)\n  im_width, im_height = image.size\n  keypoints = np.array(keypoints)\n  keypoints_x = [k[1] for k in keypoints]\n  keypoints_y = [k[0] for k in keypoints]\n  if use_normalized_coordinates:\n    keypoints_x = tuple([im_width * x for x in keypoints_x])\n    keypoints_y = tuple([im_height * y for y in keypoints_y])\n  if keypoint_scores is not None:\n    keypoint_scores = np.array(keypoint_scores)\n    valid_kpt = np.greater(keypoint_scores, min_score_thresh)\n  else:\n    valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),\n                         np.zeros_like(keypoints[:, 0]),\n                         np.ones_like(keypoints[:, 0]))\n  valid_kpt = [v for v in valid_kpt]\n\n  for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):\n    if valid:\n      draw.ellipse([(keypoint_x - radius, keypoint_y - radius),\n                    (keypoint_x + radius, keypoint_y + radius)],\n                   outline=color, fill=color)\n  if keypoint_edges is not None:\n    for keypoint_start, keypoint_end in keypoint_edges:\n      if (keypoint_start < 0 or keypoint_start >= len(keypoints) or\n          keypoint_end < 0 or keypoint_end >= len(keypoints)):\n        continue\n      if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):\n        continue\n      edge_coordinates = [\n          keypoints_x[keypoint_start], keypoints_y[keypoint_start],\n          keypoints_x[keypoint_end], keypoints_y[keypoint_end]\n      ]\n      draw.line(\n          edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)\n\n\ndef draw_mask_on_image_array(image, mask, color='red', alpha=0.4):\n  \"\"\"Draws mask on an image.\n\n  Args:\n    image: uint8 numpy array with shape (img_height, img_width, 3)\n    mask: a uint8 numpy array of shape (img_height, img_width) with\n      values of either 0 or 1.\n    color: color to draw the mask with. Default is red.\n    alpha: transparency value between 0 and 1. (default: 0.4)\n\n  Raises:\n    ValueError: On incorrect data type for image or masks.\n  \"\"\"\n  if image.dtype != np.uint8:\n    raise ValueError('`image` not of type np.uint8')\n  if mask.dtype != np.uint8:\n    raise ValueError('`mask` not of type np.uint8')\n  if np.any(np.logical_and(mask != 1, mask != 0)):\n    raise ValueError('`mask` elements should be in [0, 1]')\n  if image.shape[:2] != mask.shape:\n    raise ValueError('The image has spatial dimensions %s but the mask has '\n                     'dimensions %s' % (image.shape[:2], mask.shape))\n  rgb = ImageColor.getrgb(color)\n  pil_image = Image.fromarray(image)\n\n  solid_color = np.expand_dims(\n      np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])\n  pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')\n  pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')\n  pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n  np.copyto(image, np.array(pil_image.convert('RGB')))\n\n\ndef visualize_boxes_and_labels_on_image_array(\n    image,\n    boxes,\n    classes,\n    scores,\n    category_index,\n    instance_masks=None,\n    instance_boundaries=None,\n    keypoints=None,\n    keypoint_scores=None,\n    keypoint_edges=None,\n    track_ids=None,\n    use_normalized_coordinates=False,\n    max_boxes_to_draw=20,\n    min_score_thresh=.5,\n    agnostic_mode=False,\n    line_thickness=4,\n    groundtruth_box_visualization_color='black',\n    skip_boxes=False,\n    skip_scores=False,\n    skip_labels=False,\n    skip_track_ids=False):\n  \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n  This function groups boxes that correspond to the same location, creates a\n  display string for each detection, and overlays these on the image.
Note that this function modifies the image in place, and returns\n that same image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]. Note that class indices are 1-based,\n and match the keys in the label map.\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plot all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width] with\n values ranging between 0 and 1, can be None.\n instance_boundaries: a numpy array of shape [N, image_height, image_width]\n with values ranging between 0 and 1, can be None.\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None.\n keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n track_ids: a numpy array of shape [N] with unique track ids. If provided,\n color-coding of boxes will be determined by these ids, and not the class\n indices.\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box or keypoint to be\n visualized.\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n groundtruth_box_visualization_color: box color for visualizing groundtruth\n boxes\n skip_boxes: whether to skip the drawing of bounding boxes.\n skip_scores: whether to skip score when drawing a single detection\n skip_labels: whether to skip label when drawing a single detection\n skip_track_ids: whether to skip track id when drawing a single detection\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_instance_boundaries_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n box_to_keypoint_scores_map = collections.defaultdict(list)\n box_to_track_ids_map = {}\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(boxes.shape[0]):\n if max_boxes_to_draw == len(box_to_color_map):\n break\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if instance_boundaries is not None:\n box_to_instance_boundaries_map[box] = instance_boundaries[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if keypoint_scores is not None:\n box_to_keypoint_scores_map[box].extend(keypoint_scores[i])\n if track_ids is not None:\n box_to_track_ids_map[box] = track_ids[i]\n if scores is None:\n box_to_color_map[box] = groundtruth_box_visualization_color\n else:\n display_str = ''\n if not skip_labels:\n if not agnostic_mode:\n if classes[i] in six.viewkeys(category_index):\n class_name = category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n if not skip_scores:\n if not display_str:\n display_str = '{}%'.format(round(100*scores[i]))\n else:\n display_str = '{}: {}%'.format(display_str, round(100*scores[i]))\n if not skip_track_ids and track_ids is not None:\n if not display_str:\n display_str = 'ID {}'.format(track_ids[i])\n else:\n display_str = '{}: ID {}'.format(display_str, track_ids[i])\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n elif track_ids is not None:\n prime_multipler = _get_multiplier_for_color_randomness()\n box_to_color_map[box] = STANDARD_COLORS[\n (prime_multipler * track_ids[i]) % len(STANDARD_COLORS)]\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n if instance_masks is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_masks_map[box],\n color=color\n )\n if instance_boundaries is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_boundaries_map[box],\n color='red',\n alpha=1.0\n )\n draw_bounding_box_on_image_array(\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=0 if skip_boxes else line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates)\n if keypoints is not None:\n keypoint_scores_for_box = None\n if box_to_keypoint_scores_map:\n keypoint_scores_for_box = box_to_keypoint_scores_map[box]\n 
draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n keypoint_scores_for_box,\n min_score_thresh=min_score_thresh,\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates,\n keypoint_edges=keypoint_edges,\n keypoint_edge_color=color,\n keypoint_edge_width=line_thickness // 2)\n\n return image\n\n\ndef add_cdf_image_summary(values, name):\n \"\"\"Adds a tf.summary.image for a CDF plot of the values.\n\n Normalizes `values` such that they sum to 1, plots the cumulative distribution\n function and creates a tf image summary.\n\n Args:\n values: a 1-D float32 tensor containing the values.\n name: name for the image summary.\n \"\"\"\n def cdf_plot(values):\n \"\"\"Numpy function to plot CDF.\"\"\"\n normalized_values = values / np.sum(values)\n sorted_values = np.sort(normalized_values)\n cumulative_values = np.cumsum(sorted_values)\n fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)\n / cumulative_values.size)\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot('111')\n ax.plot(fraction_of_examples, cumulative_values)\n ax.set_ylabel('cumulative normalized values')\n ax.set_xlabel('fraction of examples')\n fig.canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(\n 1, int(height), int(width), 3)\n return image\n cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)\n tf.summary.image(name, cdf_plot)\n\n\ndef add_hist_image_summary(values, bins, name):\n \"\"\"Adds a tf.summary.image for a histogram plot of the values.\n\n Plots the histogram of values and creates a tf image summary.\n\n Args:\n values: a 1-D float32 tensor containing the values.\n bins: bin edges which will be directly passed to np.histogram.\n name: name for the image summary.\n \"\"\"\n\n def hist_plot(values, bins):\n \"\"\"Numpy function to plot hist.\"\"\"\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot('111')\n y, x = np.histogram(values, bins=bins)\n ax.plot(x[:-1], y)\n ax.set_ylabel('count')\n ax.set_xlabel('value')\n fig.canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n image = np.fromstring(\n fig.canvas.tostring_rgb(), dtype='uint8').reshape(\n 1, int(height), int(width), 3)\n return image\n hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)\n tf.summary.image(name, hist_plot)\n\n\nclass EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):\n \"\"\"Abstract base class responsible for visualizations during evaluation.\n\n Currently, summary images are not run during evaluation. One way to produce\n evaluation images in Tensorboard is to provide tf.summary.image strings as\n `value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. 
This class is\n responsible for accruing images (with overlaid detections and groundtruth)\n and returning a dictionary that can be passed to `eval_metric_ops`.\n \"\"\"\n\n def __init__(self,\n category_index,\n max_examples_to_draw=5,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True,\n summary_name_prefix='evaluation_image',\n keypoint_edges=None):\n \"\"\"Creates an EvalMetricOpsVisualization.\n\n Args:\n category_index: A category index (dictionary) produced from a labelmap.\n max_examples_to_draw: The maximum number of example summaries to produce.\n max_boxes_to_draw: The maximum number of boxes to draw for detections.\n min_score_thresh: The minimum score threshold for showing detections.\n use_normalized_coordinates: Whether to assume boxes and keypoints are in\n normalized coordinates (as opposed to absolute coordinates).\n Default is True.\n summary_name_prefix: A string prefix for each image summary.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n \"\"\"\n\n self._category_index = category_index\n self._max_examples_to_draw = max_examples_to_draw\n self._max_boxes_to_draw = max_boxes_to_draw\n self._min_score_thresh = min_score_thresh\n self._use_normalized_coordinates = use_normalized_coordinates\n self._summary_name_prefix = summary_name_prefix\n self._keypoint_edges = keypoint_edges\n self._images = []\n\n def clear(self):\n self._images = []\n\n def add_images(self, images):\n \"\"\"Store a list of images, each with shape [1, H, W, C].\"\"\"\n if len(self._images) >= self._max_examples_to_draw:\n return\n\n # Store images and clip list if necessary.\n self._images.extend(images)\n if len(self._images) > self._max_examples_to_draw:\n self._images[self._max_examples_to_draw:] = []\n\n def get_estimator_eval_metric_ops(self, eval_dict):\n \"\"\"Returns metric ops for use in tf.estimator.EstimatorSpec.\n\n Args:\n eval_dict: A dictionary that holds an image, groundtruth, and detections\n for a batched example. Note that, we use only the first example for\n visualization. See eval_util.result_dict_for_batched_example() for a\n convenient method for constructing such a dictionary. 
The dictionary\n contains\n fields.InputDataFields.original_image: [batch_size, H, W, 3] image.\n fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]\n tensor containing the size of the original image.\n fields.InputDataFields.true_image_shape: [batch_size, 3]\n tensor containing the spatial size of the upadded original image.\n fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]\n float32 tensor with groundtruth boxes in range [0.0, 1.0].\n fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]\n int64 tensor with 1-indexed groundtruth classes.\n fields.InputDataFields.groundtruth_instance_masks - (optional)\n [batch_size, num_boxes, H, W] int64 tensor with instance masks.\n fields.InputDataFields.groundtruth_keypoints - (optional)\n [batch_size, num_boxes, num_keypoints, 2] float32 tensor with\n keypoint coordinates in format [y, x].\n fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)\n [batch_size, num_boxes, num_keypoints] bool tensor with\n keypoint visibilities.\n fields.DetectionResultFields.detection_boxes - [batch_size,\n max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,\n 1.0].\n fields.DetectionResultFields.detection_classes - [batch_size,\n max_num_boxes] int64 tensor with 1-indexed detection classes.\n fields.DetectionResultFields.detection_scores - [batch_size,\n max_num_boxes] float32 tensor with detection scores.\n fields.DetectionResultFields.detection_masks - (optional) [batch_size,\n max_num_boxes, H, W] float32 tensor of binarized masks.\n fields.DetectionResultFields.detection_keypoints - (optional)\n [batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with\n keypoints.\n fields.DetectionResultFields.detection_keypoint_scores - (optional)\n [batch_size, max_num_boxes, num_keypoints] float32 tensor with\n keypoints scores.\n\n Returns:\n A dictionary of image summary names to tuple of (value_op, update_op). The\n `update_op` is the same for all items in the dictionary, and is\n responsible for saving a single side-by-side image with detections and\n groundtruth. 
Each `value_op` holds the tf.summary.image string for a given\n image.\n \"\"\"\n if self._max_examples_to_draw == 0:\n return {}\n images = self.images_from_evaluation_dict(eval_dict)\n\n def get_images():\n \"\"\"Returns a list of images, padded to self._max_images_to_draw.\"\"\"\n images = self._images\n while len(images) < self._max_examples_to_draw:\n images.append(np.array(0, dtype=np.uint8))\n self.clear()\n return images\n\n def image_summary_or_default_string(summary_name, image):\n \"\"\"Returns image summaries for non-padded elements.\"\"\"\n return tf.cond(\n tf.equal(tf.size(tf.shape(image)), 4),\n lambda: tf.summary.image(summary_name, image),\n lambda: tf.constant(''))\n\n if tf.executing_eagerly():\n update_op = self.add_images([[images[0]]])\n image_tensors = get_images()\n else:\n update_op = tf.py_func(self.add_images, [[images[0]]], [])\n image_tensors = tf.py_func(\n get_images, [], [tf.uint8] * self._max_examples_to_draw)\n eval_metric_ops = {}\n for i, image in enumerate(image_tensors):\n summary_name = self._summary_name_prefix + '/' + str(i)\n value_op = image_summary_or_default_string(summary_name, image)\n eval_metric_ops[summary_name] = (value_op, update_op)\n return eval_metric_ops\n\n @abc.abstractmethod\n def images_from_evaluation_dict(self, eval_dict):\n \"\"\"Converts evaluation dictionary into a list of image tensors.\n\n To be overridden by implementations.\n\n Args:\n eval_dict: A dictionary with all the necessary information for producing\n visualizations.\n\n Returns:\n A list of [1, H, W, C] uint8 tensors.\n \"\"\"\n raise NotImplementedError\n\n\nclass VisualizeSingleFrameDetections(EvalMetricOpsVisualization):\n \"\"\"Class responsible for single-frame object detection visualizations.\"\"\"\n\n def __init__(self,\n category_index,\n max_examples_to_draw=5,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True,\n summary_name_prefix='Detections_Left_Groundtruth_Right',\n keypoint_edges=None):\n super(VisualizeSingleFrameDetections, self).__init__(\n category_index=category_index,\n max_examples_to_draw=max_examples_to_draw,\n max_boxes_to_draw=max_boxes_to_draw,\n min_score_thresh=min_score_thresh,\n use_normalized_coordinates=use_normalized_coordinates,\n summary_name_prefix=summary_name_prefix,\n keypoint_edges=keypoint_edges)\n\n def images_from_evaluation_dict(self, eval_dict):\n return draw_side_by_side_evaluation_image(eval_dict, self._category_index,\n self._max_boxes_to_draw,\n self._min_score_thresh,\n self._use_normalized_coordinates,\n self._keypoint_edges)\n"
] | [
[
"numpy.sum",
"numpy.histogram",
"numpy.ones_like",
"tensorflow.compat.v1.py_func",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.image.resize",
"tensorflow.compat.v1.shape",
"matplotlib.pyplot.figure",
"numpy.logical_and",
"tensorflow.compat.v1.math.sigmoid",
"numpy.abs",
"tensorflow.compat.v1.py_function",
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.image.grayscale_to_rgb",
"numpy.isnan",
"numpy.uint8",
"numpy.ceil",
"numpy.greater",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.map_fn",
"numpy.arange",
"numpy.sort",
"tensorflow.compat.v1.image.resize_images",
"numpy.zeros_like",
"numpy.cumsum",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.gfile.Open",
"tensorflow.compat.v1.summary.image",
"tensorflow.compat.v1.squeeze",
"numpy.array"
]
] |
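A minimal, hedged usage sketch for the visualization helpers in the row above. The module name `visualization_utils` is an assumed import path (it depends on the repo layout); the blank image, single box, and one-entry `category_index` are toy values, not part of the original file.

```python
import numpy as np
import visualization_utils as viz  # assumed import path for the file above

# Toy inputs: one detection on a blank 480x640 RGB canvas.
image = np.zeros((480, 640, 3), dtype=np.uint8)
boxes = np.array([[0.1, 0.1, 0.6, 0.5]], dtype=np.float32)  # [ymin, xmin, ymax, xmax]
classes = np.array([1], dtype=np.int64)                     # 1-based class ids
scores = np.array([0.9], dtype=np.float32)
category_index = {1: {'id': 1, 'name': 'car'}}              # toy label map

# Draws the box, label, and score onto `image` in place (and returns it).
viz.visualize_boxes_and_labels_on_image_array(
    image, boxes, classes, scores, category_index,
    use_normalized_coordinates=True,  # the box above is in [0, 1] coordinates
    min_score_thresh=.5,
    line_thickness=4)
```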
huy-ha/dreamer-pytorch | [
"98561a5fe4ee5323b955f5fc79bbebf483f08d58"
] | [
"dreamer/models/rnns.py"
] | [
"import torch\nimport torch.distributions as td\nimport torch.nn as nn\nimport torch.nn.functional as tf\nfrom rlpyt.utils.collections import namedarraytuple\nfrom rlpyt.utils.buffer import buffer_method\n\nfrom dreamer.utils.module import FreezeParameters\n\nRSSMState = namedarraytuple('RSSMState', ['mean', 'std', 'stoch', 'deter'])\n\n\ndef stack_states(rssm_states: list, dim):\n return RSSMState(\n torch.stack([state.mean for state in rssm_states], dim=dim),\n torch.stack([state.std for state in rssm_states], dim=dim),\n torch.stack([state.stoch for state in rssm_states], dim=dim),\n torch.stack([state.deter for state in rssm_states], dim=dim),\n )\n\n\ndef get_feat(rssm_state: RSSMState):\n return torch.cat((rssm_state.stoch, rssm_state.deter), dim=-1)\n\n\ndef get_dist(rssm_state: RSSMState):\n return td.independent.Independent(td.Normal(rssm_state.mean, rssm_state.std), 1)\n\n\nclass TransitionBase(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, prev_action, prev_state):\n \"\"\":return: next state\"\"\"\n raise NotImplementedError\n\n\nclass RepresentationBase(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, obs_embed, prev_action, prev_state):\n \"\"\":return: next state\"\"\"\n raise NotImplementedError\n\n\nclass RollOutModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, steps, obs_embed, prev_action, prev_state):\n raise NotImplementedError\n\n\nclass RSSMTransition(TransitionBase):\n def __init__(self, action_size, stochastic_size=30, deterministic_size=200, hidden_size=200, activation=nn.ELU,\n distribution=td.Normal):\n super().__init__()\n self._action_size = action_size\n self._stoch_size = stochastic_size\n self._deter_size = deterministic_size\n self._hidden_size = hidden_size\n self._activation = activation\n self._cell = nn.GRUCell(hidden_size, deterministic_size)\n self._rnn_input_model = self._build_rnn_input_model()\n self._stochastic_prior_model = self._build_stochastic_model()\n self._dist = distribution\n\n def _build_rnn_input_model(self):\n rnn_input_model = [\n nn.Linear(self._action_size + self._stoch_size, self._hidden_size)]\n rnn_input_model += [self._activation()]\n return nn.Sequential(*rnn_input_model)\n\n def _build_stochastic_model(self):\n stochastic_model = [nn.Linear(self._hidden_size, self._hidden_size)]\n stochastic_model += [self._activation()]\n stochastic_model += [nn.Linear(self._hidden_size,\n 2 * self._stoch_size)]\n return nn.Sequential(*stochastic_model)\n\n def initial_state(self, batch_size, **kwargs):\n return RSSMState(\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._deter_size, **kwargs),\n )\n\n def forward(self, prev_action: torch.Tensor, prev_state: RSSMState):\n if len(prev_action.shape) != len(prev_state.stoch.shape):\n prev_state = RSSMState(\n mean=prev_state.mean.unsqueeze(dim=0),\n std=prev_state.std.unsqueeze(dim=0),\n stoch=prev_state.stoch.unsqueeze(dim=0),\n deter=prev_state.deter.unsqueeze(dim=0))\n rnn_input = self._rnn_input_model(\n torch.cat([prev_action, prev_state.stoch], dim=-1))\n deter_state = self._cell(rnn_input, prev_state.deter)\n mean, std = torch.chunk(\n self._stochastic_prior_model(deter_state), 2, dim=-1)\n std = tf.softplus(std) + 0.1\n dist = self._dist(mean, std)\n stoch_state = dist.rsample()\n return RSSMState(mean, std, stoch_state, deter_state)\n\n\nclass 
RSSMRepresentation(RepresentationBase):\n def __init__(self, transition_model: RSSMTransition, obs_embed_size, action_size, stochastic_size=30,\n deterministic_size=200, hidden_size=200, activation=nn.ELU, distribution=td.Normal):\n super().__init__()\n self._transition_model = transition_model\n self._obs_embed_size = obs_embed_size\n self._action_size = action_size\n self._stoch_size = stochastic_size\n self._deter_size = deterministic_size\n self._hidden_size = hidden_size\n self._activation = activation\n self._dist = distribution\n self._stochastic_posterior_model = self._build_stochastic_model()\n\n def _build_stochastic_model(self):\n stochastic_model = [\n nn.Linear(self._deter_size + self._obs_embed_size, self._hidden_size)]\n stochastic_model += [self._activation()]\n stochastic_model += [nn.Linear(self._hidden_size,\n 2 * self._stoch_size)]\n return nn.Sequential(*stochastic_model)\n\n def initial_state(self, batch_size, **kwargs):\n return RSSMState(\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._deter_size, **kwargs),\n )\n\n def forward(self, obs_embed: torch.Tensor, prev_action: torch.Tensor, prev_state: RSSMState):\n prior_state = self._transition_model(prev_action, prev_state)\n x = torch.cat([prior_state.deter, obs_embed], -1)\n mean, std = torch.chunk(self._stochastic_posterior_model(x), 2, dim=-1)\n std = tf.softplus(std) + 0.1\n dist = self._dist(mean, std)\n stoch_state = dist.rsample()\n posterior_state = RSSMState(mean, std, stoch_state, prior_state.deter)\n return prior_state, posterior_state\n\n\nclass RSSMRollout(RollOutModule):\n def __init__(self, representation_model: RSSMRepresentation, transition_model: RSSMTransition):\n super().__init__()\n self.representation_model = representation_model\n self.transition_model = transition_model\n\n def forward(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor, prev_state: RSSMState):\n return self.rollout_representation(steps, obs_embed, action, prev_state)\n\n def rollout_representation(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor,\n prev_state: RSSMState):\n \"\"\"\n Roll out the model with actions and observations from data.\n :param steps: number of steps to roll out\n :param obs_embed: size(time_steps, batch_size, embedding_size)\n :param action: size(time_steps, batch_size, action_size)\n :param prev_state: RSSM state, size(batch_size, state_size)\n :return: prior, posterior states. size(time_steps, batch_size, state_size)\n \"\"\"\n priors = []\n posteriors = []\n for t in range(steps):\n prior_state, posterior_state = self.representation_model(\n obs_embed[t], action[t], prev_state)\n priors.append(prior_state)\n posteriors.append(posterior_state)\n prev_state = posterior_state\n prior = stack_states(priors, dim=0)\n post = stack_states(posteriors, dim=0)\n return prior, post\n\n def rollout_transition(self, steps: int, action: torch.Tensor, prev_state: RSSMState):\n \"\"\"\n Roll out the model with actions from data.\n :param steps: number of steps to roll out\n :param action: size(time_steps, batch_size, action_size)\n :param prev_state: RSSM state, size(batch_size, state_size)\n :return: prior states. 
size(time_steps, batch_size, state_size)\n \"\"\"\n priors = []\n state = prev_state\n for t in range(steps):\n state = self.transition_model(action[t], state)\n priors.append(state)\n return stack_states(priors, dim=0)\n\n def rollout_policy(self, steps: int, policy, prev_state: RSSMState):\n \"\"\"\n Roll out the model with a policy function.\n :param steps: number of steps to roll out\n :param policy: RSSMState -> action\n :param prev_state: RSSM state, size(batch_size, state_size)\n :return: next states size(time_steps, batch_size, state_size),\n actions size(time_steps, batch_size, action_size)\n \"\"\"\n state = prev_state\n next_states = []\n actions = []\n state = buffer_method(state, 'detach')\n for t in range(steps):\n action, _ = policy(buffer_method(state, 'detach'))\n state = self.transition_model(action, state)\n next_states.append(state)\n actions.append(action)\n next_states = stack_states(next_states, dim=0)\n actions = torch.stack(actions, dim=0)\n return next_states, actions\n"
] | [
[
"torch.stack",
"torch.distributions.Normal",
"torch.nn.Linear",
"torch.nn.GRUCell",
"torch.nn.Sequential",
"torch.zeros",
"torch.nn.functional.softplus",
"torch.cat"
]
] |
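A short sketch of stepping the RSSM prior defined in the row above, assuming the file is importable as `rnns` and that rlpyt (which the module imports) is installed.

```python
import torch
from rnns import RSSMTransition, get_feat  # assumed import path for the file above

action_size, batch_size = 4, 8
transition = RSSMTransition(action_size)      # stochastic=30, deterministic=200 by default
state = transition.initial_state(batch_size)  # zero-filled RSSMState
action = torch.zeros(batch_size, action_size)

next_state = transition(action, state)        # one prior step through the GRU cell
features = get_feat(next_state)               # concat(stoch, deter) along the last dim
print(features.shape)                         # torch.Size([8, 230])
```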
PhilaController/phl-budget-data | [
"fd249937c843aaff2375624160e2bec0b8043e3c"
] | [
"src/phl_budget_data/etl/collections/monthly/school.py"
] | [
"\"\"\"Module for parsing montly school collections data.\"\"\"\nfrom typing import ClassVar\n\nimport pandas as pd\nimport pdfplumber\n\nfrom ...utils.misc import rename_tax_rows\nfrom ...utils.pdf import extract_words, words_to_table\nfrom .core import COLLECTION_TYPES, MonthlyCollectionsReport, get_column_names\n\n\nclass SchoolTaxCollections(MonthlyCollectionsReport): # type: ignore\n \"\"\"\n Monthly School District Collections Report.\n\n Parameters\n ----------\n month :\n the calendar month number (starting at 1)\n year :\n the calendar year\n \"\"\"\n\n report_type: ClassVar[COLLECTION_TYPES] = \"school\"\n\n @property\n def legacy(self) -> bool:\n \"\"\"Whether the format is the legacy or current version.\"\"\"\n return self.num_pages > 1\n\n def extract(self) -> pd.DataFrame:\n \"\"\"Internal function to parse the contents of a legacy PDF page.\"\"\"\n\n # Open the PDF document\n with pdfplumber.open(self.path) as pdf:\n\n # Loop over each page\n out: list[pd.DataFrame] = []\n for pg in pdf.pages:\n\n # Extract the words\n words = extract_words(\n pg, keep_blank_chars=False, x_tolerance=1, y_tolerance=1\n )\n\n # Group the words into a table\n data = words_to_table(\n words,\n text_tolerance_y=5,\n text_tolerance_x=5,\n column_tolerance=20,\n min_col_sep=24,\n row_header_tolerance=10,\n )\n\n # Skip the header (first five rows)\n data = data.iloc[6:]\n assert \"REAL ESTATE\" in data.iloc[0][0]\n\n # # Remove first row of header if we need to\n # for phrase in [\"prelim\", \"final\", \"budget\"]:\n # sel = data[0].str.lower().str.startswith(phrase)\n # data = data.loc[~sel]\n\n # # Remove empty columns\n # data = remove_empty_columns(data, use_nan=False)\n\n # Check number of columns\n if len(out):\n if len(data.columns) != len(out[-1].columns):\n raise ValueError(\"Column mismatch when parsing multiple pages\")\n\n # Save it\n out.append(data)\n\n # Return concatenation\n return pd.concat(out, axis=0, ignore_index=True)\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Transform the raw parsing data into a clean data frame.\"\"\"\n\n # Call base transform\n data = super().transform(data)\n\n # Determine columns for the report\n columns = get_column_names(self.month, self.year)\n\n ncols = len(data.columns)\n assert ncols in [11, 12, 14]\n\n if ncols == 14:\n data = data.drop(labels=[7, 8, 9, 10], axis=1)\n else:\n data = data.drop(labels=[data.columns[-6]], axis=1)\n\n # Set the columns\n columns = [\"name\"] + columns[-7:]\n data = data[[0] + list(data.columns[-7:])]\n\n assert len(columns) == len(data.columns)\n assert len(data) in [14, 15]\n\n # Do current/prior/total\n if len(data) == 14:\n index = rename_tax_rows(\n data,\n 0,\n [\"real_estate\", \"school_income\", \"use_and_occupancy\", \"liquor\"],\n )\n else:\n index = rename_tax_rows(\n data,\n 0,\n [\"real_estate\"], # , \"school_income\", \"use_and_occupancy\", \"liquor\"],\n )\n\n if \"PAYMENT\" in data.loc[index, 0]:\n data.loc[index, 0] = \"pilots_total\"\n index += 1\n\n index = rename_tax_rows(\n data,\n index,\n [\"school_income\", \"use_and_occupancy\", \"liquor\"],\n )\n\n if \"PAYMENT\" in data.loc[index, 0]:\n data.loc[index, 0] = \"pilots_total\"\n index += 1\n\n # Other non-tax\n data.loc[index, 0] = \"other_nontax_total\"\n index += 1\n\n # Total\n data.loc[index, 0] = \"total_revenue_total\"\n index += 1\n\n # Set the columns\n data.columns = columns\n\n # Split out current/prior/total into its own column\n data[\"kind\"] = data[\"name\"].apply(lambda x: x.split(\"_\")[-1])\n 
data[\"name\"] = data[\"name\"].apply(lambda x: \"_\".join(x.split(\"_\")[:-1]))\n\n return data\n\n def validate(self, data: pd.DataFrame) -> bool:\n \"\"\"Validate the input data.\"\"\"\n\n # Sum up\n t = data.query(\"kind == 'total' and name != 'total_revenue'\")\n t = t.filter(regex=f\"^{self.month_name}\", axis=1)\n\n # Compare to total\n for col in t.columns:\n total_revenue = data.query(\"name == 'total_revenue'\")[col].squeeze()\n diff = t[col].sum() - total_revenue\n assert diff < 5\n\n return True\n\n def load(self, data: pd.DataFrame) -> None:\n \"\"\"Load the data.\"\"\"\n\n # Get the path\n dirname = self.get_data_directory(\"processed\")\n path = dirname / f\"{self.year}-{self.month:02d}-tax.csv\"\n\n # Load\n super()._load_csv_data(data, path)\n"
] | [
[
"pandas.concat"
]
] |
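A sketch of the extract/transform/validate/load flow the class above implements. The `month`/`year` keyword arguments are taken from the class docstring, and the report PDF is assumed to already exist wherever the `MonthlyCollectionsReport` base class resolves `self.path`; neither assumption is verified against the rest of the repo.

```python
from phl_budget_data.etl.collections.monthly.school import SchoolTaxCollections

report = SchoolTaxCollections(month=1, year=2021)  # January 2021 report (assumed signature)

raw = report.extract()          # pdfplumber words -> one table per page, concatenated
clean = report.transform(raw)   # renamed tax rows plus 'name'/'kind' columns
report.validate(clean)          # category totals must reconcile to total_revenue
report.load(clean)              # writes processed/<year>-<month>-tax.csv
```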
code-backdoor/code-backdoor | [
"1eeb3d79aa8a54c8f08e8d0156b569de5edd974e"
] | [
"Birnn_Transformer/ncc/utils/graph.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport dgl\nimport networkx as nx\nimport numpy as np\nimport torch\n\nfrom dataset.codesearchnet import MAX_SUB_TOKEN_LEN\n\n\ndef build_graph(tree_dict, dictionary, tree_leaf_subtoken=1, DGLGraph_PAD_WORD=-1) -> dgl.DGLGraph:\n # 叶子节点存的是拆开后的subtoken ,当然,如果token拆不开,那就还是一个token\n # 用来训练的.pt数据里叶子节点token保存格式是[\"a_hu\",[\"a\",\"hu\"]],\n # (1)tree_leaf_subtoken为1时 本函数只将其subtoken转换成wordid ,#即保存为[和a对应的id,和hu对应的id],比如[23,179]\n # 如果是拆不开的token,pt数据里格式是 [\"imq\",[\"imq\",PAD_WORD]]\n # 那么这里将其转换为[和imq对应的id,和codesum.PAD_WORD],比如[258,0]\n # pad到的长度由train val test整个数据集里token拆开后最大长度决定\n # (2)tree_leaf_subtoken为0时,本函数用的拆之前的token得到wordid,即比如用a_hu得到wordid\n nx_graph = nx.DiGraph()\n\n def _build(nid, idx, tree):\n # non-leaf node, 'children': [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]]\n if not isinstance(tree[idx]['children'][1], list):\n child_ids = tree[idx]['children']\n if nid is None:\n nx_graph.add_node(0, x=[DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y=int(idx), mask=0)\n # print('node={}, x={}, y={}, mask={}'.format(0, [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, int(idx), 0))\n nid = 0\n for idx in child_ids:\n cid = nx_graph.number_of_nodes()\n y_value = int(idx)\n if not isinstance(tree[str(idx)]['children'][1], list): # non-leaf node\n nx_graph.add_node(cid, x=[DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y=y_value, mask=0)\n # print(\n # 'node={}, x={}, y={}, mask={}'.format(cid, [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y_value, 0))\n _build(cid, str(idx), tree)\n else: # leaf node\n if tree_leaf_subtoken:\n word_index = [dictionary.index(subtoken) for subtoken in tree[str(idx)]['children'][1]]\n else:\n word_index = [dictionary.index(tree[idx]['children'][0])]\n nx_graph.add_node(cid, x=word_index, y=y_value, mask=1)\n # print('node={}, x={}, y={}, mask={}'.format(cid, word_index, y_value, 1))\n nx_graph.add_edge(cid, nid) # 因为用的 DiGraph,所以这里添加的edge应该是cid指向nid,而nid是root节点的方向,cid是叶子节点的方向\n # print('edge={}->{}'.format(cid, nid))\n else: # leaf node\n if tree_leaf_subtoken:\n word_index = [dictionary.index(subtoken) for subtoken in tree[idx]['children'][-1]]\n else:\n word_index = [dictionary.index(tree[idx]['children'][0])]\n if nid is None:\n cid = 0\n else:\n cid = nx_graph.number_of_nodes()\n nx_graph.add_node(cid, x=word_index, y=int(idx), mask=1)\n # print('node={}, x={}, y={}, mask={}'.format(cid, word_index, int(idx), 1))\n\n if nid is not None:\n nx_graph.add_edge(cid, nid) # 因为用的 DiGraph,所以这里添加的edge应该是cid指向nid,而nid是root节点的方向,cid是叶子节点的方向\n # print('edge={}->{}'.format(cid, nid))\n\n _build(None, '0', tree_dict)\n dgl_graph = dgl.DGLGraph()\n\n dgl_graph.from_networkx(nx_graph, node_attrs=['x', 'y', 'mask'])\n assert len(tree_dict) == dgl_graph.number_of_nodes(), Exception('build dgl tree error')\n return dgl_graph\n\n\ndef tree2dgl(tree_dict, dictionary, DGLGraph_PAD_WORD=-1):\n \"\"\"\n if _subtoken == True, it means that we tokenize leaf node info into sub-tokens\n e.g. [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]]\n else, no tokenization. e.g. 
[\"sub_token\"]\n \"\"\"\n _subtoken = False\n for node in tree_dict.values():\n if isinstance(node['children'][1], list):\n _subtoken = True\n break\n\n def nonleaf_node_info():\n if _subtoken:\n return [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN\n else:\n return [DGLGraph_PAD_WORD]\n\n def token2idx(node_info):\n \"\"\"\n node info => indices\n if _subtoken == True, [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]] => index([\"sub\", \"token\", <PAD>, <PAD>, <PAD>])\n else, [\"sub_token\"] => index([\"sub_token\"])\n \"\"\"\n if _subtoken:\n return [dictionary.index(subtoken) for subtoken in node_info[-1]]\n else:\n return [dictionary.index(node_info[0])]\n\n \"\"\"\n how to build DGL graph?\n node: \n x: node info (if it's non-leaf nodes, padded with [-1, ...]),\n y: current node idx\n mask: if leaf node, mask=1; else, mask=0\n * if current node is the root node,\n edge: child => parent \n \"\"\"\n dgl_graph = dgl.DGLGraph()\n ids = sorted(tree_dict.keys(), key=int)\n\n dgl_graph.add_nodes(\n len(tree_dict),\n data={\n 'x': torch.LongTensor([\n token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \\\n else nonleaf_node_info()\n for idx in ids\n ]),\n 'y': torch.LongTensor(range(len(tree_dict))),\n 'mask': torch.LongTensor([isinstance(tree_dict[idx]['children'][1], list) for idx in ids]),\n }\n )\n\n for idx in ids:\n node = tree_dict[idx]\n if node['parent'] is not None:\n dgl_graph.add_edges(int(idx), int(node['parent']))\n # print('edge={}->{}'.format(int(idx), int(node['parent'])))\n\n return dgl_graph\n\n\ndef tree2nx2dgl(tree_dict, dictionary, DGLGraph_PAD_WORD=-1):\n \"\"\"\n if _subtoken == True, it means that we tokenize leaf node info into sub-tokens\n e.g. [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]]\n else, no tokenization. e.g. 
[\"sub_token\"]\n \"\"\"\n _subtoken = False\n for node in tree_dict.values():\n if isinstance(node['children'][1], list):\n _subtoken = True\n break\n\n def nonleaf_node_info():\n if _subtoken:\n return [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN\n else:\n return [DGLGraph_PAD_WORD]\n\n def token2idx(node_info):\n \"\"\"\n node info => indices\n if _subtoken == True, [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]] => index([\"sub\", \"token\", <PAD>, <PAD>, <PAD>])\n else, [\"sub_token\"] => index([\"sub_token\"])\n \"\"\"\n if _subtoken:\n return [dictionary.index(subtoken) for subtoken in node_info[-1]]\n else:\n return [dictionary.index(node_info[0])]\n\n \"\"\"\n how to build DGL graph?\n node: \n x: node info (if it's non-leaf nodes, padded with [-1, ...]),\n y: current node idx\n mask: if leaf node, mask=1; else, mask=0\n * if current node is the root node,\n edge: child => parent \n \"\"\"\n\n nx_graph = nx.DiGraph()\n ids = sorted(tree_dict.keys(), key=int)\n\n for idx in ids:\n node = tree_dict[idx]\n\n nx_graph.add_node(\n int(idx),\n x=token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \\\n else nonleaf_node_info(),\n y=int(idx),\n mask=int(isinstance(tree_dict[idx]['children'][1], list))\n )\n # print('node={}, x={}, y={}, mask={}'.format(\n # idx, token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \\\n # else nonleaf_node_info(), int(idx), int(isinstance(tree_dict[idx]['children'][1], list))))\n if node['parent'] is not None:\n nx_graph.add_edge(int(idx), int(node['parent']))\n # print('edge={}->{}'.format(int(idx), int(node['parent'])))\n\n dgl_graph = dgl.DGLGraph()\n\n dgl_graph.from_networkx(nx_graph, node_attrs=['x', 'y', 'mask'])\n assert len(tree_dict) == dgl_graph.number_of_nodes(), Exception('build dgl tree error')\n return dgl_graph\n\n\ndef pack_graph(graphs):\n def get_root_node_info(dgl_trees):\n root_indices, node_nums = [None] * len(dgl_trees), [None] * len(dgl_trees)\n for ind, tree in enumerate(dgl_trees):\n topological_nodes = dgl.topological_nodes_generator(tree)\n root_ind_tree_dgldigraph = topological_nodes[-1].item()\n root_indices[ind] = root_ind_tree_dgldigraph\n all_num_node_tree_dgldigraph = tree.number_of_nodes()\n node_nums[ind] = all_num_node_tree_dgldigraph\n root_indices = np.array(root_indices)\n num_nodes = np.array(node_nums)\n return root_indices, num_nodes,\n\n # merge many dgl graphs into a huge one\n root_indices, node_nums, = get_root_node_info(graphs)\n packed_graph = dgl.batch(graphs)\n return packed_graph, root_indices, node_nums,\n\n\nif __name__ == '__main__':\n from ncc.tasks.summarization import SummarizationTask\n\n dict = SummarizationTask.load_dictionary(\n filename='/home/yang/.ncc/multi/summarization/data-mmap/ruby/binary_ast.dict.json'\n )\n\n bin_ast = {\n \"0\": {\"type\": \"method\", \"parent\": None, \"children\": [1, 2]},\n \"1\": {\"type\": \"def_keyword\", \"parent\": 0, \"children\": [\"def\", [\"def\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"2\": {\"type\": \"TMP\", \"parent\": 0, \"children\": [3, 4]},\n \"3\": {\"type\": \"identifier\", \"parent\": 2, \"children\": [\"set\", [\"set\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"4\": {\"type\": \"TMP\", \"parent\": 2, \"children\": [5, 10]},\n \"5\": {\"type\": \"method_parameters\", \"parent\": 4, \"children\": [6, 7]},\n \"6\": {\"type\": \"LeftParenOp\", \"parent\": 5, \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"7\": 
{\"type\": \"TMP\", \"parent\": 5, \"children\": [8, 9]}, \"8\": {\"type\": \"identifier\", \"parent\": 7,\n \"children\": [\"set_attributes\",\n [\"set\", \"attributes\", \"<pad>\",\n \"<pad>\", \"<pad>\"]]},\n \"9\": {\"type\": \"LeftParenOp\", \"parent\": 7, \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"10\": {\"type\": \"TMP\", \"parent\": 4, \"children\": [11, 26]},\n \"11\": {\"type\": \"assignment\", \"parent\": 10, \"children\": [12, 13]},\n \"12\": {\"type\": \"identifier\", \"parent\": 11,\n \"children\": [\"old_attributes\", [\"old\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"13\": {\"type\": \"TMP\", \"parent\": 11, \"children\": [14, 15]},\n \"14\": {\"type\": \"AsgnOp\", \"parent\": 13, \"children\": [\"=\", [\"=\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"15\": {\"type\": \"method_call\", \"parent\": 13, \"children\": [16, 17]},\n \"16\": {\"type\": \"identifier\", \"parent\": 15,\n \"children\": [\"compute_attributes\", [\"compute\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"17\": {\"type\": \"argument_list\", \"parent\": 15, \"children\": [18, 19]},\n \"18\": {\"type\": \"LeftParenOp\", \"parent\": 17,\n \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"19\": {\"type\": \"TMP\", \"parent\": 17, \"children\": [20, 25]},\n \"20\": {\"type\": \"call\", \"parent\": 19, \"children\": [21, 22]},\n \"21\": {\"type\": \"identifier\", \"parent\": 20,\n \"children\": [\"set_attributes\", [\"set\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"22\": {\"type\": \"TMP\", \"parent\": 20, \"children\": [23, 24]},\n \"23\": {\"type\": \"DotOp\", \"parent\": 22, \"children\": [\".\", [\".\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"24\": {\"type\": \"identifier\", \"parent\": 22,\n \"children\": [\"keys\", [\"keys\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"25\": {\"type\": \"LeftParenOp\", \"parent\": 19,\n \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"26\": {\"type\": \"TMP\", \"parent\": 10, \"children\": [27, 34]},\n \"27\": {\"type\": \"method_call\", \"parent\": 26, \"children\": [28, 29]},\n \"28\": {\"type\": \"identifier\", \"parent\": 27,\n \"children\": [\"assign_attributes\", [\"assign\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"29\": {\"type\": \"argument_list\", \"parent\": 27, \"children\": [30, 31]},\n \"30\": {\"type\": \"LeftParenOp\", \"parent\": 29,\n \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"31\": {\"type\": \"TMP\", \"parent\": 29, \"children\": [32, 33]},\n \"32\": {\"type\": \"identifier\", \"parent\": 31,\n \"children\": [\"set_attributes\", [\"set\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"33\": {\"type\": \"LeftParenOp\", \"parent\": 31,\n \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"34\": {\"type\": \"TMP\", \"parent\": 26, \"children\": [35, 36]},\n \"35\": {\"type\": \"yield_keyword\", \"parent\": 34,\n \"children\": [\"yield\", [\"yield\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"36\": {\"type\": \"TMP\", \"parent\": 34, \"children\": [37, 46]},\n \"37\": {\"type\": \"ensure\", \"parent\": 36, \"children\": [38, 39]},\n \"38\": {\"type\": \"ensure_keyword\", \"parent\": 37,\n \"children\": [\"ensure\", [\"ensure\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"39\": {\"type\": \"method_call\", \"parent\": 37, \"children\": [40, 41]},\n \"40\": {\"type\": \"identifier\", 
\"parent\": 39,\n \"children\": [\"assign_attributes\", [\"assign\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"41\": {\"type\": \"argument_list\", \"parent\": 39, \"children\": [42, 43]},\n \"42\": {\"type\": \"LeftParenOp\", \"parent\": 41,\n \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"43\": {\"type\": \"TMP\", \"parent\": 41, \"children\": [44, 45]}, \"44\": {\"type\": \"identifier\", \"parent\": 43,\n \"children\": [\"old_attributes\",\n [\"old\", \"attributes\",\n \"<pad>\", \"<pad>\",\n \"<pad>\"]]},\n \"45\": {\"type\": \"LeftParenOp\", \"parent\": 43,\n \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"46\": {\"type\": \"end_keyword\", \"parent\": 36,\n \"children\": [\"end\", [\"end\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]}}\n nx2dgl_graph = build_graph(bin_ast, dict)\n dgl_graph = tree2dgl(bin_ast, dict)\n dgl_graph"
] | [
[
"numpy.array"
]
] |
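A sketch of building a tiny tree graph with `tree2dgl` from the row above. The import path, a `MAX_SUB_TOKEN_LEN` of 5, and the stub dictionary (only `.index()` is used) are assumptions; it also relies on the older `dgl.DGLGraph()` API that the file itself targets.

```python
from ncc.utils.graph import tree2dgl  # assumed import path for the file above

class StubDict:
    """Stand-in for the NCC dictionary; tree2dgl only calls .index()."""
    def __init__(self):
        self.vocab = {'<pad>': 0}
    def index(self, token):
        return self.vocab.setdefault(token, len(self.vocab))

# Two leaves under one root; sub-token lists padded to MAX_SUB_TOKEN_LEN (assumed 5).
tree = {
    '0': {'type': 'root', 'parent': None, 'children': [1, 2]},
    '1': {'type': 'identifier', 'parent': 0,
          'children': ['a_hu', ['a', 'hu', '<pad>', '<pad>', '<pad>']]},
    '2': {'type': 'identifier', 'parent': 0,
          'children': ['imq', ['imq', '<pad>', '<pad>', '<pad>', '<pad>']]},
}
g = tree2dgl(tree, StubDict())                   # nodes carry x (token ids), y (index), mask
print(g.number_of_nodes(), g.number_of_edges())  # 3 nodes, 2 child->parent edges
```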
jphacks/C_2111 | [
"df87580614d7e5c225ea30746e5f2cd0576bbc98"
] | [
"bert/wtfml/data_loaders/nlp/classification.py"
] | [
"import pandas as pd\r\nimport torch\r\nfrom transformers import BertJapaneseTokenizer\r\nfrom wtfml.data_loaders.nlp.utils import clean_sentence\r\nimport transformers\r\n\r\nclass BERTSimpleDataset:\r\n \"\"\"\r\n Dataset for bert which can accept clearning function\r\n \"\"\"\r\n\r\n def __init__(self, input_texts, target, clearning_function=clean_sentence):\r\n if isinstance(input_texts, pd.Series):\r\n input_texts = list(input_texts)\r\n self.input_texts = input_texts\r\n self.target = target\r\n self.tokenizer = BertJapaneseTokenizer.from_pretrained(\r\n \"cl-tohoku/bert-base-japanese-whole-word-masking\"\r\n )\r\n self.max_len = 144 # twitter\r\n self.clearning_function = clearning_function\r\n\r\n def __len__(self):\r\n return len(self.input_texts)\r\n\r\n def __getitem__(self, item):\r\n input_text = str(self.input_texts[item])\r\n if self.clearning_function:\r\n input_text = self.clearning_function(input_text)\r\n\r\n inputs = self.tokenizer.encode_plus(\r\n input_text,\r\n None,\r\n add_special_tokens=True,\r\n max_length=self.max_len,\r\n padding=\"max_length\",\r\n truncation=True,\r\n # return_tensors=\"pt\"\r\n )\r\n\r\n ids = inputs[\"input_ids\"]\r\n mask = inputs[\"attention_mask\"]\r\n token_type_ids = inputs[\"token_type_ids\"]\r\n target = self.target[item]\r\n \r\n return {\r\n \"ids\": torch.tensor(ids, dtype=torch.long),\r\n \"mask\": torch.tensor(mask, dtype=torch.long),\r\n \"token_type_ids\": torch.tensor(token_type_ids, dtype=torch.long),\r\n \"targets\": torch.tensor(target, dtype=torch.long), # floatからlongに変更\r\n }\r\n\r\n \r\nclass DistilBERTDataset:\r\n \"\"\"\r\n Dataset for bert which can accept clearning function\r\n \"\"\"\r\n\r\n def __init__(self, input_texts, target, clearning_function=clean_sentence):\r\n if isinstance(input_texts, pd.Series):\r\n input_texts = list(input_texts)\r\n self.input_texts = input_texts\r\n self.target = target\r\n \r\n self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(\r\n \"cl-tohoku/bert-base-japanese-whole-word-masking\"\r\n )\r\n\r\n self.max_len = 144 # twitter\r\n self.clearning_function = clearning_function\r\n\r\n def __len__(self):\r\n return len(self.input_texts)\r\n\r\n def __getitem__(self, item):\r\n input_text = str(self.input_texts[item])\r\n if self.clearning_function:\r\n input_text = self.clearning_function(input_text)\r\n\r\n inputs = self.tokenizer.encode_plus(\r\n input_text,\r\n None,\r\n add_special_tokens=True,\r\n max_length=self.max_len,\r\n padding=\"max_length\",\r\n truncation=True,\r\n # return_tensors=\"pt\"\r\n )\r\n\r\n ids = inputs[\"input_ids\"]\r\n mask = inputs[\"attention_mask\"]\r\n # token_type_ids = inputs[\"token_type_ids\"]\r\n target = self.target[item]\r\n \r\n return {\r\n \"ids\": torch.tensor(ids, dtype=torch.long),\r\n \"mask\": torch.tensor(mask, dtype=torch.long),\r\n # \"token_type_ids\": torch.tensor(token_type_ids, dtype=torch.long),\r\n \"targets\": torch.tensor(target, dtype=torch.long), # floatからlongに変更\r\n }\r\n"
] | [
[
"torch.tensor"
]
] |
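A sketch wiring `BERTSimpleDataset` from the row above into a standard PyTorch `DataLoader`. Instantiating it downloads the Tohoku Japanese BERT tokenizer, which needs network access plus the `fugashi`/`ipadic` extras; the two example sentences are toy inputs.

```python
from torch.utils.data import DataLoader
from wtfml.data_loaders.nlp.classification import BERTSimpleDataset

texts = ["今日はいい天気ですね", "この映画は最悪だった"]  # "nice weather" / "worst movie"
labels = [1, 0]

dataset = BERTSimpleDataset(texts, labels)  # loads the cl-tohoku tokenizer on init
loader = DataLoader(dataset, batch_size=2)

batch = next(iter(loader))
print(batch["ids"].shape)  # torch.Size([2, 144]): token ids padded to max_len
print(batch["targets"])    # tensor([1, 0])
```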
Evelkos/CellularEvolutionaryAlgorithm | [
"9633337a00e20cb0c4d8a679e72755e165113468"
] | [
"src/cec2017/utils.py"
] | [
"# cec2017.utils\n# Author: Duncan Tilley\n# Additional functions for graphing and benchmarking\n\n\ndef surface_plot(function, domain=(-100, 100), points=30, dimension=2, ax=None):\n \"\"\"\n Creates a surface plot of a function.\n\n Args:\n function (function): The objective function to be called at each point.\n domain (num, num): The inclusive (min, max) domain for each dimension.\n points (int): The number of points to collect on each dimension. A total\n of points^2 function evaluations will be performed.\n dimension (int): The dimension to pass to the function. If this is more\n than 2, the elements after the first 2 will simply be zero,\n providing a slice at x_3 = 0, ..., x_n = 0.\n ax (matplotlib axes): Optional axes to use (must have projection='3d').\n Note, if specified plt.show() will not be called.\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n from mpl_toolkits import mplot3d\n\n # create points^2 tuples of (x,y) and populate z\n xys = np.linspace(domain[0], domain[1], points)\n xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))])\n zs = np.zeros(points * points)\n\n if dimension > 2:\n # concatenate remaining zeros\n tail = np.zeros(dimension - 2)\n for i in range(0, xys.shape[0]):\n zs[i] = function(np.concatenate([xys[i], tail]))\n else:\n for i in range(0, xys.shape[0]):\n zs[i] = function(xys[i])\n\n # create the plot\n ax_in = ax\n if ax is None:\n ax = plt.axes(projection=\"3d\")\n\n X = xys[:, 0].reshape((points, points))\n Y = xys[:, 1].reshape((points, points))\n Z = zs.reshape((points, points))\n ax.plot_surface(X, Y, Z, cmap=\"gist_ncar\", edgecolor=\"none\")\n ax.set_title(function.__name__)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n\n if ax_in is None:\n plt.show()\n\n\ndef time(function, domain=(-100, 100), points=30):\n \"\"\"\n Returns the time in seconds to calculate points^2 evaluations of the\n given function.\n\n function\n The objective function to be called at each point.\n domain\n The inclusive (min, max) domain for each dimension.\n points\n The number of points to collect on each dimension. A total of points^2\n function evaluations will be performed.\n \"\"\"\n from time import time\n\n import numpy as np\n\n # create points^2 tuples of (x,y) and populate z\n xys = np.linspace(domain[0], domain[1], points)\n xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))])\n zs = np.zeros(points * points)\n\n before = time()\n for i in range(0, xys.shape[0]):\n zs[i] = function(xys[i])\n return time() - before\n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.show",
"numpy.concatenate",
"numpy.linspace"
]
] |
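A sketch driving `surface_plot` and the module's `time` helper with a toy sphere objective instead of a CEC2017 benchmark function; the import path `cec2017.utils` follows the file path in the row, and `time` is aliased because it shadows the stdlib name inside that module.

```python
import numpy as np
from cec2017.utils import surface_plot, time as time_eval

def sphere(x):
    """Toy objective: sum of squares over a 2-D point."""
    return float(np.sum(x ** 2))

# 40x40 grid over [-5, 5]^2; opens a matplotlib window unless `ax` is passed.
surface_plot(sphere, domain=(-5, 5), points=40, dimension=2)

print(time_eval(sphere, domain=(-5, 5), points=40), 'seconds for 1600 evaluations')
```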
chamwen/NT-Benchmark | [
"d5a17a07fdfa89d80d47843c35ecf3e078b94371",
"d5a17a07fdfa89d80d47843c35ecf3e078b94371",
"d5a17a07fdfa89d80d47843c35ecf3e078b94371"
] | [
"NT_UDA/demo_syn_atdoc.py",
"NT_UDA/demo_syn_shot.py",
"NT_SSDA/demo_seed_dann.py"
] | [
"# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport numpy as np\nimport argparse\nimport os\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils import network, loss, utils\nfrom utils.network import calc_coeff\nfrom utils.dataloader import read_syn_src_tar\nfrom utils.utils import lr_scheduler_full, fix_random_seed, add_label_noise_noimg\nfrom utils.loss import CELabelSmooth, CDANE, Entropy, RandomLayer\nimport torch.utils.data as Data\n\n\ndef data_load(Xs, Ys, Xt, Yt, args):\n dset_loaders = {}\n train_bs = args.batch_size\n\n if args.noise_rate > 0:\n Ys = add_label_noise_noimg(Ys, args.seed, args.class_num, args.noise_rate)\n\n sample_idx_tar = tr.from_numpy(np.arange(len(Yt))).long()\n data_src = Data.TensorDataset(Xs, Ys)\n data_tar = Data.TensorDataset(Xt, Yt)\n data_tar_idx = Data.TensorDataset(Xt, Yt, sample_idx_tar)\n\n # for DAN/DANN/CDAN/MCC\n dset_loaders[\"source\"] = Data.DataLoader(data_src, batch_size=train_bs, shuffle=True, drop_last=True)\n dset_loaders[\"target\"] = Data.DataLoader(data_tar_idx, batch_size=train_bs, shuffle=True, drop_last=True)\n dset_loaders[\"Target\"] = Data.DataLoader(data_tar, batch_size=train_bs * 3, shuffle=False, drop_last=False)\n\n return dset_loaders\n\n\ndef train_target(args):\n X_src, y_src, X_tar, y_tar = read_syn_src_tar(args)\n dset_loaders = data_load(X_src, y_src, X_tar, y_tar, args)\n\n netF, netC = network.backbone_net(args, args.bottleneck)\n netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))\n netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))\n base_network = nn.Sequential(netF, netC)\n\n max_len = max(len(dset_loaders[\"source\"]), len(dset_loaders[\"target\"]))\n args.max_iter = args.max_epoch * max_len\n\n ad_net = network.AdversarialNetwork(args.bottleneck, 20).cuda()\n ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_full.pt'))\n random_layer = RandomLayer([args.bottleneck, args.class_num], args.bottleneck)\n random_layer.cuda()\n\n optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)\n optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)\n optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)\n\n max_len = max(len(dset_loaders[\"source\"]), len(dset_loaders[\"target\"]))\n max_iter = args.max_epoch * max_len\n interval_iter = max_iter // 10\n iter_num = 0\n base_network.train()\n\n class_num = args.class_num\n mem_fea = tr.rand(len(dset_loaders[\"target\"].dataset), args.bottleneck).cuda()\n mem_fea = mem_fea / tr.norm(mem_fea, p=2, dim=1, keepdim=True)\n mem_cls = tr.ones(len(dset_loaders[\"target\"].dataset), class_num).cuda() / class_num\n\n while iter_num < max_iter:\n try:\n inputs_source, labels_source = iter_source.next()\n except:\n iter_source = iter(dset_loaders[\"source\"])\n inputs_source, labels_source = iter_source.next()\n\n try:\n inputs_target, _, idx = iter_target.next()\n except:\n iter_target = iter(dset_loaders[\"target\"])\n inputs_target, _, idx = iter_target.next()\n\n if inputs_source.size(0) == 1:\n continue\n\n iter_num += 1\n lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n\n inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()\n features_source, outputs_source = 
base_network(inputs_source)\n features_target, outputs_target = base_network(inputs_target)\n features = tr.cat((features_source, features_target), dim=0)\n\n # new version img loss\n args.loss_trade_off = 1.0\n outputs = tr.cat((outputs_source, outputs_target), dim=0)\n softmax_out = nn.Softmax(dim=1)(outputs)\n entropy = Entropy(softmax_out)\n transfer_loss = CDANE([features, softmax_out], ad_net, entropy, calc_coeff(iter_num), random_layer=random_layer)\n classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_source, labels_source)\n\n # ATDOC\n dis = -tr.mm(features_target.detach(), mem_fea.t())\n for di in range(dis.size(0)):\n dis[di, idx[di]] = tr.max(dis)\n _, p1 = tr.sort(dis, dim=1)\n\n w = tr.zeros(features_target.size(0), mem_fea.size(0)).cuda()\n for wi in range(w.size(0)):\n for wj in range(args.K):\n w[wi][p1[wi, wj]] = 1 / args.K\n\n weight_, pred = tr.max(w.mm(mem_cls), 1)\n loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)\n classifier_loss_atdoc = tr.sum(weight_ * loss_) / (tr.sum(weight_).item())\n\n eff = iter_num / args.max_iter\n total_loss = args.loss_trade_off * transfer_loss + classifier_loss + args.tar_par * eff * classifier_loss_atdoc\n\n optimizer_f.zero_grad()\n optimizer_c.zero_grad()\n optimizer_d.zero_grad()\n total_loss.backward()\n optimizer_f.step()\n optimizer_c.step()\n optimizer_d.step()\n\n # label memory\n netF.eval()\n netC.eval()\n with tr.no_grad():\n features_target, outputs_target = netC(netF(inputs_target))\n features_target = features_target / tr.norm(features_target, p=2, dim=1, keepdim=True)\n softmax_out = nn.Softmax(dim=1)(outputs_target)\n outputs_target = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))\n\n mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()\n mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n base_network.eval()\n\n acc_t_te = utils.cal_acc_base(dset_loaders[\"Target\"], base_network)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n print(log_str)\n\n base_network.train()\n\n return acc_t_te\n\n\nif __name__ == '__main__':\n\n data_name = 'moon'\n if data_name == 'moon': num_class = 2\n base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']\n domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']\n file_list = [data_name + i for i in base_name_list]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,\n epsilon=1e-05, layer='wn', class_num=num_class, smooth=0)\n\n args.K = 5\n args.momentum = 1.0\n args.tar_par = 0.2\n\n args.method = 'CDANE-ATDOC'\n args.dset = data_name\n args.backbone = 'ShallowNet'\n args.batch_size = 32\n args.max_epoch = 50\n args.input_dim = 2\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '5'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n print(dset_n, args.method)\n\n args.root_path = './data_synth/'\n args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'\n args.result_dir = 'results/target/'\n\n acc_all = np.zeros((len(domain_list) - 1))\n for s in range(1, num_domain): # source\n for t in [0]: # target\n itr_idx = s - 1\n info_str = '\\n%s: %s 
--> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = file_list[s], file_list[t]\n args.task_str = domain_list[s] + '_' + domain_list[t]\n print(args)\n\n acc_all[itr_idx] = train_target(args)\n print('All acc: ', np.round(acc_all, 2))\n print('Avg acc: ', np.round(np.mean(acc_all), 2))\n",
"# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport argparse\nimport os, sys\nimport os.path as osp\nimport numpy as np\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom scipy.spatial.distance import cdist\nimport torch.utils.data as Data\nfrom utils import network, loss\nfrom utils.dataloader import read_syn_single\nfrom utils.utils import lr_scheduler, fix_random_seed, op_copy, cal_acc_noimg\n\n\ndef data_load(X, y, args):\n dset_loaders = {}\n train_bs = args.batch_size\n\n sample_idx = tr.from_numpy(np.arange(len(y))).long()\n data_tar = Data.TensorDataset(X, y, sample_idx)\n data_test = Data.TensorDataset(X, y, sample_idx)\n\n dset_loaders[\"target\"] = Data.DataLoader(data_tar, batch_size=train_bs, shuffle=True)\n dset_loaders[\"Target\"] = Data.DataLoader(data_test, batch_size=train_bs * 3, shuffle=False)\n return dset_loaders\n\n\ndef train_target(args):\n X_tar, y_tar = read_syn_single(args, args.tar)\n dset_loaders = data_load(X_tar, y_tar, args)\n\n # base network feature extract\n netF, netC = network.backbone_net(args, args.bottleneck)\n\n modelpath = args.output_dir_src + '/source_F.pt'\n netF.load_state_dict(tr.load(modelpath))\n modelpath = args.output_dir_src + '/source_C.pt'\n netC.load_state_dict(tr.load(modelpath))\n netC.eval()\n\n for k, v in netC.named_parameters():\n v.requires_grad = False\n\n param_group = []\n for k, v in netF.named_parameters():\n if args.lr_decay1 > 0:\n param_group += [{'params': v, 'lr': args.lr * args.lr_decay1}]\n else:\n v.requires_grad = False\n\n optimizer = optim.SGD(param_group)\n optimizer = op_copy(optimizer)\n\n max_iter = args.max_epoch * len(dset_loaders[\"target\"])\n interval_iter = max_iter // args.interval\n iter_num = 0\n\n while iter_num < max_iter:\n try:\n inputs_test, _, tar_idx = iter_test.next()\n except:\n iter_test = iter(dset_loaders[\"target\"])\n inputs_test, _, tar_idx = iter_test.next()\n\n if inputs_test.size(0) == 1:\n continue\n\n inputs_test = inputs_test.cuda()\n if iter_num % interval_iter == 0 and args.cls_par > 0:\n netF.eval()\n mem_label = obtain_label(dset_loaders[\"Target\"], netF, netC, args)\n mem_label = tr.from_numpy(mem_label).cuda()\n netF.train()\n\n iter_num += 1\n lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)\n features_test = netF(inputs_test)\n _, outputs_test = netC(features_test)\n\n # # loss definition\n if args.cls_par > 0:\n pred = mem_label[tar_idx].long()\n classifier_loss = nn.CrossEntropyLoss()(outputs_test, pred)\n classifier_loss *= args.cls_par\n else:\n classifier_loss = tr.tensor(0.0).cuda()\n\n if args.ent:\n softmax_out = nn.Softmax(dim=1)(outputs_test)\n entropy_loss = tr.mean(loss.Entropy(softmax_out))\n if args.gent:\n msoftmax = softmax_out.mean(dim=0)\n gentropy_loss = tr.sum(msoftmax * tr.log(msoftmax + args.epsilon))\n entropy_loss += gentropy_loss\n im_loss = entropy_loss * args.ent_par\n classifier_loss += im_loss\n\n optimizer.zero_grad()\n classifier_loss.backward()\n optimizer.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n netF.eval()\n acc_t_te, _ = cal_acc_noimg(dset_loaders[\"Target\"], netF, netC)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n print(log_str)\n netF.train()\n\n if iter_num == max_iter:\n print('{}, TL Acc = {:.2f}%'.format(args.task_str, acc_t_te))\n return acc_t_te\n\n\ndef obtain_label(loader, netF, netC, args):\n start_test = True\n with tr.no_grad():\n 
iter_test = iter(loader)\n for _ in range(len(loader)):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n feas = netF(inputs)\n _, outputs = netC(feas)\n if start_test:\n all_fea = feas.float().cpu()\n all_output = outputs.float().cpu()\n all_label = labels.float()\n start_test = False\n else:\n all_fea = tr.cat((all_fea, feas.float().cpu()), 0)\n all_output = tr.cat((all_output, outputs.float().cpu()), 0)\n all_label = tr.cat((all_label, labels.float()), 0)\n\n all_output = nn.Softmax(dim=1)(all_output)\n ent = tr.sum(-all_output * tr.log(all_output + args.epsilon), dim=1)\n unknown_weight = 1 - ent / np.log(args.class_num)\n _, predict = tr.max(all_output, 1)\n\n accuracy = tr.sum(tr.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n if args.distance == 'cosine':\n all_fea = tr.cat((all_fea, tr.ones(all_fea.size(0), 1)), 1)\n all_fea = (all_fea.t() / tr.norm(all_fea, p=2, dim=1)).t()\n\n all_fea = all_fea.float().cpu().numpy()\n K = all_output.size(1)\n aff = all_output.float().cpu().numpy()\n initc = aff.transpose().dot(all_fea)\n initc = initc / (1e-8 + aff.sum(axis=0)[:, None])\n cls_count = np.eye(K)[predict].sum(axis=0)\n labelset = np.where(cls_count > args.threshold)\n labelset = labelset[0]\n # print(labelset)\n\n dd = cdist(all_fea, initc[labelset], args.distance)\n pred_label = dd.argmin(axis=1)\n pred_label = labelset[pred_label]\n\n for round in range(1): # SSL\n aff = np.eye(K)[pred_label]\n initc = aff.transpose().dot(all_fea)\n initc = initc / (1e-8 + aff.sum(axis=0)[:, None])\n dd = cdist(all_fea, initc[labelset], args.distance)\n pred_label = dd.argmin(axis=1)\n pred_label = labelset[pred_label]\n\n acc = np.sum(pred_label == all_label.float().numpy()) / len(all_fea)\n log_str = 'SSL_Acc = {:.2f}% -> {:.2f}%'.format(accuracy * 100, acc * 100)\n print(log_str)\n\n return pred_label.astype('int')\n\n\nif __name__ == \"__main__\":\n\n data_name = 'moon'\n if data_name == 'moon': num_class = 2\n base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']\n domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']\n file_list = [data_name + i for i in base_name_list]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0, ent=True,\n gent=True, cls_par=0.3, ent_par=1.0, epsilon=1e-05, layer='wn',\n threshold=0, class_num=num_class, distance='cosine')\n\n args.method = 'SHOT'\n args.dset = data_name\n args.backbone = 'ShallowNet'\n args.batch_size = 32\n args.interval = 2\n args.max_epoch = 5\n args.input_dim = 2\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '3'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n\n args.dset = data_name\n args.root_path = './data_synth/'\n mdl_path = 'outputs/models/'\n args.output_src = mdl_path + dset_n + '/source/'\n print(dset_n, args.method)\n\n acc_all = np.zeros((len(domain_list) - 1))\n for s in range(1, num_domain): # source\n for t in [0]: # target\n itr_idx = s - 1\n info_str = '\\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = file_list[s], file_list[t]\n args.task_str = domain_list[s] + domain_list[t]\n\n args.name_src = domain_list[s]\n args.output_dir_src = osp.join(args.output_src, args.name_src)\n print(args)\n\n acc_all[itr_idx] = 
train_target(args)\n print('All acc: ', np.round(acc_all, 2))\n print('Avg acc: ', np.round(np.mean(acc_all), 2))\n",
"# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport numpy as np\nimport argparse\nimport os\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils import network, loss, utils\nfrom utils.LogRecord import LogRecord\nfrom utils.dataloader import read_seed_src_tar\nfrom utils.utils import lr_scheduler_full, fix_random_seed, data_load_noimg_ssda\nfrom utils.loss import CELabelSmooth, Entropy, ReverseLayerF\n\n\ndef train_target(args):\n X_src, y_src, X_tar, y_tar = read_seed_src_tar(args)\n dset_loaders = data_load_noimg_ssda(X_src, y_src, X_tar, y_tar, args)\n\n netF, netC = network.backbone_net(args, args.bottleneck)\n netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))\n netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))\n base_network = nn.Sequential(netF, netC)\n\n args.max_iter = args.max_epoch * len(dset_loaders[\"source\"])\n\n ad_net = network.feat_classifier(type=args.layer, class_num=2, bottleneck_dim=args.bottleneck).cuda()\n ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_clf.pt'))\n\n optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)\n optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)\n optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)\n\n max_iter = args.max_epoch * len(dset_loaders[\"source\"])\n interval_iter = max_iter // 10\n args.max_iter = max_iter\n iter_num = 0\n base_network.train()\n\n while iter_num < max_iter:\n try:\n inputs_source, labels_source = iter_source.next()\n except:\n iter_source = iter(dset_loaders[\"source\"])\n inputs_source, labels_source = iter_source.next()\n\n try:\n inputs_target_tr, labels_target_tr = iter_target_tr.next()\n except:\n iter_target_tr = iter(dset_loaders[\"target_tr\"])\n inputs_target_tr, labels_target_tr = iter_target_tr.next()\n\n try:\n inputs_target, _ = iter_target.next()\n except:\n iter_target = iter(dset_loaders[\"target_te\"])\n inputs_target, _ = iter_target.next()\n\n if inputs_source.size(0) == 1:\n continue\n\n iter_num += 1\n lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n\n inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()\n inputs_target = inputs_target.cuda()\n\n inputs_target_tr, labels_target_tr = inputs_target_tr.cuda(), labels_target_tr.cuda()\n _, outputs_source = netC(netF(inputs_source))\n _, outputs_target_tr = netC(netF(inputs_target_tr))\n outputs_comb = tr.cat((outputs_source, outputs_target_tr), dim=0)\n labels_comb = tr.cat((labels_source, labels_target_tr), dim=0)\n\n feas_source = netF(inputs_source)\n feas_target_tr = netF(inputs_target_tr)\n fea_comb = tr.cat((feas_source, feas_target_tr), dim=0)\n feas_target = netF(inputs_target)\n\n # # loss definition\n p = float(iter_num) / max_iter\n alpha = 2. / (1. 
+ np.exp(-10 * p)) - 1\n reverse_source, reverse_target = ReverseLayerF.apply(fea_comb, alpha), ReverseLayerF.apply(feas_target,\n alpha)\n _, domain_output_s = ad_net(reverse_source)\n _, domain_output_t = ad_net(reverse_target)\n domain_label_s = tr.ones(inputs_source.size()[0] + inputs_target_tr.size()[0]).long().cuda()\n domain_label_t = tr.zeros(inputs_target.size()[0]).long().cuda()\n\n classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_comb, labels_comb)\n adv_loss = nn.CrossEntropyLoss()(domain_output_s, domain_label_s) + nn.CrossEntropyLoss()(domain_output_t,\n domain_label_t)\n total_loss = classifier_loss + adv_loss\n\n optimizer_f.zero_grad()\n optimizer_c.zero_grad()\n optimizer_d.zero_grad()\n total_loss.backward()\n optimizer_f.step()\n optimizer_c.step()\n optimizer_d.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n base_network.eval()\n\n acc_t_te = utils.cal_acc_base(dset_loaders[\"Target\"], base_network)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n args.log.record(log_str)\n print(log_str)\n\n base_network.train()\n\n return acc_t_te\n\n\nif __name__ == '__main__':\n\n data_name = 'SEED'\n if data_name == 'SEED': chn, class_num, trial_num = 62, 3, 3394\n focus_domain_idx = [0, 1, 2]\n # focus_domain_idx = np.arange(15)\n domain_list = ['S' + str(i) for i in focus_domain_idx]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,\n epsilon=1e-05, layer='wn', smooth=0,\n N=num_domain, chn=chn, class_num=class_num)\n\n args.dset = data_name\n args.method = 'DANN'\n args.backbone = 'ShallowNet'\n args.batch_size = 32 # 32\n args.max_epoch = 50 # 50\n args.input_dim = 310\n args.norm = 'zscore'\n args.bz_tar_tr = args.batch_size\n args.bz_tar_te = args.batch_size * 2\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n args.tar_lbl_rate = 5 # [5, 10, ..., 50]/100\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '6'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n\n print(dset_n, args.method)\n print(args)\n\n args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_SSDA/'\n args.result_dir = 'results/target/'\n my_log = LogRecord(args)\n my_log.log_init()\n my_log.record('=' * 50 + '\\n' + os.path.basename(__file__) + '\\n' + '=' * 50)\n\n acc_all = np.zeros(num_domain * (num_domain - 1))\n for s in range(num_domain):\n for t in range(num_domain):\n if s != t:\n itr_idx = (num_domain - 1) * s + t\n if t > s: itr_idx -= 1\n info_str = '\\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = focus_domain_idx[s], focus_domain_idx[t]\n args.task_str = domain_list[s] + '_' + domain_list[t]\n print(args)\n\n my_log.record(info_str)\n args.log = my_log\n acc_all[itr_idx] = train_target(args)\n print('\\nSub acc: ', np.round(acc_all, 3))\n print('Avg acc: ', np.round(np.mean(acc_all), 3))\n\n acc_sub_str = str(np.round(acc_all, 3).tolist())\n acc_mean_str = str(np.round(np.mean(acc_all), 3).tolist())\n args.log.record(\"\\n==========================================\")\n args.log.record(acc_sub_str)\n args.log.record(acc_mean_str)\n\n"
] | [
[
"torch.sum",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.mean",
"torch.nn.Softmax",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.norm",
"torch.nn.Sequential",
"torch.max",
"numpy.round",
"torch.utils.data.TensorDataset",
"torch.cat",
"torch.sort"
],
[
"torch.utils.data.DataLoader",
"scipy.spatial.distance.cdist",
"torch.no_grad",
"torch.log",
"numpy.log",
"torch.max",
"torch.nn.Softmax",
"torch.norm",
"torch.from_numpy",
"numpy.where",
"numpy.mean",
"numpy.eye",
"torch.optim.SGD",
"torch.load",
"torch.tensor",
"torch.utils.data.TensorDataset",
"torch.nn.CrossEntropyLoss",
"numpy.round",
"torch.squeeze"
],
[
"torch.load",
"numpy.zeros",
"torch.nn.CrossEntropyLoss",
"numpy.exp",
"torch.nn.Sequential",
"numpy.round",
"torch.cat",
"numpy.mean"
]
] |
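Editorial note: all three NT-Benchmark scripts in the row above cycle their dataloaders with `iter_source.next()` inside a bare `except`, which only works on Python 2 (`.next()` was removed in Python 3). A minimal sketch of the same "cycle the loader forever" idiom in Python 3, using the built-in `next()` and an explicit `StopIteration` restart; the tensor shapes and iteration count are invented for the demo:

```python
# Python 3 equivalent of the `iter_source.next()` loop pattern above.
import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(
    TensorDataset(torch.randn(100, 2), torch.randint(0, 2, (100,))),
    batch_size=32, shuffle=True, drop_last=True,
)

iter_source = iter(loader)
for iter_num in range(10):  # stands in for `while iter_num < max_iter`
    try:
        inputs, labels = next(iter_source)
    except StopIteration:  # loader exhausted: restart it, as the scripts do
        iter_source = iter(loader)
        inputs, labels = next(iter_source)
    # ... forward/backward pass would go here ...
```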
yangwenbo99/UNIQUE | [
"50136f3169b82f20c8677f36c1b0882905b6d809"
] | [
"plot1.py"
] | [
"#!/bin/python3\n\n'''\nThis file is to plot a graph with the following setting.\n\n1. We first select an image x_0\n2. We then add some pertubation to the image to get x_1 (its type shall\n configurable in the future, but we set it to be random or loaded from file\n currently)\n3. Next, we plot f(x) for all x on the segment x_0 to x_1\n4. Finally, we optionally save the pertuabation for future work\n\nExample:\n python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01\n\n python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01\n\n\n python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization\n\n python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization\n\n python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization\n'''\n\nimport argparse\nimport TrainModel\nimport scipy.io as sio\nimport os\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\n\ndef parse_config():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-x', '--img', type=str, help='the base image')\n parser.add_argument('-p', '--pertubation', type=str, default='',\n help='the pertubation of the image, will be randomly generated if not presented')\n parser.add_argument('--pertubation_length', type=float, default=0.01,\n help='the length of the pertubataion, if random generation is nessesary')\n parser.add_argument('-s', '--save_pertubation', type=str, default='',\n help='whether the pertubation should be saved')\n\n parser.add_argument(\"--train\", type=bool, default=True)\n 
parser.add_argument('--get_scores', type=bool, default=False)\n parser.add_argument(\"--use_cuda\", type=bool, default=True)\n # parser.add_argument(\"--device\", type=str, default=\"cuda\")\n parser.add_argument(\"--resume\", action='store_true')\n parser.add_argument(\"--seed\", type=int, default=19901116)\n\n parser.add_argument(\"--backbone\", type=str, default='resnet34')\n parser.add_argument(\"--fc\", type=bool, default=True)\n parser.add_argument('--scnn_root', type=str, default='saved_weights/scnn.pkl')\n\n parser.add_argument(\"--network\", type=str, default=\"basecnn\",\n help='basecnn or dbcnn or lfc')\n\n parser.add_argument(\"--representation\", type=str, default=\"BCNN\")\n\n parser.add_argument(\"--ranking\", type=bool, default=True,\n help='True for learning-to-rank False for regular regression')\n\n parser.add_argument(\"--fidelity\", type=bool, default=True,\n help='True for fidelity loss False for regular ranknet with CE loss')\n\n parser.add_argument(\"--std_modeling\", type=bool,\n default=True) # True for modeling std False for not\n parser.add_argument(\"--std_loss\", type=bool, default=True)\n parser.add_argument(\"--fixvar\", action='store_true') #+\n parser.add_argument(\"--force_normalization\", action='store_true')\n parser.add_argument(\"--lipschitz\", action='store_true')\n parser.add_argument(\"--margin\", type=float, default=0.025)\n\n parser.add_argument(\"--split\", type=int, default=1)\n parser.add_argument(\"--trainset\", type=str, default=\"./IQA_database/\")\n parser.add_argument(\"--live_set\", type=str, default=\"./IQA_database/databaserelease2/\")\n parser.add_argument(\"--csiq_set\", type=str, default=\"./IQA_database/CSIQ/\")\n parser.add_argument(\"--tid2013_set\", type=str, default=\"./IQA_database/TID2013/\")\n parser.add_argument(\"--bid_set\", type=str, default=\"./IQA_database/BID/\")\n #parser.add_argument(\"--cid_set\", type=str, default=\"./IQA_database/CID2013_camera/\")\n parser.add_argument(\"--clive_set\", type=str, default=\"./IQA_database/ChallengeDB_release/\")\n parser.add_argument(\"--koniq10k_set\", type=str, default=\"./IQA_database/koniq-10k/\")\n parser.add_argument(\"--kadid10k_set\", type=str, default=\"./IQA_database/kadid10k/\")\n\n parser.add_argument(\"--eval_live\", type=bool, default=True)\n parser.add_argument(\"--eval_csiq\", type=bool, default=True)\n parser.add_argument(\"--eval_tid2013\", type=bool, default=False)\n parser.add_argument(\"--eval_kadid10k\", type=bool, default=True)\n parser.add_argument(\"--eval_bid\", type=bool, default=True)\n parser.add_argument(\"--eval_clive\", type=bool, default=True)\n parser.add_argument(\"--eval_koniq10k\", type=bool, default=True)\n\n parser.add_argument(\"--split_modeling\", type=bool, default=False)\n\n parser.add_argument('--ckpt_path', default='./checkpoint', type=str,\n metavar='PATH', help='path to checkpoints')\n parser.add_argument('--ckpt', default=None, type=str, help='name of the checkpoint to load')\n\n parser.add_argument(\"--train_txt\", type=str, default='train.txt') # train.txt | train_synthetic.txt | train_authentic.txt | train_sub2.txt | train_score.txt\n\n parser.add_argument(\"--batch_size\", type=int, default=128)\n parser.add_argument(\"--batch_size2\", type=int, default=32)\n parser.add_argument(\"--image_size\", type=int, default=384, help='None means random resolution')\n parser.add_argument(\"--max_epochs\", type=int, default=3)\n parser.add_argument(\"--max_epochs2\", type=int, default=12)\n parser.add_argument(\"--lr\", type=float, 
default=1e-4)\n parser.add_argument(\"--decay_interval\", type=int, default=3)\n parser.add_argument(\"--decay_ratio\", type=float, default=0.1)\n parser.add_argument(\"--epochs_per_eval\", type=int, default=1)\n parser.add_argument(\"--epochs_per_save\", type=int, default=1)\n\n parser.add_argument(\"--verbose\", action='store_true')\n\n config = parser.parse_args()\n config.to_test = []\n\n return config\n\n\ndef main(config):\n t = TrainModel.Trainer(config)\n # checking compatability\n if config.fixvar and not config.network.startswith('lfc'):\n raise NotImplementedError()\n if str(config.backbone).startswith('lfc') and not config.std_modeling:\n raise NotImplementedError()\n\n\n model = t.model\n pil_img = Image.open(config.img)\n # pil_img = pil_img.reshape((1,) + tuple(pil_img.shape))\n img = t.test_transform(pil_img).to(t.device)\n\n if config.pertubation:\n with open(config.pertubation, 'rb') as f:\n pertubation = torch.load(f)\n else:\n pertubation = torch.rand(img.shape) * config.pertubation_length\n pertubation = pertubation.to(t.device)\n\n img = img.unsqueeze(0)\n print(img.shape)\n\n if config.save_pertubation:\n with open(config.save_pertubation, 'wb') as f:\n torch.save(pertubation, f)\n\n should_normalize = not config.network.startswith('lfc') or config.force_normalization\n\n if should_normalize:\n normalization_transform = \\\n transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n pertubation = normalization_transform(pertubation)\n\n x = list(np.linspace(0, 1, 100))\n y = [t.predict_single_image(img + p * pertubation).detach().cpu().numpy() for p in x]\n plt.plot(x, y)\n plt.show()\n\n\nif __name__ == \"__main__\":\n config = parse_config()\n main(config)\n"
] | [
[
"torch.load",
"torch.rand",
"torch.save",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.linspace"
]
] |
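Editorial note: the core of `plot1.py` above is scoring a model along the segment from a base image x0 to x0 + p and plotting f(x0 + t·p) for t in [0, 1]. A hedged, self-contained sketch of that loop; the `model` here is a stand-in for the repo's `Trainer.predict_single_image`, and the image size is shrunk for the demo:

```python
# Score a model along the segment between an image and its perturbed copy.
import torch
import numpy as np
import matplotlib.pyplot as plt

model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 1))
x0 = torch.rand(1, 3, 8, 8)              # base image (tiny, demo only)
p = torch.rand_like(x0) * 0.01           # random perturbation, length 0.01

ts = np.linspace(0, 1, 100)
ys = [model(x0 + t * p).item() for t in ts]  # f along the segment
plt.plot(ts, ys)
plt.xlabel('interpolation t')
plt.ylabel('predicted score')
plt.show()
```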
AlexErfan/Image_manipulation_detection | [
"f07008b86112ae7d40a3728c715c53b6054ecc70"
] | [
"lib/datasets/dist_fake.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Peng Zhou\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom lib.datasets.imdb import imdb\nimport lib.datasets.ds_utils as ds_utils\nimport numpy as np\nimport scipy.sparse\nimport scipy.io as sio\nimport lib.utils.cython_bbox\nimport pickle\nimport subprocess\nimport uuid\nimport pdb\nfrom .voc_eval import voc_eval\nfrom lib.config import config as cfg \nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nclass dist_fake(imdb):\n def __init__(self, image_set, year, dist_path=None):\n imdb.__init__(self, image_set)\n self._year = year\n self._image_set = image_set.split('dist_')[1]\n self._dist_path = self._get_default_path() if dist_path is None \\\n else dist_path\n self._data_path=self._dist_path\n self._classes = ('__background__', # always index 0\n 'tamper','authentic')\n self._classes = ('authentic', # always index 0\n 'tamper')\n #self.classes =('authentic', # always index 0\n #'splicing','removal')\n self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))\n self._image_ext = {'.png','.jpg','.tif','.bmp','.JPG'}\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n self._roidb_handler = self.gt_roidb\n\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(os.path.splitext(self._image_index[i].split(' ')[0])[0])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n for ext in self._image_ext:\n #image_path = os.path.join('/home-3/pengzhou@umd.edu/work/xintong/medifor/portrait/test_data',\n #index + ext)\n image_path = os.path.join(self._data_path,\n index + ext)\n image_path1=os.path.join('/home-3/pengzhou@umd.edu/work/pengzhou/dataset/NC2016_Test0613',\n index + ext)\n if os.path.isfile(image_path):\n return image_path\n elif os.path.isfile(image_path1):\n return image_path1\n else:\n continue\n assert os.path.isfile(image_path) and os.path.isfile(image_path1), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path,\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n #print(image_index)\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'NC2016_Test0613')\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as 
fid:\n try:\n roidb = pickle.load(fid)\n except:\n roidb = pickle.load(fid, encoding='bytes')\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self.roidb_gt(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n def roidb_gt(self,image_id):\n num_objs = int(len(image_id.split(' ')[1:])/5)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix in range(num_objs):\n bbox = image_id.split(' ')[ix*5+1:ix*5+5]\n # Make pixel indexes 0-based\n x1 = float(bbox[0]) \n y1 = float(bbox[1]) \n x2 = float(bbox[2]) \n y2 = float(bbox[3])\n if x1<0:\n x1=0\n if y1<0:\n y1=0 \n try:\n cls=self._class_to_ind[image_id.split(' ')[ix*5+5]]\n except:\n if int(image_id.split(' ')[ix*5+5])==0:\n print('authentic')\n cls=2\n else:\n cls = int(image_id.split(' ')[ix*5+5])\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 ) * (y2 - y1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'JPGed':False,\n 'noised':False,\n 'seg_areas': seg_areas}\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = pickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n if not self.config['use_diff']:\n # Exclude the samples labeled as difficult\n non_diff_objs = [\n obj for obj in objs if int(obj.find('difficult').text) == 0]\n # if len(non_diff_objs) != len(objs):\n # print 'Removed {} difficult objects'.format(\n # len(objs) - len(non_diff_objs))\n objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * 
(y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = 'nist_' + self._image_set + '_{:s}.txt'\n path = os.path.join(\n '.',\n filename)\n return path\n\n def _get_voc_noise_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = 'nist_' + self._image_set + '_{:s}_noise.txt'\n path = os.path.join(\n '.',\n filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n print(filename)\n with open(filename, 'w') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n #pdb.set_trace()\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.format(index.split(' ')[0], dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n #pdb.set_trace()\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._dist_path,\n 'coco_multi' ,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._dist_path,\n self._image_set + '.txt')\n cachedir = os.path.join(self._dist_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n #use_07_metric = True if int(self._year) < 2010 else False\n use_07_metric = False\n print('dist metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__' or cls == self.classes[0]:\n cls_ind=0\n continue\n else:\n cls_ind=self._class_to_ind[cls]\n #elif cls=='median_filtering':\n #cls_ind=3\n #continue\n filename = self._get_voc_results_file_template().format(cls)\n filename2 = self._get_voc_noise_results_file_template().format(cls)\n print(cls_ind)\n rec, prec, ap = voc_eval(\n filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric,fuse=False)\n aps += [ap]\n print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n fig=plt.figure()\n plt.plot(rec,prec)\n fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)\n plt.xlabel('recall',fontsize=15)\n plt.xlim((0,1.0))\n plt.ylim((0,1.0))\n plt.ylabel('precision',fontsize=15)\n fig.savefig('{}.jpg'.format(cls))\n\n print(('Mean AP = {:.4f}'.format(np.mean(aps))))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print(('{:.3f}'.format(ap)))\n print(('{:.3f}'.format(np.mean(aps))))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print(('Running:\\n{}'.format(cmd)))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(all_boxes)\n self._do_python_eval(output_dir)\n #if self.config['matlab_eval']:\n #self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n #os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n from datasets.dist_fake import dist_fake\n\n d = dist_fake('trainval', '2007')\n res = d.roidb\n from IPython import embed;\n\n embed()\n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
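Editorial note: `dist_fake.gt_roidb` above (like the `adas` imdb earlier in this dump) uses a load-or-build pickle cache for ground-truth annotations. A minimal sketch of that pattern under invented paths; `build_entry` stands in for the repo's `roidb_gt`:

```python
# Load a pickled roidb cache if present, otherwise build and write it.
import os
import pickle

def cached_gt_roidb(cache_file, image_index, build_entry):
    """build_entry: callable mapping an image id to its roidb dict."""
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            # the repo also retries with encoding='bytes' for py2 pickles
            return pickle.load(fid)
    roidb = [build_entry(index) for index in image_index]
    with open(cache_file, 'wb') as fid:
        pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
    return roidb

# usage with a dummy builder:
roidb = cached_gt_roidb('/tmp/demo_gt_roidb.pkl', ['img1', 'img2'],
                        lambda idx: {'boxes': [], 'gt_classes': []})
```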
andrerubeis/AIF360 | [
"c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44"
] | [
"examples/demo_optim_data_preproc..py"
] | [
"# %% md\n\n#### This notebook demonstrates the use of an optimized data pre-processing algorithm for bias mitigation\n\n# - The\n# debiasing\n# function\n# used is implemented in the\n# `OptimPreproc`\n\n#\n# class .\n# - Define\n# parameters\n# for optimized pre - processing specific to the dataset.\n#\n#\n# - Divide\n# the\n# dataset\n# into\n# training, validation, and testing\n# partitions.\n# - Learn\n# the\n# optimized\n# pre - processing\n# transformation\n# from the training\n#\n# data.\n# - Train\n# classifier\n# on\n# original\n# training\n# data.\n# - Estimate\n# the\n# optimal\n# classification\n# threshold, that\n# maximizes\n# balanced\n# accuracy\n# without\n# fairness\n# constraints(\n# from the original\n#\n# validation\n# set).\n# - Determine\n# the\n# prediction\n# scores\n# for original testing data.Using the estimated optimal classification threshold, compute accuracy and fairness metrics.\n# - Transform\n# the\n# testing\n# set\n# using\n# the\n# learned\n# probabilistic\n# transformation.\n# - Determine\n# the\n# prediction\n# scores\n# for transformed testing data.Using the estimated optimal classification threshold, compute accuracy and fairness metrics.\n#\n\n# %%\n\n# Load all necessary packages\nimport sys\n\nsys.path.append(\"../\")\nimport numpy as np\nfrom tqdm import tqdm\n\n\nfrom aif360.datasets import BinaryLabelDataset\nfrom aif360.datasets import AdultDataset, GermanDataset, CompasDataset\nfrom aif360.metrics import BinaryLabelDatasetMetric\nfrom aif360.metrics import ClassificationMetric\nfrom aif360.metrics.utils import compute_boolean_conditioning_vector\nfrom aif360.algorithms.preprocessing.optim_preproc import OptimPreproc\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions \\\n import load_preproc_data_adult, load_preproc_data_german, load_preproc_data_compas\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions \\\n import get_distortion_adult, get_distortion_german, get_distortion_compas\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools\nfrom common_utils import compute_metrics\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\n\nfrom IPython.display import Markdown, display\nimport matplotlib.pyplot as plt\n\n# %% md\n\n#### Load dataset and specify options\n\n# %%\n\n# import dataset\ndataset_used = \"adult\" # \"adult\", \"german\", \"compas\"\nprotected_attribute_used = 1 # 1, 2\n\nif dataset_used == \"adult\":\n if protected_attribute_used == 1:\n privileged_groups = [{'sex': 1}]\n unprivileged_groups = [{'sex': 0}]\n dataset_orig = load_preproc_data_adult(['sex'])\n else:\n privileged_groups = [{'race': 1}]\n unprivileged_groups = [{'race': 0}]\n dataset_orig = load_preproc_data_adult(['race'])\n\n optim_options = {\n \"distortion_fun\": get_distortion_adult,\n \"epsilon\": 0.05,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\nelif dataset_used == \"german\":\n if protected_attribute_used == 1:\n privileged_groups = [{'sex': 1}]\n unprivileged_groups = [{'sex': 0}]\n dataset_orig = load_preproc_data_german(['sex'])\n optim_options = {\n \"distortion_fun\": get_distortion_german,\n \"epsilon\": 0.05,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\n else:\n privileged_groups = [{'age': 1}]\n unprivileged_groups = [{'age': 0}]\n dataset_orig = load_preproc_data_german(['age'])\n optim_options = {\n 
\"distortion_fun\": get_distortion_german,\n \"epsilon\": 0.1,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\nelif dataset_used == \"compas\":\n if protected_attribute_used == 1:\n privileged_groups = [{'sex': 1}]\n unprivileged_groups = [{'sex': 0}]\n dataset_orig = load_preproc_data_compas(['sex'])\n else:\n privileged_groups = [{'race': 1}]\n unprivileged_groups = [{'race': 0}]\n dataset_orig = load_preproc_data_compas(['race'])\n\n optim_options = {\n \"distortion_fun\": get_distortion_compas,\n \"epsilon\": 0.05,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\n# random seed\nnp.random.seed(1)\n\n# Split into train, validation, and test\ndataset_orig_train, dataset_orig_vt = dataset_orig.split([0.7], shuffle=True)\ndataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5], shuffle=True)\n\n# %% md\n\n#### Display dataset attributes\n\n# %%\n\n# print out some labels, names, etc.\ndisplay(Markdown(\"#### Training Dataset shape\"))\nprint(dataset_orig_train.features.shape)\ndisplay(Markdown(\"#### Favorable and unfavorable labels\"))\nprint(dataset_orig_train.favorable_label, dataset_orig_train.unfavorable_label)\ndisplay(Markdown(\"#### Protected attribute names\"))\nprint(dataset_orig_train.protected_attribute_names)\ndisplay(Markdown(\"#### Privileged and unprivileged protected attribute values\"))\nprint(dataset_orig_train.privileged_protected_attributes,\n dataset_orig_train.unprivileged_protected_attributes)\ndisplay(Markdown(\"#### Dataset feature names\"))\nprint(dataset_orig_train.feature_names)\n\n# %% md\n\n#### Metric for original training data\n\n# %%\n\n# Metric for the original dataset\nmetric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Original training dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())\n\n# %% md\n\n#### Train with and transform the original training data\n\n# %%\n\nOP = OptimPreproc(OptTools, optim_options,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\n\nOP = OP.fit(dataset_orig_train)\n\n# Transform training data and align features\ndataset_transf_train = OP.transform(dataset_orig_train, transform_Y=True)\ndataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train)\n\n# %% md\n\n#### Metric with the transformed training data\n\n# %%\n\nmetric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Transformed training dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_transf_train.mean_difference())\n\n# %% md\n\n# Optimized\n# preprocessing\n# has\n# reduced\n# the\n# disparity in favorable\n# outcomes\n# between\n# the\n# privileged and unprivileged\n# groups(training\n# data).\n\n# %%\n\n### Testing\nassert np.abs(metric_transf_train.mean_difference()) < np.abs(metric_orig_train.mean_difference())\n\n# %% md\n\n#### Load, clean up original test data and compute metric\n\n# %%\n\ndataset_orig_test = dataset_transf_train.align_datasets(dataset_orig_test)\ndisplay(Markdown(\"#### Testing Dataset shape\"))\nprint(dataset_orig_test.features.shape)\n\nmetric_orig_test = BinaryLabelDatasetMetric(dataset_orig_test,\n unprivileged_groups=unprivileged_groups,\n 
privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Original test dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_test.mean_difference())\n\n# %% md\n\n#### Transform test data and compute metric\n\n# %%\n\ndataset_transf_test = OP.transform(dataset_orig_test, transform_Y=True)\ndataset_transf_test = dataset_orig_test.align_datasets(dataset_transf_test)\n\nmetric_transf_test = BinaryLabelDatasetMetric(dataset_transf_test,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Transformed test dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_transf_test.mean_difference())\n\n# %% md\n\n# Optimized\n# preprocessing\n# has\n# reduced\n# the\n# disparity in favorable\n# outcomes\n# between\n# the\n# privileged and unprivileged\n# groups(test\n# data).\n\n# %%\n\n### Testing\nassert np.abs(metric_transf_test.mean_difference()) < np.abs(metric_orig_test.mean_difference())\n\n# %% md\n\n### Train classifier on original data\n\n# %%\n\n# Logistic regression classifier and predictions\nscale_orig = StandardScaler()\nX_train = scale_orig.fit_transform(dataset_orig_train.features)\ny_train = dataset_orig_train.labels.ravel()\n\nlmod = LogisticRegression()\nlmod.fit(X_train, y_train)\ny_train_pred = lmod.predict(X_train)\n\n# positive class index\npos_ind = np.where(lmod.classes_ == dataset_orig_train.favorable_label)[0][0]\n\ndataset_orig_train_pred = dataset_orig_train.copy()\ndataset_orig_train_pred.labels = y_train_pred\n\n# %% md\n\n#### Obtain scores original test set\n\n# %%\n\ndataset_orig_valid_pred = dataset_orig_valid.copy(deepcopy=True)\nX_valid = scale_orig.transform(dataset_orig_valid_pred.features)\ny_valid = dataset_orig_valid_pred.labels\ndataset_orig_valid_pred.scores = lmod.predict_proba(X_valid)[:, pos_ind].reshape(-1, 1)\n\ndataset_orig_test_pred = dataset_orig_test.copy(deepcopy=True)\nX_test = scale_orig.transform(dataset_orig_test_pred.features)\ny_test = dataset_orig_test_pred.labels\ndataset_orig_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)\n\n# %% md\n\n### Find the optimal classification threshold from the validation set\n\n# %%\n\nnum_thresh = 100\nba_arr = np.zeros(num_thresh)\nclass_thresh_arr = np.linspace(0.01, 0.99, num_thresh)\nfor idx, class_thresh in enumerate(class_thresh_arr):\n fav_inds = dataset_orig_valid_pred.scores > class_thresh\n dataset_orig_valid_pred.labels[fav_inds] = dataset_orig_valid_pred.favorable_label\n dataset_orig_valid_pred.labels[~fav_inds] = dataset_orig_valid_pred.unfavorable_label\n\n classified_metric_orig_valid = ClassificationMetric(dataset_orig_valid,\n dataset_orig_valid_pred,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\n\n ba_arr[idx] = 0.5 * (classified_metric_orig_valid.true_positive_rate() \\\n + classified_metric_orig_valid.true_negative_rate())\n\nbest_ind = np.where(ba_arr == np.max(ba_arr))[0][0]\nbest_class_thresh = class_thresh_arr[best_ind]\n\nprint(\"Best balanced accuracy (no fairness constraints) = %.4f\" % np.max(ba_arr))\nprint(\"Optimal classification threshold (no fairness constraints) = %.4f\" % best_class_thresh)\n\n# %% md\n\n### Predictions and fairness metrics from original test set\n\n# %%\n\ndisplay(Markdown(\"#### Predictions from original testing data\"))\n\nbal_acc_arr_orig = []\ndisp_imp_arr_orig = []\navg_odds_diff_arr_orig = 
[]\n\ndisplay(Markdown(\"#### Testing set\"))\ndisplay(Markdown(\"##### Raw predictions - No fairness constraints\"))\n\nfor thresh in tqdm(class_thresh_arr):\n\n fav_inds = dataset_orig_test_pred.scores > thresh\n dataset_orig_test_pred.labels[fav_inds] = dataset_orig_test_pred.favorable_label\n dataset_orig_test_pred.labels[~fav_inds] = dataset_orig_test_pred.unfavorable_label\n\n if (thresh == best_class_thresh):\n disp = True\n else:\n disp = False\n\n metric_test_bef = compute_metrics(dataset_orig_test, dataset_orig_test_pred,\n unprivileged_groups, privileged_groups, disp=disp)\n\n bal_acc_arr_orig.append(metric_test_bef[\"Balanced accuracy\"])\n avg_odds_diff_arr_orig.append(metric_test_bef[\"Average odds difference\"])\n disp_imp_arr_orig.append(metric_test_bef[\"Disparate impact\"])\n\n# %%\n\nfig, ax1 = plt.subplots(figsize=(10, 7))\nax1.plot(class_thresh_arr, bal_acc_arr_orig)\nax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')\nax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')\nax1.xaxis.set_tick_params(labelsize=14)\nax1.yaxis.set_tick_params(labelsize=14)\n\nax2 = ax1.twinx()\nax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_orig)), color='r')\nax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')\nax2.axvline(np.array(class_thresh_arr)[best_ind],\n color='k', linestyle=':')\nax2.yaxis.set_tick_params(labelsize=14)\nax2.grid(True)\n\ndisp_imp_at_best_bal_acc_orig = np.abs(1.0 - np.array(disp_imp_arr_orig))[best_ind]\n\n# %% md\n\n# ```abs(1 - disparate\n# impact)``` must\n# be\n# close\n# to\n# zero\n# for classifier predictions to be fair.\n\n# %% md\n\n### Train classifier on transformed data and obtain predictions with its fairness metrics\n\n# %%\n\nscale_transf = StandardScaler()\nX_train = scale_transf.fit_transform(dataset_transf_train.features)\ny_train = dataset_transf_train.labels.ravel()\n\nlmod = LogisticRegression()\nlmod.fit(X_train, y_train)\ny_train_pred = lmod.predict(X_train)\n\ndataset_transf_train_pred = dataset_transf_train.copy()\ndataset_transf_train_pred.labels = y_train_pred\n\n# %% md\n\n### Predictions and fairness metrics from transformed test set\n\n# %%\n\ndataset_transf_test_pred = dataset_transf_test.copy(deepcopy=True)\nX_test = scale_transf.transform(dataset_transf_test_pred.features)\ny_test = dataset_transf_test_pred.labels\ndataset_transf_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)\n\n# %%\n\ndisplay(Markdown(\"#### Predictions from transformed testing data\"))\n\nbal_acc_arr_transf = []\ndisp_imp_arr_transf = []\navg_odds_diff_arr_transf = []\n\ndisplay(Markdown(\"#### Testing set\"))\ndisplay(Markdown(\"##### Transformed predictions - No fairness constraints\"))\n\nfor thresh in tqdm(class_thresh_arr):\n\n fav_inds = dataset_transf_test_pred.scores > thresh\n dataset_transf_test_pred.labels[fav_inds] = dataset_transf_test_pred.favorable_label\n dataset_transf_test_pred.labels[~fav_inds] = dataset_transf_test_pred.unfavorable_label\n\n if (thresh == best_class_thresh):\n disp = True\n else:\n disp = False\n\n metric_test_bef = compute_metrics(dataset_transf_test, dataset_transf_test_pred,\n unprivileged_groups, privileged_groups, disp=disp)\n\n bal_acc_arr_transf.append(metric_test_bef[\"Balanced accuracy\"])\n avg_odds_diff_arr_transf.append(metric_test_bef[\"Average odds difference\"])\n disp_imp_arr_transf.append(metric_test_bef[\"Disparate impact\"])\n\n# %%\n\nfig, ax1 = plt.subplots(figsize=(10, 
7))\nax1.plot(class_thresh_arr, bal_acc_arr_transf)\nax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')\nax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')\nax1.xaxis.set_tick_params(labelsize=14)\nax1.yaxis.set_tick_params(labelsize=14)\n\nax2 = ax1.twinx()\nax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_transf)), color='r')\nax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')\nax2.axvline(np.array(class_thresh_arr)[best_ind],\n color='k', linestyle=':')\nax2.yaxis.set_tick_params(labelsize=14)\nax2.grid(True)\n\ndisp_imp_at_best_bal_acc_transf = np.abs(1.0 - np.array(disp_imp_arr_transf))[best_ind]\n\n# %% md\n\n# ```abs(1 - disparate\n# impact)``` must\n# be\n# close\n# to\n# zero\n# for classifier predictions to be fair.This measure has improved using classifier trained using the transformed data compared to the original data.\n\n\n# %%\n\n### testing\nassert disp_imp_at_best_bal_acc_transf < disp_imp_at_best_bal_acc_orig\n\n# %% md\n\n# Summary of Results\n# We\n# show\n# the\n# optimal\n# classification\n# thresholds, and the\n# fairness and accuracy\n# metrics.\n\n# %% md\n\n### Classification Thresholds\n\n# | Dataset | Classification\n# threshold |\n# | - | - |\n# | Adult | 0.2674 |\n# | German | 0.6732 |\n# | Compas | 0.5148 |\n\n# %% md\n\n### Fairness Metric: Disparate impact, Accuracy Metric: Balanced accuracy\n\n#### Performance\n\n# | Dataset | Sex(Acc - Bef) | Sex(Acc - Aft) | Sex(Fair - Bef) | Sex(Fair - Aft) | Race / Age(Acc - Bef) | Race / Age(\n# Acc - Aft) | Race / Age(Fair - Bef) | Race / Age(Fair - Aft) |\n# | - | - | - | - | - | - | - | - | - |\n# | Adult(Test) | 0.7417 | 0.7021 | 0.2774 | 0.7729 | 0.7417 | 0.7408 | 0.4423 | 0.7645 |\n# | German(Test) | 0.6524 | 0.5698 | 0.9948 | 1.0664 | 0.6524 | 0.6067 | 0.3824 | 0.8228 |\n# | Compas(Test) | 0.6774 | 0.6606 | 0.6631 | 0.8085 | 0.6774 | 0.6790 | 0.6600 | 0.8430 |\n\n# %%\n\n\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.max",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.where",
"numpy.linspace"
]
] |
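Editorial note: the AIF360 demo above picks its operating point by sweeping 100 thresholds over validation scores and keeping the one that maximizes balanced accuracy, 0.5·(TPR + TNR). A hedged sketch of that search with synthetic scores and labels standing in for the demo's classifier output:

```python
# Sweep candidate thresholds and keep the balanced-accuracy maximizer.
import numpy as np

rng = np.random.default_rng(0)
y_valid = rng.integers(0, 2, size=500)                       # synthetic labels
scores = np.clip(y_valid * 0.3 + rng.normal(0.4, 0.2, 500), 0, 1)

thresholds = np.linspace(0.01, 0.99, 100)
bal_acc = np.empty_like(thresholds)
for i, t in enumerate(thresholds):
    pred = (scores > t).astype(int)
    tpr = (pred[y_valid == 1] == 1).mean()   # true positive rate
    tnr = (pred[y_valid == 0] == 0).mean()   # true negative rate
    bal_acc[i] = 0.5 * (tpr + tnr)

best = thresholds[np.argmax(bal_acc)]
print(f"best balanced accuracy {bal_acc.max():.4f} at threshold {best:.4f}")
```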
kasmith/geometry | [
"805b525ae8ffebb6bb1d84c094f76533d88dbb7a"
] | [
"geometry/shapes.py"
] | [
"\"\"\"Functions that work on collections of shapes\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nfrom .convex import convex_area, convex_centroid\n\n__all__ = ['recenter_polygon', 'centroid_for_shapes',\n 'centroid_for_uncomputed_shapes', 'recenter_system',\n 'rescale_and_recenter_system', 'rotate_polygon',\n 'rotate_system', 'mirror_polygon', 'mirror_system',\n 'find_concave_outline']\n\ndef recenter_polygon(vertices):\n \"\"\"Returns a new convex polygon with centroid at (0,0)\n\n Args:\n vertices (list): list of (x,y) vertices of convex polygon\n\n Returns:\n A list just like the input with the recentered vertices (but possibly\n transformed into numpy arrays)\n \"\"\"\n centroid = convex_centroid(vertices)\n new_verts = []\n for v in vertices:\n v = np.array(v)\n new_verts.append(v - centroid)\n return new_verts\n\ndef centroid_for_shapes(centroids, areas = None):\n \"\"\"Calculates the centroid for a set of shapes\n\n Requires pre-computed centroids and areas\n\n Args:\n centroids (list): list of (x,y) centroids for each shape\n areas (list): list of areas (floats) for each shape (if not given,\n assumes they are all equal)\n\n Returns:\n The (x,y) position of the weighted centroid (as np.array)\n \"\"\"\n gc = np.zeros(2)\n area = 0\n if areas is None:\n areas = np.ones(len(centroids))\n for pc, a in zip(centroids, areas):\n gc += np.array(pc)*a\n area += a\n gc /= area\n return np.array(gc)\n\n\ndef centroid_for_uncomputed_shapes(shape_list):\n \"\"\"Like centroid_for_shapes but calculates centroids & areas\n\n Args:\n shape_list (list): a list of list of vertices (one for each shape)\n\n Returns:\n The (x,y) position of the weighted centroid (as np.array)\n \"\"\"\n centroids = []\n areas = []\n for s in shape_list:\n centroids.append(convex_centroid(s))\n areas.append(convex_area(s))\n return centroid_for_shapes(centroids, areas)\n\n\ndef recenter_system(shape_list):\n \"\"\"Recenters a set of shapes around the centroid of all of them\n\n Args:\n shape_list (list): a list of list of vertices (one for each shape)\n\n Returns:\n List of two items:\n * Similar format as input, but transformed so that calculating the\n centroid_for_uncomputed_shapes() on that list returns (0,0)\n * The grand centroid for the system in original coordinates\n \"\"\"\n centroids = []\n areas = []\n new_shapes = []\n # Decompose each of the individual shapes\n for s in shape_list:\n c = convex_centroid(s)\n a = convex_area(s)\n new_s = []\n for v in s:\n new_s.append(np.array(v) - c)\n centroids.append(c)\n areas.append(a)\n new_shapes.append(new_s)\n # Find the grand centroid & new centers of each shape\n center = centroid_for_shapes(centroids, areas)\n re_centroids = [c - center for c in centroids]\n # Go back and change the vertices of each shape\n final_shapes = []\n for ns,c in zip(new_shapes, re_centroids):\n final_shapes.append([s+c for s in ns])\n return final_shapes, center\n\n\ndef rescale_and_recenter_system(shape_list, total_area):\n \"\"\"Recenters a set of shapes and resizes them to have a total fixed area\n\n Args:\n shape_list (list): a list of list of vertices (one for each shape)\n total_area (float): the area to fix the shapes to\n\n Returns:\n List of two items:\n * Similar format as input, but transformed so that calculating the\n `centroid_for_uncomputed_shapes()` on that list returns (0,0) and summing\n the areas gets to `total_area`\n * The grand centroid for the system in original coordinates\n \"\"\"\n centroids = []\n areas = []\n new_shapes = 
[]\n # Decompose each of the individual shapes\n for s in shape_list:\n c = convex_centroid(s)\n a = convex_area(s)\n new_s = []\n for v in s:\n new_s.append(np.array(v) - c)\n centroids.append(c)\n areas.append(a)\n new_shapes.append(new_s)\n # Find the grand centroid & new centers of each shape\n center = centroid_for_shapes(centroids, areas)\n re_centroids = [c - center for c in centroids]\n # Find rescaling factor\n tot_a = sum(areas)\n dim_scale = np.sqrt(total_area / tot_a)\n # Go back and change the vertices of each shape\n final_shapes = []\n for ns,c in zip(new_shapes, re_centroids):\n final_shapes.append([(s+c)*dim_scale for s in ns])\n return final_shapes, center\n\ndef rotate_polygon(vertices, angle, center_point = [0., 0.]):\n \"\"\"Rotates a shape around a given point (the origin)\n\n Args:\n vertices (list): A list of (x,y) vertices\n angle (float): Angle in radians to rotate counterclockwise\n center_point ([float, float]): (x,y) point to rotate around\n\n Returns:\n A list of vertices rotated around the center point\n \"\"\"\n np_o = np.array(center_point)\n np_vs = [np.array(v) - np_o for v in vertices]\n rot_mat = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n return [np.dot(rot_mat, v)+np_o for v in np_vs]\n\ndef rotate_system(shape_list, angle, center_point = None):\n \"\"\"Rotates a set of shapes around a given point\n\n If no center point is given, assume the center of mass of the shape\n\n Args:\n shape_list (list): A list of list of (x,y) vertices\n angle (float): Angle in radians to rotate counterclockwise\n center_point ([float, float]): (x,y) point to rotate around\n\n Returns:\n A new shape list with rotated vertices\n \"\"\"\n if center_point is None:\n center_point = centroid_for_uncomputed_shapes(shape_list)\n return [rotate_polygon(s, angle, center_point) for s in shape_list]\n\ndef mirror_polygon(vertices, axes=(False, True), center_point=None):\n \"\"\"Mirrors a polygon around an x or y line\n\n If center_point is None, mirror around the center of the shape\n\n Args:\n vertices (list): A list of (x,y) vertices\n axes ([bool, bool]): Whether to mirror around the (x,y) axes\n center_point ([float, float]): (x,y) point to mirror around\n\n Returns:\n A new polygon with rotated vertices\n \"\"\"\n if center_point is None:\n center_point = convex_centroid(vertices)\n xm = -1 if axes[0] else 1\n ym = -1 if axes[1] else 1\n return [np.array([xm*(v[0]-center_point[0])+center_point[0],\n ym*(v[1]-center_point[1])+center_point[1]]) for v\n in vertices]\n\ndef mirror_system(shape_list, axes=(False, True), center_point=None):\n \"\"\"Mirrors a polygon around an x or y line\n\n Mirrors around the center of the system if center_point is None\n\n Args:\n shape_list (list): A list of list of (x,y) vertices\n axes ([bool, bool]): Whether to mirror around the (x,y) axes\n center_point ([float, float]): (x,y) point to mirror around\n\n Returns:\n A new shape list with rotated vertices\n \"\"\"\n if center_point is None:\n center_point = centroid_for_uncomputed_shapes(shape_list)\n return [mirror_polygon(s, axes, center_point) for s in shape_list]\n\n\ndef _point_equal(p1, p2):\n return p1[0]==p2[0] and p1[1] == p2[1]\n\ndef _arr_eq(a1, a2):\n return all(_point_equal(p1,p2) for p1, p2 in zip(a1, a2))\n\ndef find_concave_outline(shape_list):\n \"\"\"Find the outline of a set of shapes\n\n Assuming all shapes have edges in common with other shapes where they touch,\n provides a set of vertices for drawing the outline\n\n Args:\n shape_list 
(list): A list of list of (x,y) vertices\n\n    Returns:\n        A list of ordered (x,y) vertices for drawing an outline\n    \"\"\"\n    # Find the left-most point, breaking ties toward the lowest y\n    current_shape = shape_list[0]\n    current_pt = current_shape[0]\n    test_idx = 1\n    next_test_dir = 1\n    for s in shape_list:\n        for i in range(len(s)):\n            p = s[i]\n            if ((p[0] < current_pt[0]) or\n                    (p[0] == current_pt[0] and p[1] < current_pt[1])):\n                # Replace\n                current_pt = p\n                current_shape = s\n                test_idx = (i+1) % len(s)\n                next_test_dir = 1\n    vertex_list = [current_pt]\n    # Keep going until you reach back to the first point\n    while not _point_equal(current_shape[test_idx], vertex_list[0]):\n        # Iterate through all the shapes to try to find a matching edge\n        checking = True\n        for s in (s for s in shape_list if not _arr_eq(s, current_shape)):\n            if checking:  # Way to break out if match found\n                for i in range(len(s)):\n                    spt = s[i]\n                    if _point_equal(current_pt, spt):\n                        spt_after = s[(i+1) % len(s)]\n                        spt_before = s[(i-1) % len(s)]\n                        test_pt = current_shape[test_idx]\n                        if _point_equal(test_pt, spt_after):\n                            test_idx = (i-1) % len(s)\n                            next_test_dir = -1\n                            current_shape = s\n                            checking = False\n                        elif _point_equal(test_pt, spt_before):\n                            test_idx = (i+1) % len(s)\n                            next_test_dir = 1\n                            current_shape = s\n                            checking = False\n        # Have you exhausted all shapes?\n        if checking:\n            current_pt = current_shape[test_idx]\n            vertex_list.append(current_pt)\n            test_idx += next_test_dir\n            test_idx %= len(current_shape)\n    return vertex_list\n"
] | [
[
"numpy.sqrt",
"numpy.zeros",
"numpy.cos",
"numpy.array",
"numpy.sin",
"numpy.dot"
]
] |
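A quick, self-contained sanity check of the centroid and rotation math in the shapes module above. `centroid_for_shapes` and `rotate_polygon` are restated from the listing (the packaged module also depends on its `convex` helpers, which are omitted here); the example shapes and printed values are illustrative only.

import numpy as np

def centroid_for_shapes(centroids, areas=None):
    # Area-weighted average of the per-shape centroids
    gc = np.zeros(2)
    area = 0
    if areas is None:
        areas = np.ones(len(centroids))
    for pc, a in zip(centroids, areas):
        gc += np.array(pc) * a
        area += a
    return gc / area

def rotate_polygon(vertices, angle, center_point=(0., 0.)):
    # Standard 2D rotation matrix applied about center_point
    o = np.array(center_point)
    rot = np.array([[np.cos(angle), -np.sin(angle)],
                    [np.sin(angle), np.cos(angle)]])
    return [rot @ (np.array(v) - o) + o for v in vertices]

# Two unit squares side by side have equal areas, so the grand centroid
# is the midpoint of their centroids:
print(centroid_for_shapes([(0.5, 0.5), (1.5, 0.5)]))  # [1.  0.5]

# Rotating a unit square 90 degrees CCW about its own centroid permutes its corners:
print(rotate_polygon([(0, 0), (1, 0), (1, 1), (0, 1)], np.pi / 2, (0.5, 0.5)))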
krfricke/pytorch-lightning | [
"fbd887df9d487da4c57d884e01b3401af140b1bc",
"fbd887df9d487da4c57d884e01b3401af140b1bc"
] | [
"tests/strategies/test_ddp_strategy_with_comm_hook.py",
"tests/utilities/test_apply_func_torchtext.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import mock\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy\nfrom pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10\nfrom tests.helpers import BoringModel\nfrom tests.helpers.runif import RunIf\n\nif torch.distributed.is_available():\n from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default\n from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD\n\n if _TORCH_GREATER_EQUAL_1_10:\n import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD\n\n\nclass TestDDPStrategy(DDPStrategy):\n def __init__(self, expected_ddp_comm_hook_name, *args, **kwargs):\n self.expected_ddp_comm_hook_name = expected_ddp_comm_hook_name\n super().__init__(*args, **kwargs)\n\n def teardown(self):\n # check here before unwrapping DistributedDataParallel in self.teardown\n attached_ddp_comm_hook_name = self.model._get_ddp_logging_data()[\"comm_hook\"]\n assert attached_ddp_comm_hook_name == self.expected_ddp_comm_hook_name\n return super().teardown()\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_fp16_compress_comm_hook(tmpdir):\n \"\"\"Test for DDP FP16 compress hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=default.fp16_compress_hook.__qualname__,\n ddp_comm_hook=default.fp16_compress_hook,\n )\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_sgd_comm_hook(tmpdir):\n \"\"\"Test for DDP FP16 compress hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=powerSGD.powerSGD_hook.__qualname__,\n ddp_comm_state=powerSGD.PowerSGDState(process_group=None),\n ddp_comm_hook=powerSGD.powerSGD_hook,\n )\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):\n \"\"\"Test for DDP FP16 compress wrapper for SGD hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=default.fp16_compress_wrapper(powerSGD.powerSGD_hook).__qualname__,\n ddp_comm_state=powerSGD.PowerSGDState(process_group=None),\n 
ddp_comm_hook=powerSGD.powerSGD_hook,\n ddp_comm_wrapper=default.fp16_compress_wrapper,\n )\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_spawn_fp16_compress_comm_hook(tmpdir):\n \"\"\"Test for DDP Spawn FP16 compress hook.\"\"\"\n model = BoringModel()\n strategy = DDPSpawnStrategy(ddp_comm_hook=default.fp16_compress_hook)\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.10.0\", skip_windows=True, standalone=True)\ndef test_ddp_post_local_sgd_comm_hook(tmpdir):\n \"\"\"Test for DDP post-localSGD hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=post_localSGD.post_localSGD_hook.__qualname__,\n ddp_comm_state=post_localSGD.PostLocalSGDState(\n process_group=None,\n subgroup=None,\n start_localSGD_iter=8,\n ),\n ddp_comm_hook=post_localSGD.post_localSGD_hook,\n model_averaging_period=4,\n )\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(skip_windows=True, min_torch=\"1.10.0\", min_cuda_gpus=2, standalone=True)\n@mock.patch(\"torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters\")\ndef test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):\n \"\"\"Test that when using DDP with post-localSGD, model averaging is called.\"\"\"\n model = BoringModel()\n\n # test regular ddp does not call model averaging\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=\"ddp\",\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n\n trainer.fit(model)\n average_parameters_mock.assert_not_called()\n\n # test ddp with post-localSGD does call model averaging\n ddp_strategy = DDPStrategy(\n ddp_comm_state=post_localSGD.PostLocalSGDState(\n process_group=None,\n subgroup=None,\n start_localSGD_iter=8,\n ),\n ddp_comm_hook=post_localSGD.post_localSGD_hook,\n model_averaging_period=4,\n )\n\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=ddp_strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n )\n\n trainer.fit(model)\n average_parameters_mock.assert_called()\n\n\n@RunIf(skip_windows=True, min_torch=\"1.10.0\", min_cuda_gpus=2, standalone=True)\n@mock.patch(\"torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters\")\ndef test_post_local_sgd_model_averaging_value_error(average_parameters_mock, tmpdir):\n \"\"\"Test that when using DDP with post-localSGD a ValueError is thrown when the optmizer is\n ZeroRedundancyOptimizer.\"\"\"\n from torch.distributed.optim import 
ZeroRedundancyOptimizer\n\n class OptimizerModel(BoringModel):\n def configure_optimizers(self):\n return ZeroRedundancyOptimizer(params=self.parameters(), optimizer_class=torch.optim.Adam, lr=0.01)\n\n model = OptimizerModel()\n strategy = DDPStrategy(\n ddp_comm_state=post_localSGD.PostLocalSGDState(\n process_group=None,\n subgroup=None,\n start_localSGD_iter=8,\n ),\n ddp_comm_hook=post_localSGD.post_localSGD_hook,\n model_averaging_period=4,\n )\n\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n\n with pytest.raises(ValueError, match=\"Currently model averaging cannot work with a distributed optimizer\"):\n trainer.fit(model)\n\n average_parameters_mock.assert_not_called()\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pytest\nimport torch\n\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device\nfrom pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.torchtext_utils import get_dummy_torchtext_data_iterator\n\n\n@pytest.mark.parametrize(\"include_lengths\", [False, True])\n@pytest.mark.parametrize(\"device\", [torch.device(\"cuda\", 0)])\n@pytest.mark.skipif(not _TORCHTEXT_LEGACY, reason=\"torchtext.legacy is deprecated.\")\n@RunIf(min_cuda_gpus=1)\ndef test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, device):\n data_iterator, _ = get_dummy_torchtext_data_iterator(num_samples=3, batch_size=3, include_lengths=include_lengths)\n data_iter = iter(data_iterator)\n batch = next(data_iter)\n\n with pytest.deprecated_call(match=\"The `torchtext.legacy.Batch` object is deprecated\"):\n batch_on_device = move_data_to_device(batch, device)\n\n if include_lengths:\n # tensor with data\n assert batch_on_device.text[0].device == device\n # tensor with length of data\n assert batch_on_device.text[1].device == device\n else:\n assert batch_on_device.text.device == device\n\n\n@pytest.mark.parametrize(\"include_lengths\", [False, True])\n@pytest.mark.skipif(not _TORCHTEXT_LEGACY, reason=\"torchtext.legacy is deprecated.\")\ndef test_batch_move_data_to_device_torchtext_include_lengths_cpu(include_lengths):\n test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, torch.device(\"cpu\"))\n"
] | [
[
"torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState",
"torch.distributed.is_available",
"torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook.PostLocalSGDState",
"torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_wrapper"
],
[
"torch.device"
]
] |
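For context on what the DDP comm-hook tests above exercise: passing `ddp_comm_hook`/`ddp_comm_state` to Lightning's `DDPStrategy` ultimately registers the hook on the wrapped `DistributedDataParallel` module. A minimal sketch of the same mechanism in plain PyTorch (assumes an already-initialized process group, e.g. under torchrun; the wrapped module is a placeholder):

import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default

def wrap_with_fp16_compress(module: nn.Module) -> DistributedDataParallel:
    # Gradients are cast to fp16 before the all-reduce and decompressed after,
    # which is what test_ddp_fp16_compress_comm_hook verifies via
    # _get_ddp_logging_data()["comm_hook"].
    ddp = DistributedDataParallel(module)
    ddp.register_comm_hook(state=None, hook=default.fp16_compress_hook)
    return ddp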
nathanheidacker/AlphaGradient | [
"cf031058f3e91381575e2df44cc029bcc7f4cc73"
] | [
"alphagradient/utils.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Standard utility functions used throughout AlphaGradient\"\"\"\n\n# Standard Imports\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nimport builtins\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nimport math\nfrom pathlib import Path\n\n# Third Party Imports\nimport numpy as np\nimport pandas as pd\n\n# Typing\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n Generator,\n Generic,\n Iterable,\n Optional,\n TypeVar,\n Union,\n)\n\nT = TypeVar(\"T\")\n\n\nclass PropertyType(Generic[T]):\n \"\"\"A Type class for property objects themselves, before being bound to a class instance\"\"\"\n\n def fget(self, *args: Any) -> T:\n ...\n\n\nProperty = builtins.property\n\"\"\"A Type for builtin properties that have been bound to a class instance\"\"\"\n\nPyNumber = Union[int, float]\n\"\"\"Numeric type that does not include complex numbers (only native python types)\"\"\"\n\nNumber = Union[PyNumber, np.number, pd.core.arrays.numeric.NumericDtype]\n\"\"\"Numeric type that does not include complex numbers\"\"\"\n\nDatetimeLike = Union[pd.Timestamp, np.datetime64, date, datetime, str]\n\"\"\"Objects convertable to python datetimes\"\"\"\n\nTimeLike = Union[time, str]\n\"\"\"Objects convertable to python time objects\"\"\"\n\nDateOrTime = Union[DatetimeLike, time]\n\"\"\"Objects that are either DatetimeLike or TimeLike in nature\"\"\"\n\nif TYPE_CHECKING:\n from typeshed import SupportsLessThanT as SLTT\n\n_global_persistent_path: PropertyType[Path]\n\n\ndef auto_batch(iterable: Iterable) -> Generator:\n \"\"\"\n Returns a generator which yields automatically sized batches\n\n Given a sized iterable, determines an optimal batch size to be used for\n multiprocessing purposes. Using this batch size, returns a generator which\n yields batches of the iterable with the optimal size\n\n Parameters:\n iterable: An iterable from which to create a batch generator\n\n Returns:\n The batch generator of the iterable input\n \"\"\"\n return get_batches(iterable, auto_batch_size(iterable))\n\n\ndef auto_batch_size(iterable: Iterable) -> int:\n \"\"\"\n Returns a multiprocessing-optimal batch size for an iterable\n\n Given an iterable, returns an integer value representing an optimal batch\n size for use in python's multiprocessing library\n\n Parameters:\n iterable (Iterable): Sized iterable to determine optimal batch size for\n\n Returns:\n The optimal batch size for multiprocessing\n \"\"\"\n # Converting to a sized iterable to guarantee __len__ functionality\n iterable = list(iterable)\n\n # Output Parameters\n horizontal_offset = 10000\n horizontal_stretch = 70 / 100_000_000\n vertical_offset = 100\n\n # Building the quadratic\n output: Number\n output = len(iterable) - horizontal_offset\n output = output**2\n output *= -1\n output *= horizontal_stretch\n output += vertical_offset\n\n # Output bounded between 30 and 100\n return bounded(int(output), lower=30, upper=100)\n\n\ndef bounded(\n to_bound: SLTT, lower: Optional[SLTT] = None, upper: Optional[SLTT] = None\n) -> SLTT:\n \"\"\"\n Bounds an object between a lower and upper bound\n\n Given an object that defines behavior for comparison (__lt__, __gt__),\n returns the object bounded between the lower and upper bounds. Boundaries\n will be ommited if they are not provided (None). 
If lower and upper are not\n    None, they must be of the same type as to_bound.\n\n    Type Explanation:\n        SLTT (SupportsLessThanT): A TypeVar which implements the __lt__ method.\n\n    Parameters:\n        to_bound (SLTT): the object to be bounded\n        lower (Optional[SLTT]): the lower boundary of the operation\n        upper (Optional[SLTT]): the upper boundary of the operation\n\n    Returns:\n        The bounded object\n    \"\"\"\n    if lower is None and upper is None:\n        raise ValueError(\n            \"Of the parameters 'lower' and 'upper', at least one must be \" \"specified\"\n        )\n    if lower is not None:\n        to_bound = max(to_bound, lower)\n    if upper is not None:\n        to_bound = min(to_bound, upper)\n\n    return to_bound\n\n\ndef deconstruct_dt(dt: DateOrTime) -> dict[str, float]:\n    \"\"\"\n    Returns a dictionary of datetime attribute values on object 'dt'\n\n    Given a DatetimeLike object, returns a dictionary where keys are the\n    object's date and time related attribute names, and values are the object's\n    associated attribute values.\n\n    Parameters:\n        dt (DateOrTime): the dt to deconstruct\n\n    Returns:\n        A dictionary of attributes and their associated values on dt\n\n    Raises:\n        TypeError: Raised if dt is not a datetime-like object, as it won't have\n            the proper attributes.\n    \"\"\"\n    # The potential attributes to be accessed\n    d = [\"year\", \"month\", \"day\"]\n    t = [\"hour\", \"minute\", \"second\", \"microsecond\"]\n    attrs = []\n\n    # Accept string arguments to convert to datetime\n    if isinstance(dt, str):\n        dt = read_timestring(dt)\n\n    # Determine which elements should be accessed on the dt\n    if isinstance(dt, datetime):\n        attrs = d + t\n    elif isinstance(dt, time):\n        attrs = t\n    elif isinstance(dt, date):\n        attrs = d\n    else:\n        raise TypeError(f\"{dt=} is not a valid datetime object\")\n\n    # Collecting the attributes\n    dtdict = {}\n    for attr in attrs:\n        dtdict[attr] = getattr(dt, attr)\n\n    return dtdict\n\n\ndef get_batches(iterable: Iterable, size: int = 100) -> Generator:\n    \"\"\"\n    Returns a generator of the iterable which yields batches of the given size\n\n    Given an iterable, uses the size parameter to create a generator which\n    yields batches of the iterable of the given size.\n\n    Parameters:\n        iterable: The iterable to yield batches of\n        size: The batch size of the returned generator\n\n    Returns:\n        A generator which yields batches of size 'size' of the iterable\n    \"\"\"\n    # Because we will be indexing the iterable, we must instantiate the entire\n    # thing in memory in case it isn't already (e.g. generators)\n    iterable = list(iterable)\n    last = len(iterable)\n    for i in range(math.ceil(last / size)):\n        start = i * size\n        end = start + size\n        end = end if end < last else last\n        yield iterable[start:end]\n\n\ndef get_time(t: DateOrTime) -> time:\n    \"\"\"\n    Given a timestring or datetime-like object, returns a datetime.time object\n\n    Given an object t which represents a time or a datetime, returns a native\n    python datetime.time object of the appropriate time. 
t can be an isoformat\n time string or datetime string, or a datetime-like object\n\n Parameters:\n dt (DateOrTime): The time object to convert\n\n Returns:\n The converted datetime.time object\n \"\"\"\n if isinstance(t, (time, str)):\n return to_time(t)\n return to_datetime(t).time()\n\n\ndef get_weekday(dt: DatetimeLike) -> str:\n \"\"\"\n Returns the day of the week on which a DatetimeLike object falls\n\n Parameters:\n dt (DatetimeLike): The object whose weekday is determined\n\n Returns:\n String of the day of the week on which the DatetimeLike object falls\n \"\"\"\n weekdays = {\n 0: \"Monday\",\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\",\n }\n\n return weekdays[to_datetime(dt).weekday()]\n\n\ndef is_func(f: Any) -> bool:\n \"\"\"\n Returns a boolean value indicating whether or not f is a kind of function\n\n Given an object f, returns a boolean value indicating whether or not the\n object is a function. Idenfities all python objects whose sole or primary\n purpose is to be called directly, rather than objects that simply support\n an implementation of __call__.\n\n Behavior is slightly different than the inspect module's isfunction(), as it\n includes methods (bound and unbound), as well as abstract, static, and class\n methods.\n\n A 'function' is an instance of any of the following:\n * function\n * method (bound or unbound)\n * staticmethod\n * classmethod\n * abstractmethod\n * lambda\n * built-in-function\n\n Parameters:\n f: The object who's status as a function is being determined\n\n Returns:\n True if f is a method, function, builtin-method-or-function, or lambda,\n else False\n \"\"\"\n\n # Fake class to access type 'method' and 'classmethod'\n class C:\n def method(self):\n pass\n\n # Getting abstract base methods\n class ABCC(ABC):\n @abstractmethod\n def amethod(self):\n pass\n\n # Fake function to access type 'function'\n def func():\n pass\n\n # Getting classic and static methods\n cmethod = classmethod(func)\n smethod = staticmethod(func)\n\n # Fake lambda to access type 'lambda'\n lamb = lambda: None\n\n # Fake instance to access type 'bound method'\n c = C()\n\n # Gathering all callable types\n functype = type(func)\n methodtype = type(C.method)\n classmethodtype = type(cmethod)\n staticmethodtype = type(smethod)\n abstractmethodtype = type(ABCC.amethod)\n boundmethodtype = type(c.method)\n lambdatype = type(lamb)\n builtintype = type(print)\n\n return isinstance(\n f,\n (\n functype,\n methodtype,\n boundmethodtype,\n lambdatype,\n builtintype,\n abstractmethodtype,\n classmethodtype,\n staticmethodtype,\n ),\n )\n\n\ndef nearest_expiry(\n expiry: DatetimeLike, method: Literal[\"after\", \"before\", \"both\"] = \"after\"\n) -> datetime:\n \"\"\"\n Returns the nearest valid expiry to the input datetime object\n\n Determining expiries for options contracts can be difficult, because they\n must fall on a business day, and their expiry time must be the market close.\n Given an expiry whose validity is unknown, this function returns the\n nearest expiry that is guaranteed to be valid. If the given expiry is\n valid, it will be unchanged when it is returned.\n\n The method argument is used to determine how the 'nearest' is defined. 
It\n has three options: \"after\", \"before\", and \"both\"\n\n Method must be one of the following string literals:\n * \"after\": returns the nearest expiry that is AFTER the input expiry\n * \"before\": returns the nearest expiry that is BEFORE the input expiry.\n * | \"both\": compares the distances of the nearest before and after, and\n | return the smaller of the two. In the case that they are equal, the\n | date determined by \"after\" will be used.\n\n The default argument is \"after\" because using \"before\" or \"both\" can\n potentially lead to dangerous behavior for algorithms, as it can return an\n expiry which is before the current date of the algorithm. This can cause\n options contracts to initialize as expired. Only change the method\n argument if you are positive that the returned expiry will be greater\n than the algorithm's current date.\n\n Parameters:\n expiry (DatetimeLike):\n The expiry who's closest valid expiry will be determined\n\n method:\n One of \"after\", \"before\", or \"both\"\n\n Returns:\n The nearest valid expiry\n \"\"\"\n\n # Ensuring expiry is a pydatetime\n expiry = to_datetime(expiry)\n\n # All expiries must expire at market close (4PM)\n expiry = set_time(expiry, \"4:00 PM\")\n\n # Change the expiry day if it is not a weekday\n if expiry.weekday() > 4:\n\n # Closest AFTER\n if method == \"after\":\n dist = 7 - expiry.weekday()\n expiry += timedelta(days=dist)\n\n # Closest BEFORE\n elif method == \"before\":\n dist = expiry.weekday() - 4\n expiry -= timedelta(days=dist)\n\n # Comparing both\n elif method == \"both\":\n bdist = expiry.weekday() - 4\n adist = 7 - expiry.weekday()\n if bdist < adist:\n expiry -= timedelta(days=bdist)\n else:\n expiry += timedelta(days=adist)\n\n return expiry\n\n\ndef optimal_start(\n start: datetime,\n max_start: datetime,\n min_end: datetime,\n end: Optional[DatetimeLike] = None,\n t: Optional[TimeLike] = None,\n) -> datetime:\n \"\"\"\n Based an Environment's instantiated/tracked assets, returns an optimal datetime\n for starting a backtest\n\n Returns a backtest starting datetime that:\n * Is guaranteed to be within the date range of all intantiated assets\n * | Is guaranteed to have ample time for calculations of historical\n | volatility, beta, percent change etc. BEFORE the start date\n * Automatically adjusts to accomodate shorter ending periods\n\n Parameters:\n start:\n A datetime object indictating the actual starting datetime\n\n max_start:\n A datetime object indicating the maximum possible starting datetime\n\n min_end:\n A datetime object indicating the minimum possible ending datetime\n\n end (Optional[DatetimeLike]):\n The desired endpoint on which to base the optimal start point\n\n t (Optional[TimeLike]):\n The returned optimal start's time\n\n Returns:\n The optimal starting datetime\n \"\"\"\n end = min_end if end is None else to_datetime(end)\n\n # If the maximum start date is before the minimum end date, there is\n # no valid 'optimal start', because there is no date range that allows\n # backtesting of all available data.\n if max_start >= end:\n return start\n\n # Determining the optimal start period. 
To avoid errors, we will not sync to the beginning\n    optimal_delta = (end - max_start) / 2\n    optimal_date = max_start + optimal_delta\n\n    # Setting the optimal date's time to midnight unless specified otherwise\n    t = \"00:00:00\" if t is None else to_time(t)\n    optimal_date = set_time(optimal_date, t)\n\n    # Bounding the date to acceptable minimums and maximums\n    lower_bound = set_time(max_start + timedelta(days=1), t)\n    upper_bound = set_time(max_start + timedelta(days=365), t)\n    optimal_start = bounded(optimal_date, lower=lower_bound, upper=upper_bound)\n\n    return optimal_start\n\n\ndef progress_print(to_print: Any, last: list[int] = [0]) -> None:\n    \"\"\"Prints, but returns the carriage to the front of the last print\"\"\"\n    print(\"\\r\" + (\" \" * last[0]), end=\"\\r\", flush=True)  # type: ignore[operator]\n    print(to_print, end=\"\", flush=True)\n    last[0] = len(str(to_print))\n\n\ndef read_timestring(timestring: str) -> time:\n    \"\"\"\n    Given a timestring, returns a datetime.time object representative of the time\n\n    This function reads in 'timestrings', which are one of two things:\n    #. | Isoformat times as strings, using 24 hours\n       | (eg 04:00:00, 18:30, 02:59:59.99, etc)\n\n    #. | Strings based on 12 hour clocks\n       | (see ag.utils.read_twelve_hour_timestring docs)\n\n    Using this timestring, returns a python datetime.time object corresponding\n    to the time in the timestring.\n\n    Parameters:\n        timestring:\n            string representing the time\n\n    Returns:\n        The time object corresponding to the time in the timestring\n    \"\"\"\n    try:\n        return read_twelve_hour_timestring(timestring)\n    except (TypeError, ValueError):\n        return time.fromisoformat(timestring)\n\n\ndef read_twelve_hour_timestring(timestring: str) -> time:\n    \"\"\"Reads a timestring based on a 12 hour clock and returns a time\n\n    Given a timestring representing a time on a 12 hour clock, returns the\n    appropriate time object\n\n    Must be formatted as follows:\n        * hour | This is the only required value, integer\n        * minute | separated from hour by a colon, optional, integer\n        * second | separated from minute by a colon, optional, float\n        * AM/PM | string 'AM' or 'PM', separated from second by a space\n\n    When AM or PM is not provided in the timestring, AM will be assumed.\n\n    Valid Examples:\n        * '4:30 PM'\n        * '4:30 AM'\n        * '1 PM'\n        * '1'\n        * '11:59:59.999 PM'\n        * '12:00:00 AM'\n\n    Invalid Examples:\n        * '0:00'\n        * '13:30'\n        * '103 PM'\n        * '0'\n        * '22'\n        * '4:30:99 PM'\n        * '3:99 PM'\n\n    Parameters:\n        timestring: The string containing the time to convert to a time object\n\n    Returns:\n        The corresponding time object\n\n    Raises:\n        TypeError:\n            When timestring is not a string. 
Only str objects can be parsed\n\n        ValueError:\n            When the timestring is invalid / improperly formatted.\n    \"\"\"\n    # Timestrings must be strs\n    if not isinstance(timestring, str):\n        raise TypeError(f\"timestring must be a string, got {type(timestring)}\")\n\n    # Variable Initialization\n    ampm = \"AM\"\n    info = []\n    timestring = timestring.split(\" \")  # type: ignore[assignment]\n\n    # Getting AM/PM component\n    if len(timestring) > 1:\n        ampm = timestring[1]\n\n    # Getting individual time components\n    info = timestring[0].split(\":\")\n\n    # isoformat is 00:00:00.00, max 3 colons\n    if len(info) > 4:\n        raise ValueError(f\"Failed to parse timestring {timestring}\")\n\n    # collecting the attributes necessary to create a time object\n    tdict = {}\n    attrs = [\"hour\", \"minute\", \"second\", \"microsecond\"]\n    for attr, value in zip(attrs, info):\n        tdict[attr] = int(value)\n\n    # Setting missing components to 0\n    for attr in attrs:\n        if not tdict.get(attr):\n            tdict[attr] = 0\n\n    # hours less than 1 or greater than 12 are off limits in 12 hour clocks\n    if not 1 <= tdict[\"hour\"] <= 12:\n        raise ValueError(f\"Failed to parse timestring {timestring}\")\n\n    # 12:30 AM is 00:30 isoformat\n    if ampm == \"AM\" and tdict[\"hour\"] == 12:\n        tdict[\"hour\"] = 0\n\n    # 12:30 PM is 12:30 isoformat, 1:30 PM is 13:30 isoformat\n    elif ampm == \"PM\" and tdict[\"hour\"] < 12:\n        tdict[\"hour\"] += 12\n\n    # Building and returning a time object\n    return time(**tdict)  # type: ignore[arg-type]\n\n\ndef set_time(dt: DatetimeLike, t: DateOrTime) -> datetime:\n    \"\"\"Sets the given datetime-like object to the given time\n\n    Given a DatetimeLike object 'dt' and a time-like object 't', returns a\n    datetime like object that shares the date of dt and the time of t.\n\n    Very similar to datetime.combine, but accepts datetime objects for both\n    inputs.\n\n    Parameters:\n        dt (DatetimeLike): Datetime to convert\n        t (DateOrTime): Time to convert to\n\n    Returns:\n        python datetime.datetime object with converted time\n    \"\"\"\n    # Initializing the new time that will be set\n    newtime: dict[str, float] = {}\n\n    # Reading the necessary time attributes\n    if isinstance(t, str):\n        t = read_timestring(t)\n        newtime = deconstruct_dt(t)\n    elif isinstance(t, time):\n        newtime = deconstruct_dt(t)\n    else:\n        newtime = deconstruct_dt(to_datetime(t).time())\n\n    # Creating the new datetime with t=t\n    return to_datetime(dt).replace(**newtime)  # type: ignore [arg-type]\n\n\ndef timestring(t: DateOrTime) -> str:\n    \"\"\"Converts a time-like object to a 12-hour-clock timestring\n\n    Given a time-like object t, returns a timestring represented by the\n    12-hour-clock (eg. 
4:30 PM).\n\n    Parameters:\n        t (DateOrTime):\n            date or time object to read into a 12-hour-clock-based timestring\n\n    Returns:\n        A string representing the time on a 12-hour-clock\n    \"\"\"\n    # Ensuring that t is a time object\n    if not isinstance(t, time):\n        t = to_datetime(t).time()\n\n    # Deconstructing components to create a time string\n    ampm = \"AM\"\n    hour = t.hour\n    minute = t.minute if t.minute > 9 else f\"0{t.minute}\"\n    if hour >= 12:\n        # Noon and later are PM; only hours past noon wrap back to 1-11\n        ampm = \"PM\"\n        if hour > 12:\n            hour -= 12\n    elif hour == 0:\n        # Midnight is 12 AM on a 12-hour clock\n        hour = 12\n    return f\"{hour}:{minute} {ampm}\"\n\n\ndef to_datetime(dtlike: DatetimeLike) -> datetime:\n    \"\"\"\n    Given a datetime-like object, converts it to a python standard datetime\n\n    Parameters:\n        dtlike (DatetimeLike):\n            The Datetime-convertable object\n\n    Returns:\n        The converted python datetime\n\n    Raises:\n        TypeError: Only accepts python-datetime-convertable objects\n    \"\"\"\n    if isinstance(dtlike, datetime):\n        return dtlike\n    elif isinstance(dtlike, pd.Timestamp):\n        return dtlike.to_pydatetime()\n    elif isinstance(dtlike, np.datetime64):\n        return pd.Timestamp(dtlike).to_pydatetime()\n    elif isinstance(dtlike, date):\n        return datetime.combine(dtlike, datetime.min.time())\n    elif isinstance(dtlike, str):\n        return datetime.fromisoformat(dtlike)\n\n    raise TypeError(f\"Can not convert passed object {dtlike} to python datetime\")\n\n\ndef to_step(current: datetime, delta: Union[DateOrTime, timedelta, float]) -> timedelta:\n    \"\"\"\n    Converts an ambiguous delta object to a python timedelta\n\n    Given an ambiguous object which can in some way be interpreted as a timedelta\n    relative to some 'current' time, converts that object to an appropriate\n    timedelta object, or 'step' in time.\n\n    Parameters:\n        current:\n            The 'current' time, which determines how to interpret the delta\n\n        delta (Union[DateOrTime, timedelta, float]):\n            The object being passed that may represent a 'step' in time\n\n    Returns:\n        the appropriate timedelta 'step'\n\n    Raises:\n        TypeError:\n            When passed a type that can not be coerced/interpreted\n\n        ValueError:\n            When a type-appropriate object can not be coerced, or is in some way\n            invalid (eg. the step in time is BEFORE the current time)\n    \"\"\"\n    # Multiple parses must be made on strings to successfully coerce all of them\n    if isinstance(delta, str):\n        try:\n            delta = set_time(current, read_timestring(delta))\n        except ValueError:\n            delta = datetime.fromisoformat(delta)  # type: ignore[arg-type]\n\n    elif isinstance(delta, time):\n        delta = set_time(current, delta)\n\n    elif isinstance(delta, (float, int)):\n        delta = current + timedelta(days=delta)\n\n    elif isinstance(delta, timedelta):\n        delta = current + delta\n\n    # if isinstance(delta, DatetimeLike):\n    else:\n        delta = to_datetime(delta)\n\n    if delta > current:\n        return delta - current\n\n    raise ValueError(\n        f\"Passed delta {delta} is prior to current time {current}. 
Please \"\n \"choose a time AFTER the current date.\"\n )\n\n\ndef to_time(tlike: TimeLike) -> time:\n \"\"\"\n Given a TimeLike object, converts it to a python standard time object\n\n Parameters:\n tlike:\n The time-convertable object\n\n Returns:\n The converted python time object\n\n Raises:\n TypeError: Only accepts python-time-convertable objects\n \"\"\"\n if isinstance(tlike, str):\n return read_timestring(tlike)\n elif isinstance(tlike, time):\n return tlike\n\n raise TypeError(f\"Can not convert passed object {tlike} to python time\")\n\n\nclass NullClass:\n \"\"\"\n A class designed to take the place of other functions, modules, or classes\n\n This class stands in place of a function, class, or module attached to\n another class as an attribute. When an attribute is initialized as a\n NullClass, one can safely access it as an attribute, call it, and access\n attributes on it. These actions can also be performed recursively; any of\n these operations performed on the nullclass will simply return itself,\n allowing them to be chained infinitely.\n\n Use this class in place of another function or class in order to safely\n use an attribute without making constant checks.\n\n This is most useful in place of functions/classes that perform\n logging/printing, but also makes sense in place of functions that modify\n things in place or always return None.\n\n Examples:\n .. highlight:: python\n .. code-block:: python\n\n class MyClass:\n def __init__(self, data, verbose=False):\n # This is cleaner and more pythonic than...\n self.print = print if verbose else NullClass()\n self.print(\"Initialized as Verbose!\")\n\n # Alternative 1\n self.print = print if verbose else lambda *args, **kwargs: None\n self.print(\"Initialized as Verbose!\")\n\n # Alternative 2\n self.print = print if print is verbose else None\n if self.print is not None:\n self.print(\"Initialized as Verbose!\")\n\n # Alternative 3\n self.verbose = verbose\n if self.verbose:\n print(\"Initialized as Verbose!\")\n\n # etc etc etc...\n\n # This is cleaner and more pythonic than...\n self.tqdm = tqdm.progress_bar if verbose else NullClass()\n with self.tqdm(total=1000) as pbar:\n while condition:\n self.do_something()\n pbar.update(1) # Safe!\n\n # Alternative\n self.verbose = verbose\n if verbose:\n with tqdm.progress_bar(total=1000) as pbar:\n while condition:\n self.do_something()\n pbar.update(1)\n else:\n while condition:\n self.do_something() # gross.\n \"\"\"\n\n def __call__(self, *args: Any, **kwargs: Any) -> NullClass:\n return self\n\n def __getattr__(self, attr: str) -> NullClass:\n return self\n\n def __enter__(self, *args, **kwargs) -> NullClass:\n return self\n\n def __exit__(self, *args, **kwargs) -> None:\n pass\n\n def __bool__(self) -> bool:\n return False\n"
] | [
[
"pandas.Timestamp"
]
] |
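A few usage examples for the time helpers in `alphagradient/utils.py` above, matching their documented formats (this assumes the module is importable as `alphagradient.utils`; the shown results follow from the docstrings):

from datetime import datetime
from alphagradient.utils import bounded, read_timestring, set_time, timestring

read_timestring("4:30 PM")                 # datetime.time(16, 30)
read_timestring("18:30")                   # 12-hour parse fails -> isoformat fallback -> datetime.time(18, 30)
set_time(datetime(2022, 1, 3), "4:00 PM")  # datetime.datetime(2022, 1, 3, 16, 0)
timestring(datetime(2022, 1, 3, 16, 30))   # '4:30 PM'
bounded(150, lower=30, upper=100)          # 100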
ozcell/gym_wmgds_ma | [
"c2cb22943913361947216b908d50decc46616e99"
] | [
"gym_wmgds/envs/mujoco/ant.py"
] | [
"import numpy as np\nfrom gym_wmgds import utils\nfrom gym_wmgds.envs.mujoco import mujoco_env\n\nclass AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n xposbefore = self.get_body_com(\"torso\")[0]\n self.do_simulation(a, self.frame_skip)\n xposafter = self.get_body_com(\"torso\")[0]\n forward_reward = (xposafter - xposbefore)/self.dt\n ctrl_cost = .5 * np.square(a).sum()\n contact_cost = 0.5 * 1e-3 * np.sum(\n np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n survive_reward = 1.0\n reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n state = self.state_vector()\n notdone = np.isfinite(state).all() \\\n and state[2] >= 0.2 and state[2] <= 1.0\n done = not notdone\n ob = self._get_obs()\n return ob, reward, done, dict(\n reward_forward=forward_reward,\n reward_ctrl=-ctrl_cost,\n reward_contact=-contact_cost,\n reward_survive=survive_reward)\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[2:],\n self.sim.data.qvel.flat,\n np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n ])\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n"
] | [
[
"numpy.square",
"numpy.isfinite",
"numpy.clip"
]
] |
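The step reward in the Ant environment above is a sum of four terms. A tiny numeric illustration with made-up values (no MuJoCo required; `dt` and the arrays are hypothetical stand-ins for `self.dt`, the action, and `sim.data.cfrc_ext`):

import numpy as np

dt = 0.05                                # illustrative timestep * frame_skip
xposbefore, xposafter = 0.00, 0.02
a = np.array([0.1, -0.2, 0.3])           # hypothetical action
cfrc_ext = np.array([0.5, -2.0, 0.1])    # hypothetical external contact forces

forward_reward = (xposafter - xposbefore) / dt                            # 0.4
ctrl_cost = 0.5 * np.square(a).sum()                                      # 0.07
contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(cfrc_ext, -1, 1)))  # ~0.00063
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
print(reward)                            # ~1.32937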
SivilTaram/dialogue-utterance-rewriter-pytorch | [
"92c2254958b7a1ee9199836f7f2236575270983f"
] | [
"onmt/encoders/bert.py"
] | [
"\"\"\"\nImplementation from: https://raw.githubusercontent.com/Zenglinxiao/OpenNMT-py/bert/onmt/encoders/bert.py\n@Author: Zenglinxiao\n\"\"\"\n\nimport torch.nn as nn\nfrom onmt.encoders.transformer import TransformerEncoderLayer\nfrom onmt.utils.misc import sequence_mask\n\n\nclass BertEncoder(nn.Module):\n \"\"\"BERT Encoder: A Transformer Encoder with LayerNorm and BertPooler.\n :cite:`DBLP:journals/corr/abs-1810-04805`\n\n Args:\n embeddings (onmt.modules.BertEmbeddings): embeddings to use\n num_layers (int): number of encoder layers.\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n dropout (float): dropout parameters\n \"\"\"\n\n def __init__(self, embeddings, num_layers=12, d_model=768, heads=12,\n d_ff=3072, dropout=0.1, attention_dropout=0.1,\n max_relative_positions=0):\n super(BertEncoder, self).__init__()\n self.num_layers = num_layers\n self.d_model = d_model\n self.heads = heads\n self.dropout = dropout\n # Feed-Forward size should be 4*d_model as in paper\n self.d_ff = d_ff\n\n self.embeddings = embeddings\n # Transformer Encoder Block\n self.encoder = nn.ModuleList(\n [TransformerEncoderLayer(d_model, heads, d_ff,\n dropout, attention_dropout,\n max_relative_positions=max_relative_positions,\n activation='gelu') for _ in range(num_layers)])\n\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)\n self.pooler = BertPooler(d_model)\n\n @classmethod\n def from_opt(cls, opt, embeddings):\n \"\"\"Alternate constructor.\"\"\"\n return cls(\n embeddings,\n opt.enc_layers,\n opt.word_vec_size,\n opt.heads,\n opt.transformer_ff,\n opt.dropout[0] if type(opt.dropout) is list else opt.dropout,\n opt.attention_dropout[0] if type(opt.attention_dropout)\n is list else opt.attention_dropout,\n opt.max_relative_positions\n )\n\n def forward(self, input_ids, lengths, token_type_ids=None):\n \"\"\"\n Args:\n input_ids (Tensor): ``(seq_len, batch_size, feature_dim)``, padding ids=0\n lengths (Tensor): ``(batch_size)``, record length of sequence\n token_type_ids (seq_len, batch_size): ``(B, S)``, A(0), B(1), pad(0)\n Returns:\n all_encoder_layers (list of Tensor): ``(B, S, H)``, token level\n pooled_output (Tensor): ``(B, H)``, sequence level\n \"\"\"\n # remove the feature dimension\n # seq_len x batch_size\n\n emb = self.embeddings(input_ids, token_type_ids)\n\n out = emb.transpose(0, 1).contiguous()\n # [batch, seq] -> [batch, 1, seq]\n mask = ~sequence_mask(lengths).unsqueeze(1)\n\n for layer in self.encoder:\n out = layer(out, mask)\n out = self.layer_norm(out)\n\n return emb, out.transpose(0, 1).contiguous(), lengths\n\n def update_dropout(self, dropout):\n self.dropout = dropout\n self.embeddings.update_dropout(dropout)\n for layer in self.encoder:\n layer.update_dropout(dropout)\n\n\nclass BertPooler(nn.Module):\n def __init__(self, hidden_size):\n \"\"\"A pooling block (Linear layer followed by Tanh activation).\n\n Args:\n hidden_size (int): size of hidden layer.\n \"\"\"\n\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(hidden_size, hidden_size)\n self.activation_fn = nn.Tanh()\n\n def forward(self, hidden_states):\n \"\"\"hidden_states[:, 0, :] --> {Linear, Tanh} --> Returns.\n\n Args:\n hidden_states (Tensor): last layer's hidden_states, ``(B, S, H)``\n Returns:\n pooled_output (Tensor): transformed output of last layer's hidden\n \"\"\"\n\n first_token_tensor = hidden_states[:, 0, :] # [batch, d_model]\n pooled_output = self.activation_fn(self.dense(first_token_tensor))\n return pooled_output"
] | [
[
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.Tanh"
]
] |
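The only non-obvious step in `BertEncoder.forward` above is the padding mask: `sequence_mask(lengths)` marks real token positions True up to each sequence's length, and the inversion flags the padded positions that attention should ignore. A standalone sketch of the same logic in plain torch (this re-implementation mirrors what `onmt.utils.misc.sequence_mask` does; treat it as an illustrative assumption, not the library code):

import torch

def sequence_mask(lengths, max_len=None):
    # True for positions < length, False for padding
    max_len = max_len or int(lengths.max())
    return torch.arange(max_len, device=lengths.device)[None, :] < lengths[:, None]

lengths = torch.tensor([3, 1])
mask = ~sequence_mask(lengths).unsqueeze(1)  # [batch, 1, seq]: True where padded
print(mask)
# tensor([[[False, False, False]],
#         [[False,  True,  True]]])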
emaballarin/phytorch | [
"68cf0a630e2fee9dd98f08639edcceb2389adf35"
] | [
"tests/cosmology/test_cosmology_apsuite.py"
] | [
"# Based on the astropy test suite (v4.2.1)\n# (https://github.com/astropy/astropy/blob/v4.2.1/astropy/cosmology/tests/test_cosmology.py)\nfrom io import StringIO\nfrom typing import Type\n\nimport numpy as np\nimport pytest\nimport torch\nfrom pytest import mark\nfrom torch import tensor\n\nimport phytorch.cosmology.drivers.analytic\nimport phytorch.cosmology.drivers.analytic_diff\nimport phytorch.cosmology.special\nfrom phytorch.constants import codata2014, G as Newton_G\nfrom phytorch.cosmology.special import AbstractFlatLambdaCDMR, AbstractLambdaCDMR\nfrom phytorch.units.astro import Gpc, Gyr, Mpc\nfrom phytorch.units.si import cm, gram, kelvin, km, s\nfrom phytorch.units.unit import Unit\nfrom tests.common.closeness import close\nfrom tests.common.dtypes import with_default_double\n\n\nZERO = torch.zeros(())\nONE = torch.ones(())\nSMALL = 1e-16\nZ = tensor([0, 0.5, 1, 2])\n\nH70 = 70 * km/s/Mpc\nH704 = 70.4 * km/s/Mpc\n\n\ndef test_critical_density():\n fac = (Newton_G / codata2014.G).to(Unit())\n\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.H0 = H704\n cosmo.Om0 = 0.272\n\n # constants defined only so accurately\n assert ((cosmo.critical_density0 * fac).to(gram / cm**3) - 9.309668456020899e-30) < 1e-9\n assert cosmo.critical_density0 == cosmo.critical_density(0)\n\n assert close((cosmo.critical_density(tensor([1, 5])) * fac).to(gram / cm**3).value,\n [2.70352772e-29, 5.53739080e-28])\n\n\ndef test_xtfuncs():\n cosmo = AbstractLambdaCDMR()\n cosmo.H0, cosmo.Om0, cosmo.Ode0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, 0.5, 3.04, 2.725 * kelvin\n\n z = tensor([2, 3.2])\n assert close(cosmo.lookback_time_integrand(tensor(3)), 0.052218976654969378)\n assert close(cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541])\n assert close(cosmo.abs_distance_integrand(tensor(3)), 3.3420145059180402)\n assert close(cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758])\n\n\ndef test_zeroing():\n cosmo = AbstractLambdaCDMR()\n cosmo.Om0 = 0.27\n cosmo.Ode0 = 0\n cosmo.Or0 = 0\n\n assert cosmo.Ode(1.5) == 0\n assert (cosmo.Ode(Z) == ZERO).all()\n assert cosmo.Or(1.5) == 0\n assert (cosmo.Or(Z) == ZERO).all()\n # TODO: add neutrinos\n # assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])\n # assert allclose(cosmo.Onu(z), [0, 0, 0, 0])\n assert (cosmo.Ob(Z) == ZERO).all()\n\n\ndef test_matter():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.Om0 = 0.3\n cosmo.Ob0 = 0.045\n\n assert cosmo.Om(0) == 0.3\n assert cosmo.Ob(0) == 0.045\n assert close(cosmo.Om(Z), [0.3, 0.59124088, 0.77419355, 0.92045455])\n assert close(cosmo.Ob(Z), [0.045, 0.08868613, 0.11612903, 0.13806818])\n assert close(cosmo.Odm(Z), [0.255, 0.50255474, 0.65806452, 0.78238636])\n assert close(cosmo.Ob(Z) + cosmo.Odm(Z), cosmo.Om(Z))\n\n\ndef test_ocurv():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.Om0 = 0.3\n\n assert cosmo.Ok0 == 0\n assert cosmo.Ok(0) == 0\n assert (cosmo.Ok(Z) == ZERO).all()\n\n cosmo = AbstractLambdaCDMR()\n cosmo.Om0 = 0.3\n cosmo.Ode0 = 0.5\n assert abs(cosmo.Ok0 - 0.2) < SMALL\n assert abs(cosmo.Ok(0) - 0.2) < SMALL\n assert close(cosmo.Ok(Z), [0.2, 0.22929936, 0.21621622, 0.17307692])\n\n assert (cosmo.Ok(Z) + cosmo.Om(Z) + cosmo.Ode(Z) == ONE).all()\n\n\ndef test_ode():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.Om0 = 0.3\n\n assert cosmo.Ode(0) == cosmo.Ode0\n assert close(cosmo.Ode(Z), [0.7, 0.408759, 0.2258065, 0.07954545])\n\n\ndef test_tcmb():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.H0 = H704\n cosmo.Om0 = 0.272\n cosmo.Tcmb0 = 2.5 * kelvin\n\n assert cosmo.Tcmb(2) == 7.5 * kelvin\n assert (cosmo.Tcmb(tensor([0, 
1, 2, 3, 9.])).to(kelvin).value == tensor([2.5, 5, 7.5, 10, 25])).all()\n\n\ndef test_efunc_vs_invefunc():\n cosmo = AbstractLambdaCDMR()\n cosmo.Om0 = 0.3\n cosmo.Ode0 = 0.7\n\n assert cosmo.efunc(0.5) * cosmo.inv_efunc(0.5) == 1\n assert (cosmo.efunc(Z) * cosmo.inv_efunc(Z) == ONE).all()\n # TODO: test this for subclasses?\n\n\nclass BaseLambdaCDMDriverTest:\n flat_cosmo_cls: Type[phytorch.cosmology.special.BaseFlatLambdaCDM]\n cosmo_cls: Type[phytorch.cosmology.special.BaseLambdaCDM]\n\n\nclass BaseLambdaCDMTest(BaseLambdaCDMDriverTest):\n flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDM]\n cosmo_cls: Type[phytorch.cosmology.special.LambdaCDM]\n\n @with_default_double\n @mark.parametrize(('func', 'vals', 'unit', 'rtol'), (\n # From the astropy test suite:\n # Test values were taken from the following web cosmology\n # calculators on 27th Feb 2012:\n # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html\n # (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)\n # Kempner: http://www.kempner.net/cosmic.php\n # iCosmos: http://www.icosmos.co.uk/index.html\n (phytorch.cosmology.special.FlatLambdaCDM.comoving_distance,\n (3364.5, 3364.8, 3364.7988), Mpc, 1e-4),\n (phytorch.cosmology.special.FlatLambdaCDM.angular_diameter_distance,\n (1682.3, 1682.4, 1682.3994), Mpc, 1e-4),\n (phytorch.cosmology.special.FlatLambdaCDM.luminosity_distance,\n (6729.2, 6729.6, 6729.5976), Mpc, 1e-4),\n (phytorch.cosmology.special.FlatLambdaCDM.lookback_time,\n (7.841, 7.84178, 7.843), Gyr, 1e-3),\n (phytorch.cosmology.special.FlatLambdaCDM.lookback_distance,\n (2404.0, 2404.24, 2404.4), Mpc, 1e-3),\n ))\n def test_flat_z1(self, func, vals, unit, rtol):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0 = H70\n cosmo.Om0 = 0.27\n\n assert close(getattr(cosmo, func.__name__)(1).to(unit).value, vals, rtol=rtol)\n\n @mark.parametrize('Om0, Ode0, vals', (\n (0.27, 0.73, (29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802)),\n (0.27, 0, (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814)),\n (2, 0, (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))\n ))\n def test_comoving_volume(self, Om0, Ode0, vals):\n z = tensor([0.5, 1, 2, 3, 5, 9])\n # for (Om0, Ode0), vals in zip(\n # ((0.27, 0.73), (0.27, 0), (2, 0)),\n # # Form Ned Wright's calculator: not very *accurate* (sic), so\n # # like astropy, test to very low precision\n # ((29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802),\n # (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814),\n # (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))\n # ):\n c = self.cosmo_cls()\n c.H0, c.Om0, c.Ode0 = H70, Om0, Ode0\n\n assert close(c.comoving_volume(z).to(Gpc**3).value, vals, rtol=1e-2)\n\n # TODO: (requires integration) test_differential_comoving_volume\n\n icosmo_flat = \"\"\"\\\n # from icosmo (icosmo.org)\n # Om 0.3 w -1 h 0.7 Ol 0.7\n # z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 669.77536 576.15085 778.61386\n 0.32500000 1285.5964 970.26143 1703.4152\n 0.50000000 1888.6254 1259.0836 2832.9381\n 0.66250000 2395.5489 1440.9317 3982.6000\n 0.82500000 2855.5732 1564.6976 5211.4210\n 1.0000000 3303.8288 1651.9144 6607.6577\n 1.1625000 3681.1867 1702.2829 7960.5663\n 1.3250000 4025.5229 1731.4077 9359.3408\n 1.5000000 4363.8558 1745.5423 10909.640\n 1.6625000 4651.4830 1747.0359 12384.573\n 1.8250000 4916.5970 1740.3883 13889.387\n 2.0000000 5179.8621 1726.6207 15539.586\n 2.1625000 5406.0204 1709.4136 17096.540\n 2.3250000 5616.5075 1689.1752 18674.888\n 2.5000000 5827.5418 
1665.0120 20396.396\n 2.6625000 6010.4886 1641.0890 22013.414\n 2.8250000 6182.1688 1616.2533 23646.796\n 3.0000000 6355.6855 1588.9214 25422.742\n 3.1625000 6507.2491 1563.3031 27086.425\n 3.3250000 6650.4520 1537.6768 28763.205\n 3.5000000 6796.1499 1510.2555 30582.674\n 3.6625000 6924.2096 1485.0852 32284.127\n 3.8250000 7045.8876 1460.2876 33996.408\n 4.0000000 7170.3664 1434.0733 35851.832\n 4.1625000 7280.3423 1410.2358 37584.767\n 4.3250000 7385.3277 1386.9160 39326.870\n 4.5000000 7493.2222 1362.4040 41212.722\n 4.6625000 7588.9589 1340.2135 42972.480\n \"\"\"\n\n icosmo_open = \"\"\"\\\n # from icosmo (icosmo.org)\n # Om 0.3 w -1 h 0.7 Ol 0.1\n # z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 643.08185 553.18868 747.58265\n 0.32500000 1200.9858 906.40441 1591.3062\n 0.50000000 1731.6262 1154.4175 2597.4393\n 0.66250000 2174.3252 1307.8648 3614.8157\n 0.82500000 2578.7616 1413.0201 4706.2399\n 1.0000000 2979.3460 1489.6730 5958.6920\n 1.1625000 3324.2002 1537.2024 7188.5829\n 1.3250000 3646.8432 1568.5347 8478.9104\n 1.5000000 3972.8407 1589.1363 9932.1017\n 1.6625000 4258.1131 1599.2913 11337.226\n 1.8250000 4528.5346 1603.0211 12793.110\n 2.0000000 4804.9314 1601.6438 14414.794\n 2.1625000 5049.2007 1596.5852 15968.097\n 2.3250000 5282.6693 1588.7727 17564.875\n 2.5000000 5523.0914 1578.0261 19330.820\n 2.6625000 5736.9813 1566.4113 21011.694\n 2.8250000 5942.5803 1553.6158 22730.370\n 3.0000000 6155.4289 1538.8572 24621.716\n 3.1625000 6345.6997 1524.4924 26413.975\n 3.3250000 6529.3655 1509.6799 28239.506\n 3.5000000 6720.2676 1493.3928 30241.204\n 3.6625000 6891.5474 1478.0799 32131.840\n 3.8250000 7057.4213 1462.6780 34052.058\n 4.0000000 7230.3723 1446.0745 36151.862\n 4.1625000 7385.9998 1430.7021 38130.224\n 4.3250000 7537.1112 1415.4199 40135.117\n 4.5000000 7695.0718 1399.1040 42322.895\n 4.6625000 7837.5510 1384.1150 44380.133\n \"\"\"\n\n icosmo_closed = \"\"\"\\\n # from icosmo (icosmo.org)\n # Om 2 w -1 h 0.7 Ol 0.1\n # z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 601.80160 517.67879 699.59436\n 0.32500000 1057.9502 798.45297 1401.7840\n 0.50000000 1438.2161 958.81076 2157.3242\n 0.66250000 1718.6778 1033.7912 2857.3019\n 0.82500000 1948.2400 1067.5288 3555.5381\n 1.0000000 2152.7954 1076.3977 4305.5908\n 1.1625000 2312.3427 1069.2914 5000.4410\n 1.3250000 2448.9755 1053.3228 5693.8681\n 1.5000000 2575.6795 1030.2718 6439.1988\n 1.6625000 2677.9671 1005.8092 7130.0873\n 1.8250000 2768.1157 979.86398 7819.9270\n 2.0000000 2853.9222 951.30739 8561.7665\n 2.1625000 2924.8116 924.84161 9249.7167\n 2.3250000 2988.5333 898.80701 9936.8732\n 2.5000000 3050.3065 871.51614 10676.073\n 2.6625000 3102.1909 847.01459 11361.774\n 2.8250000 3149.5043 823.39982 12046.854\n 3.0000000 3195.9966 798.99915 12783.986\n 3.1625000 3235.5334 777.30533 13467.908\n 3.3250000 3271.9832 756.52790 14151.327\n 3.5000000 3308.1758 735.15017 14886.791\n 3.6625000 3339.2521 716.19347 15569.263\n 3.8250000 3368.1489 698.06195 16251.319\n 4.0000000 3397.0803 679.41605 16985.401\n 4.1625000 3422.1142 662.87926 17666.664\n 4.3250000 3445.5542 647.05243 18347.576\n 4.5000000 3469.1805 630.76008 19080.493\n 4.6625000 3489.7534 616.29199 19760.729\n \"\"\"\n\n @mark.parametrize('Om0, Ode0, data', (\n (0.3, 0.7, icosmo_flat), (0.3, 0.1, icosmo_open), (2, 0.1, icosmo_closed)\n ))\n def test_flat_open_closed_icosmo(self, Om0, Ode0, data):\n cosmo = 
self.cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, Om0, Ode0\n\n z, dm, da, dl = (tensor(_, dtype=torch.get_default_dtype())\n for _ in np.loadtxt(StringIO(data), unpack=True))\n\n assert close(cosmo.comoving_transverse_distance(z).to(Mpc).value, dm)\n assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, da)\n assert close(cosmo.luminosity_distance(z).to(Mpc).value, dl)\n\n def test_distmod(self):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0 = H704, 0.272\n\n assert cosmo.hubble_distance.to(Mpc) == 4258.415596590909\n assert close(cosmo.distmod(tensor([1, 5])), [44.124857, 48.40167258])\n\n @with_default_double\n def test_negdistmod(self):\n cosmo = self.cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, 0.2, 1.3\n z = tensor([50, 100])\n assert close(cosmo.luminosity_distance(z).to(Mpc).value, [16612.44047622, -46890.79092244])\n assert close(cosmo.distmod(z), [46.102167189, 48.355437790944])\n\n def test_comoving_distance_z1z2(self):\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 0.3, 0.8\n\n with pytest.raises(RuntimeError):\n cosmo.comoving_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))\n\n assert cosmo.comoving_distance_z1z2(1, 2) == - cosmo.comoving_distance_z1z2(2, 1)\n assert close(\n cosmo.comoving_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,\n [3767.90579253, 2386.25591391, -1381.64987862, 2893.11776663, 174.1524683]\n )\n\n @with_default_double\n @mark.parametrize('Om0, val', (\n # (0, 2997.92458), # TODO: cannot do Om0=0 with LambdaCDM, need special cosmology\n (1, 1756.1435599923348),\n ))\n def test_distance_in_special_cosmologies(self, Om0, val):\n cosmo = self.flat_cosmo_cls()\n cosmo.Om0 = Om0\n\n assert close(cosmo.comoving_distance(0).to(Mpc).value, 0)\n assert close(cosmo.comoving_distance(1).to(Mpc).value, val)\n\n @with_default_double\n def test_comoving_transverse_distance_z1z2(self):\n z1, z2 = tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])\n\n cosmo = self.flat_cosmo_cls()\n cosmo.Om0 = 0.3\n\n with pytest.raises(RuntimeError):\n cosmo.comoving_transverse_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))\n\n assert close(cosmo.comoving_transverse_distance_z1z2(1, 2).to(Mpc).value, 1313.2232194828466)\n\n assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)\n\n cosmo = self.flat_cosmo_cls()\n cosmo.Om0 = 1.5\n assert close(\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,\n [2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258]\n )\n assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)\n\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 0.3, 0.5\n assert close(\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,\n [3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884]\n )\n\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 1, 0.2\n assert close(\n cosmo.comoving_transverse_distance_z1z2(0.1, tensor([0, 0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,\n [-281.31602666724865, 0, 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927]\n )\n\n def test_angular_diameter_distance_z1z2(self):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0 = H704, 0.272\n\n with pytest.raises(RuntimeError):\n cosmo.angular_diameter_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))\n\n assert close(cosmo.angular_diameter_distance_z1z2(1, 
2).to(Mpc).value, 646.22968662822018)\n assert close(\n cosmo.angular_diameter_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,\n [1760.0628637762106, 1670.7497657219858, -969.34452994, 1159.0970895962193, 115.72768186186921]\n )\n assert close(\n cosmo.angular_diameter_distance_z1z2(0.1, tensor([0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,\n [0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976]\n )\n\n # Non-flat (positive Ok0) test\n cosmo = self.cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Ode0 = H704, 0.2, 0.5\n assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 620.1175337852428)\n\n # Non-flat (negative Ok0) test\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 2, 1\n assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 228.42914659246014)\n\n def test_absorption_distance(self):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0 = H704, 0.272\n assert close(cosmo.absorption_distance(3), 7.98685853)\n assert close(cosmo.absorption_distance(tensor([1, 3])), [1.72576635, 7.98685853])\n\n\nclass BaseLambdaCDMRTest(BaseLambdaCDMDriverTest):\n flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDMR]\n cosmo_cls: Type[phytorch.cosmology.special.LambdaCDMR]\n\n @with_default_double\n def test_ogamma(self):\n z = tensor([1, 10, 500, 1000])\n\n for Neff, Tcmb0, vals in (\n # (3, 0, [1651.9, 858.2, 26.855, 13.642]), # cannot have Or0=0\n (3, 2.725, [1651.8, 857.9, 26.767, 13.582]),\n (3, 4, [1651.4, 856.6, 26.489, 13.405]),\n # (3.04, 0, [1651.91, 858.205, 26.8586, 13.6469]), # cannot have Or0=0\n (3.04, 2.725, [1651.76, 857.817, 26.7688, 13.5841]),\n (3.04, 4, [1651.21, 856.411, 26.4845, 13.4028]),\n ):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, Neff, Tcmb0*kelvin\n\n assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, vals, rtol=5e-4)\n\n # from astropy: Just to be really sure, we also do a version where the\n # integral is analytic, which is a Ode = 0 flat universe. In this case\n # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)\n # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.\n hubdis = (299792.458 / 70.0)\n Neff = 3.04\n for Tcmb0 in (2.725, 5):\n Ogamma0h2 = 4 * 5.670373e-8 / 299792458**3 * Tcmb0**4 / 1.87837e-26\n Onu0h2 = Ogamma0h2 * 7/8 * (4 / 11)**(4/3) * Neff\n Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2\n vals = 2 * hubdis * (((1 + Or0*z) / (1+z))**0.5 - 1) / (Or0 - 1)\n\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Neff, cosmo.Tcmb0, cosmo.Ode0 = H70, Neff, Tcmb0 * kelvin, 0\n\n assert close(cosmo.comoving_distance(z).to(Mpc).value, vals)\n\n\nclass TestAnalyticLambdaCDM(BaseLambdaCDMTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDM\n cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDM\n\n\nclass TestAnalyticCDMR(BaseLambdaCDMRTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDMR\n cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDMR\n\n\nclass TestAnalyticDiffLambdaCDM(BaseLambdaCDMTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDM\n cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDM\n\n\nclass TestAnalyticDiffCDMR(BaseLambdaCDMRTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDMR\n cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDMR\n\n\n# TODO: (age...) test_age\n# TODO: (age...) test_age_in_special_cosmologies\n# TODO: (neutrinos, weird models...) 
test_distances\n"
] | [
[
"torch.zeros",
"torch.ones",
"torch.tensor",
"torch.get_default_dtype"
]
] |
Ankuraxz/gan | [
"b956c7d571539fd1053b3df3dddddbcbd27be65c"
] | [
"tensorflow_gan/examples/progressive_gan/networks_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python2 python3\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_gan.examples.progressive_gan import layers\nfrom tensorflow_gan.examples.progressive_gan import networks\n\n\ndef _get_grad_norm(ys, xs):\n \"\"\"Compute 2-norm of dys / dxs.\"\"\"\n return tf.sqrt(\n tf.add_n([\n tf.reduce_sum(input_tensor=tf.square(g))\n for g in tf.gradients(ys=ys, xs=xs)\n ]))\n\n\ndef _num_filters_stub(block_id):\n return networks.num_filters(block_id, 8, 1, 8)\n\n\nclass NetworksTest(tf.test.TestCase):\n\n def test_resolution_schedule_correct(self):\n rs = networks.ResolutionSchedule(\n start_resolutions=[5, 3], scale_base=2, num_resolutions=3)\n self.assertEqual(rs.start_resolutions, (5, 3))\n self.assertEqual(rs.scale_base, 2)\n self.assertEqual(rs.num_resolutions, 3)\n self.assertEqual(rs.final_resolutions, (20, 12))\n self.assertEqual(rs.scale_factor(1), 4)\n self.assertEqual(rs.scale_factor(2), 2)\n self.assertEqual(rs.scale_factor(3), 1)\n with self.assertRaises(ValueError):\n rs.scale_factor(0)\n with self.assertRaises(ValueError):\n rs.scale_factor(4)\n\n def test_block_name(self):\n self.assertEqual(networks.block_name(10), 'progressive_gan_block_10')\n\n def test_min_total_num_images(self):\n self.assertEqual(networks.min_total_num_images(7, 8, 4), 52)\n\n def test_compute_progress(self):\n if tf.executing_eagerly():\n progress_output = []\n for current_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]:\n progress = networks.compute_progress(\n current_image_id,\n stable_stage_num_images=7,\n transition_stage_num_images=8,\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n progress_output.append(sess.run(progress))\n else:\n current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])\n progress = networks.compute_progress(\n current_image_id_ph,\n stable_stage_num_images=7,\n transition_stage_num_images=8,\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n progress_output = [\n sess.run(progress, feed_dict={current_image_id_ph: cur_image_id})\n for cur_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]\n ]\n\n self.assertArrayNear(progress_output,\n [0.0, 0.0, 0.0, 0.0, 0.125, 0.375, 1.0, 1.0, 1.0],\n 1.0e-6)\n\n def test_generator_alpha(self):\n with self.cached_session(use_gpu=True) as sess:\n alpha_fixed_block_id = [\n sess.run(\n networks._generator_alpha(2, tf.constant(progress, tf.float32)))\n for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]\n ]\n alpha_fixed_progress = [\n sess.run(\n networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))\n for block_id in range(1, 5)\n ]\n\n self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],\n 1.0e-6)\n self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)\n\n def test_discriminator_alpha(self):\n with self.cached_session(use_gpu=True) as sess:\n 
alpha_fixed_block_id = [sess.run(networks._discriminator_alpha(\n 2, tf.constant(progress, tf.float32))) for progress in\n [0, 0.2, 1, 1.2, 2, 2.2, 3]]\n alpha_fixed_progress = [sess.run(networks._discriminator_alpha(\n block_id, tf.constant(1.2, tf.float32))) for block_id in range(1, 5)]\n\n self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)\n self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)\n\n def test_blend_images_in_stable_stage(self):\n x_np = np.random.normal(size=[2, 8, 8, 3])\n x = tf.constant(x_np, tf.float32)\n x_blend = networks.blend_images(\n x,\n progress=tf.constant(0.0),\n resolution_schedule=networks.ResolutionSchedule(\n scale_base=2, num_resolutions=2),\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n x_blend_np = sess.run(x_blend)\n x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))\n self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)\n\n def test_blend_images_in_transition_stage(self):\n x_np = np.random.normal(size=[2, 8, 8, 3])\n x = tf.constant(x_np, tf.float32)\n x_blend = networks.blend_images(\n x,\n tf.constant(0.2),\n resolution_schedule=networks.ResolutionSchedule(\n scale_base=2, num_resolutions=2),\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n x_blend_np = sess.run(x_blend)\n x_blend_expected_np = 0.8 * sess.run(\n layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np\n self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)\n\n def test_num_filters(self):\n self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)\n self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)\n\n def test_generator_grad_norm_progress(self):\n if tf.executing_eagerly():\n # tf.placeholder() is not compatible with eager execution.\n return\n stable_stage_num_images = 2\n transition_stage_num_images = 3\n\n current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])\n progress = networks.compute_progress(\n current_image_id_ph,\n stable_stage_num_images,\n transition_stage_num_images,\n num_blocks=3)\n z = tf.random.normal([2, 10], dtype=tf.float32)\n x, _ = networks.generator(\n z, progress, _num_filters_stub,\n networks.ResolutionSchedule(\n start_resolutions=(4, 4), scale_base=2, num_resolutions=3))\n fake_loss = tf.reduce_sum(input_tensor=tf.square(x))\n grad_norms = [\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))\n ]\n\n grad_norms_output = None\n with self.cached_session(use_gpu=True) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})\n x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})\n grad_norms_output = np.array([\n sess.run(grad_norms, feed_dict={current_image_id_ph: i})\n for i in range(15) # total num of images\n ])\n\n self.assertEqual((2, 16, 16, 3), x1_np.shape)\n self.assertEqual((2, 16, 16, 3), x2_np.shape)\n # The gradient of block_1 is always on.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 0] > 0), 0,\n 'gradient norms {} for block 1 is not always on'.format(\n grad_norms_output[:, 0]))\n # The gradient of block_2 is on after 1 stable stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 1] > 0), 3,\n 'gradient norms {} for block 2 is not on at step 3'.format(\n 
grad_norms_output[:, 1]))\n # The gradient of block_3 is on after 2 stable stage + 1 transition stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 2] > 0), 8,\n 'gradient norms {} for block 3 is not on at step 8'.format(\n grad_norms_output[:, 2]))\n\n def test_discriminator_grad_norm_progress(self):\n if tf.executing_eagerly():\n # tf.placeholder() is not compatible with eager execution.\n return\n stable_stage_num_images = 2\n transition_stage_num_images = 3\n\n current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])\n progress = networks.compute_progress(\n current_image_id_ph,\n stable_stage_num_images,\n transition_stage_num_images,\n num_blocks=3)\n x = tf.random.normal([2, 16, 16, 3])\n logits, _ = networks.discriminator(\n x, progress, _num_filters_stub,\n networks.ResolutionSchedule(\n start_resolutions=(4, 4), scale_base=2, num_resolutions=3))\n fake_loss = tf.reduce_sum(input_tensor=tf.square(logits))\n grad_norms = [\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))\n ]\n\n grad_norms_output = None\n with self.cached_session(use_gpu=True) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n grad_norms_output = np.array([\n sess.run(grad_norms, feed_dict={current_image_id_ph: i})\n for i in range(15) # total num of images\n ])\n\n # The gradient of block_1 is always on.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 0] > 0), 0,\n 'gradient norms {} for block 1 is not always on'.format(\n grad_norms_output[:, 0]))\n # The gradient of block_2 is on after 1 stable stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 1] > 0), 3,\n 'gradient norms {} for block 2 is not on at step 3'.format(\n grad_norms_output[:, 1]))\n # The gradient of block_3 is on after 2 stable stage + 1 transition stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 2] > 0), 8,\n 'gradient norms {} for block 3 is not on at step 8'.format(\n grad_norms_output[:, 2]))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v1.placeholder",
"numpy.argmax",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.gradients",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.square",
"tensorflow.random.normal",
"numpy.random.normal",
"tensorflow.executing_eagerly",
"tensorflow.constant",
"tensorflow.test.main"
]
] |
adrienxu/SATE | [
"a932859287b2d3a944f7b0ae6670c84c98db7965"
] | [
"examples/speech_to_text/prep_covost_data.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport logging\nfrom pathlib import Path\nimport shutil\nfrom tempfile import NamedTemporaryFile\nfrom typing import Optional, Tuple\nimport string\n\nimport pandas as pd\nimport torchaudio\nfrom examples.speech_to_text.data_utils import (\n create_zip,\n extract_fbank_features,\n filter_manifest_df,\n gen_config_yaml,\n gen_vocab,\n get_zip_manifest,\n load_df_from_tsv,\n save_df_to_tsv,\n)\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\nfrom torchaudio.datasets.utils import download_url, extract_archive\nfrom tqdm import tqdm\n\n\nlog = logging.getLogger(__name__)\n\n\nMANIFEST_COLUMNS = [\"id\", \"audio\", \"n_frames\", \"tgt_text\", \"speaker\"]\n\n\nclass CoVoST(Dataset):\n \"\"\"Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).\n\n Args:\n root (str): root path to the dataset and generated manifests/features\n source_language (str): source (audio) language\n target_language (str, optional): target (text) language,\n None for no translation (default: None)\n version (int, optional): CoVoST version. (default: 2)\n download (bool, optional): Whether to download the dataset if it is not\n found at root path. (default: ``False``).\n \"\"\"\n\n COVOST_URL_TEMPLATE = (\n \"https://dl.fbaipublicfiles.com/covost/\"\n \"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz\"\n )\n\n VERSIONS = {2}\n # SPLITS = [\"train\", \"dev\", \"test\"]\n SPLITS = [\"train\"]\n\n XX_EN_LANGUAGES = {\n 1: [\"fr\", \"de\", \"nl\", \"ru\", \"es\", \"it\", \"tr\", \"fa\", \"sv-SE\", \"mn\", \"zh-CN\"],\n 2: [\n \"fr\",\n \"de\",\n \"es\",\n \"ca\",\n \"it\",\n \"ru\",\n \"zh-CN\",\n \"pt\",\n \"fa\",\n \"et\",\n \"mn\",\n \"nl\",\n \"tr\",\n \"ar\",\n \"sv-SE\",\n \"lv\",\n \"sl\",\n \"ta\",\n \"ja\",\n \"id\",\n \"cy\",\n ],\n }\n EN_XX_LANGUAGES = {\n 1: [],\n 2: [\n \"de\",\n \"tr\",\n \"fa\",\n \"sv-SE\",\n \"mn\",\n \"zh-CN\",\n \"cy\",\n \"ca\",\n \"sl\",\n \"et\",\n \"id\",\n \"ar\",\n \"ta\",\n \"lv\",\n \"ja\",\n ],\n }\n\n def __init__(\n self,\n root: str,\n split: str,\n source_language: str,\n target_language: Optional[str] = None,\n version: int = 2,\n ) -> None:\n assert version in self.VERSIONS and split in self.SPLITS\n assert source_language is not None\n self.no_translation = target_language is None\n if not self.no_translation:\n assert \"en\" in {source_language, target_language}\n if source_language == \"en\":\n assert target_language in self.EN_XX_LANGUAGES[version]\n else:\n assert source_language in self.XX_EN_LANGUAGES[version]\n else:\n # Hack here so that we can get \"split\" column from CoVoST TSV.\n # Note that we use CoVoST train split for ASR which is an extension\n # to Common Voice train split.\n target_language = \"de\" if source_language == \"en\" else \"en\"\n\n self.root: Path = Path(root)\n\n cv_tsv_path = self.root / \"validated.tsv\"\n assert cv_tsv_path.is_file()\n cv_tsv = load_df_from_tsv(cv_tsv_path)\n\n if self.no_translation:\n print(\"No target translation.\")\n df = cv_tsv[[\"path\", \"sentence\", \"client_id\"]]\n df = df.set_index([\"path\"], drop=False)\n else:\n covost_url = self.COVOST_URL_TEMPLATE.format(\n src_lang=source_language, tgt_lang=target_language\n )\n covost_archive = self.root / Path(covost_url).name\n if not covost_archive.is_file():\n download_url(covost_url, self.root.as_posix(), 
hash_value=None)\n extract_archive(covost_archive.as_posix())\n\n covost_tsv = load_df_from_tsv(\n self.root / Path(covost_url).name.replace(\".tar.gz\", \"\")\n )\n df = pd.merge(\n left=cv_tsv[[\"path\", \"sentence\", \"client_id\"]],\n right=covost_tsv[[\"path\", \"translation\", \"split\"]],\n how=\"inner\",\n on=\"path\",\n )\n if split == \"train\":\n df = df[(df[\"split\"] == split) | (df[\"split\"] == f\"{split}_covost\")]\n else:\n df = df[df[\"split\"] == split]\n\n data = df.to_dict(orient=\"index\").items()\n data = [v for k, v in sorted(data, key=lambda x: x[0])]\n self.data = []\n for e in data:\n try:\n path = self.root / \"wav\" / e[\"path\"]\n _ = torchaudio.info(path.as_posix())\n self.data.append(e)\n except RuntimeError:\n pass\n\n def __getitem__(\n self, n: int\n ) -> Tuple[Path, int, int, str, str, str, str]:\n \"\"\"Load the n-th sample from the dataset.\n\n Args:\n n (int): The index of the sample to be loaded\n\n Returns:\n tuple: ``(wav_path, sample_rate, n_frames, sentence, translation, speaker_id,\n sample_id)``\n \"\"\"\n data = self.data[n]\n path = self.root / \"wav\" / data[\"path\"]\n info = torchaudio.info(path)\n sample_rate = info.sample_rate\n n_frames = info.num_frames\n sentence = data[\"sentence\"]\n translation = None if self.no_translation else data[\"translation\"]\n speaker_id = data[\"client_id\"]\n _id = data[\"path\"].replace(\".mp3\", \"\")\n return path, sample_rate, n_frames, sentence, translation, speaker_id, _id\n\n def __len__(self) -> int:\n return len(self.data)\n\n\ndef process(args):\n root = Path(args.data_root).absolute() / args.src_lang\n output_root = Path(args.output_root).absolute()\n if args.tgt_lang is not None:\n output_root = output_root / f\"{args.src_lang}-{args.tgt_lang}\"\n else:\n output_root = output_root / f\"{args.src_lang}\"\n if not root.is_dir():\n raise NotADirectoryError(f\"{root} does not exist\")\n\n zip_path = output_root / \"fbank80.zip\"\n if not zip_path.exists():\n # Extract features\n feature_root = output_root / \"fbank80\"\n feature_root.mkdir(exist_ok=True)\n\n for split in CoVoST.SPLITS:\n print(f\"Fetching split {split}...\")\n dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)\n print(\"Extracting log mel filter bank features...\")\n for wav_path, sample_rate, _, _, _, _, utt_id in tqdm(dataset):\n waveform, sample_rate = torchaudio.load(wav_path)\n extract_fbank_features(\n waveform, sample_rate, feature_root / f\"{utt_id}.npy\"\n )\n # Pack features into ZIP\n print(\"ZIPing features...\")\n create_zip(feature_root, zip_path)\n\n # # Clean up\n # shutil.rmtree(feature_root)\n\n print(\"Fetching ZIP manifest...\")\n zip_manifest = get_zip_manifest(zip_path)\n # Generate TSV manifest\n print(\"Generating manifest...\")\n train_text = []\n task = args.task\n # if args.tgt_lang is not None:\n # task = f\"st_{args.src_lang}_{args.tgt_lang}\"\n for split in CoVoST.SPLITS:\n manifest = {c: [] for c in MANIFEST_COLUMNS}\n if args.task == \"st\" and args.add_src:\n manifest[\"src_text\"] = []\n dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)\n for _, sr, n_frames, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):\n manifest[\"id\"].append(utt_id)\n manifest[\"audio\"].append(zip_manifest[utt_id])\n duration_ms = int(n_frames / sr * 1000)\n manifest[\"n_frames\"].append(int(1 + (duration_ms - 25) / 10))\n if args.lowercase_src:\n src_utt = src_utt.lower()\n if args.rm_punc_src:\n for w in string.punctuation:\n src_utt = src_utt.replace(w, \"\")\n src_utt = src_utt.replace(\" \", 
\"\")\n manifest[\"tgt_text\"].append(src_utt if args.tgt_lang is None else tgt_utt)\n if args.task == \"st\" and args.add_src:\n manifest[\"src_text\"].append(src_utt)\n manifest[\"speaker\"].append(speaker_id)\n is_train_split = split.startswith(\"train\")\n if is_train_split:\n if args.task == \"st\" and args.add_src and args.share:\n train_text.extend(manifest[\"src_text\"])\n train_text.extend(manifest[\"tgt_text\"])\n df = pd.DataFrame.from_dict(manifest)\n df = filter_manifest_df(df, is_train_split=is_train_split)\n save_df_to_tsv(df, output_root / f\"{split}_{task}.tsv\")\n\n # Generate vocab\n v_size_str = \"\" if args.vocab_type == \"char\" else str(args.vocab_size)\n spm_filename_prefix = f\"spm_{args.vocab_type}{v_size_str}_{task}\"\n asr_spm_filename = None\n gen_vocab_flag = True\n\n if args.task == \"st\" and args.add_src:\n if args.share:\n if args.st_spm_prefix is not None:\n gen_vocab_flag = False\n spm_filename_prefix = args.st_spm_prefix\n else:\n spm_filename_prefix = f\"spm_{args.vocab_type}{v_size_str}_{args.task}_share\"\n asr_spm_filename = spm_filename_prefix + \".model\"\n else:\n if args.st_spm_prefix is not None:\n gen_vocab_flag = False\n spm_filename_prefix = args.st_spm_prefix\n assert args.asr_prefix is not None\n asr_spm_filename = args.asr_prefix + \".model\"\n elif args.task == \"asr\":\n if args.asr_prefix is not None:\n gen_vocab_flag = False\n spm_filename_prefix = args.asr_prefix\n\n if gen_vocab_flag:\n with NamedTemporaryFile(mode=\"w\") as f:\n for t in train_text:\n f.write(t + \"\\n\")\n gen_vocab(\n Path(f.name),\n output_root / spm_filename_prefix,\n args.vocab_type,\n args.vocab_size\n )\n # Generate config YAML\n gen_config_yaml(\n output_root,\n spm_filename_prefix + \".model\",\n yaml_filename=f\"config_{task}.yaml\",\n specaugment_policy=\"lb\",\n cmvn_type=args.cmvn_type,\n asr_spm_filename=asr_spm_filename,\n share_src_and_tgt=True if args.task == \"asr\" else False\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data-root\", \"-d\", required=True, type=str,\n help=\"data root with sub-folders for each language <root>/<src_lang>\"\n )\n parser.add_argument(\n \"--output-root\", \"-o\", required=True, type=str,\n help=\"output root to save the results\"\n )\n parser.add_argument(\n \"--vocab-type\",\n default=\"unigram\",\n required=True,\n type=str,\n choices=[\"bpe\", \"unigram\", \"char\"],\n ),\n parser.add_argument(\"--vocab-size\", default=1000, type=int)\n parser.add_argument(\"--src-lang\", \"-s\", required=True, type=str)\n parser.add_argument(\"--task\", type=str, default=\"asr\", choices=[\"asr\", \"st\"])\n parser.add_argument(\"--tgt-lang\", \"-t\", type=str)\n parser.add_argument(\"--share\", action=\"store_true\",\n help=\"share the tokenizer and dictionary of the transcription and translation\")\n parser.add_argument(\"--add-src\", action=\"store_true\", help=\"add the src text for st task\")\n parser.add_argument(\"--asr-prefix\", type=str, help=\"prefix of the asr dict\")\n parser.add_argument(\"--st-spm-prefix\", type=str, default=None, help=\"prefix of the existing st dict\")\n parser.add_argument(\"--lowercase-src\", action=\"store_true\", help=\"lowercase the source text\")\n parser.add_argument(\"--rm-punc-src\", action=\"store_true\", help=\"remove the punctuation of the source text\")\n parser.add_argument(\"--cmvn-type\", default=\"utterance\",\n choices=[\"global\", \"utterance\"],\n help=\"The type of cepstral mean and variance normalization\")\n args = 
parser.parse_args()\n\n process(args)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.merge",
"pandas.DataFrame.from_dict"
]
] |
bderembl/mitgcm_configs | [
"8aa0343fc56e9da831e7a8b857838c4f4a76aa9a"
] | [
"corner/input/plot_field.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io.netcdf as netcdf\n\nplt.ion()\n\nflag_mov = 0\nflag_traj = 0\n\ndir0 = '../run/'\n\nfile1 = 'diags.0000000000.t001.nc'\nfile2 = 'grid.t001.nc'\n\nf1 = netcdf.netcdf_file(dir0 + file1)\nf2 = netcdf.netcdf_file(dir0 + file2)\n\n\nx = f2.variables['X'][:].copy()\ny = f2.variables['Y'][:].copy()\n\nxp1 = f2.variables['Xp1'][:].copy()\nyp1 = f2.variables['Yp1'][:].copy()\nT = f1.variables['T'][:].copy()\n\n\nsi_x = len(x)\nsi_y = len(y)\nsi_t = len(T)\n\nh_mit = f2.variables['Depth'][:,:].copy()\n\nvort = f1.variables['momVort3'][0,:,:].copy()\n\nvmin = np.min(vort)\nvmax = -vmin\nvcont = np.linspace(vmin,vmax,20)\n\n\nxunit = 1000.0 # 1:m -- 1000:km\n\nposxy = np.zeros((2,si_t),dtype='int')\n\nif flag_traj == 1:\n for nt in range(0,si_t):\n vort = f1.variables['momVort3'][nt,:,:].copy()\n posxy[0,nt],posxy[1,nt] = np.unravel_index(np.argmin(vort),vort.shape)\n \n\nplt.figure()\n\nif flag_mov == -1:\n nt = 0\n mytime = [49]\n vort = f1.variables['momVort3'][mytime[nt],:,:].copy()\n plt.contour(xp1[:si_x/2]/xunit,yp1/xunit,vort[:,:si_x/2],vcont,colors='k')\n plt.title('Day ' + str(mytime[nt]+1))\n plt.xlabel('x (km)')\n plt.ylabel('y (km)')\n myci = \"CI: {:.1e}\".format(vcont[1]-vcont[0])\n plt.text(x[120]/xunit,y[5]/xunit,myci)\n\n if flag_traj:\n plt.plot(xp1[posxy[1,:mytime[nt]]]/xunit,yp1[posxy[0,:mytime[nt]]]/xunit,'b')\n plt.plot(xp1[posxy[1,mytime[nt]:]]/xunit,yp1[posxy[0,mytime[nt]:]]/xunit,'b--')\n\nelif flag_mov == 0:\n mytime = [0,9,19,29]\n\n for nt in range(0,len(mytime)):\n plt.subplot(2,2,nt+1, aspect='equal')\n vort = f1.variables['momVort3'][mytime[nt],:,:].copy()\n plt.contour(xp1/xunit,yp1/xunit,vort.squeeze(),vcont,colors='k')\n plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')\n plt.title('Day ' + str(mytime[nt]+1))\n if nt == 2 or nt == 3:\n plt.xlabel('x (km)')\n if nt == 0 or nt == 2: \n plt.ylabel('y (km)')\n myci = \"CI: {:.1e}\".format(vcont[1]-vcont[0])\n plt.text(x[-170]/xunit,y[5]/xunit,myci)\n\n plt.savefig('corner_10mit.eps')\n\nelif flag_mov == 1:\n\n vort = f1.variables['momVort3'][:,:,:].copy()\n\n vmin = np.min(vort)\n vmax = -vmin\n\n vcont = np.linspace(vmin,vmax,20)\n\n for nt in range(0,si_t):\n vort = f1.variables['momVort3'][nt,:,:].copy()\n vort = vort.squeeze()\n vort[0,0] = vmin\n vort[0,1] = vmax\n plt.contourf(xp1/xunit,yp1/xunit,vort,vcont,cmap = plt.cm.bwr)\n plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')\n ext = '0'\n if nt > 9:\n ext = ''\n plt.savefig('movie/ewall_'+ ext + str(nt) + 'mit.png') \n plt.clf()\n\nf1.close()\nf2.close()\n"
] | [
[
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.argmin",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.min",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.contourf",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"scipy.io.netcdf.netcdf_file"
]
] |
kzeiler/modflow6 | [
"a185d95b91985e965f8a04ae353305dff19b9637"
] | [
"autotest/test_gwf_maw04.py"
] | [
"import os\nimport pytest\nimport sys\nimport numpy as np\n\ntry:\n import pymake\nexcept:\n msg = \"Error. Pymake package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install https://github.com/modflowpy/pymake/zipball/master\"\n raise Exception(msg)\n\ntry:\n import flopy\nexcept:\n msg = \"Error. FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\nfrom framework import testing_framework, running_on_CI\nfrom simulation import Simulation\n\nex = [\n \"maw_iss305a\",\n \"maw_iss305b\",\n \"maw_iss305c\",\n \"maw_iss305d\",\n \"maw_iss305e\",\n \"maw_iss305f\",\n]\nexdirs = []\nfor s in ex:\n exdirs.append(os.path.join(\"temp\", s))\nddir = \"data\"\ncmppth = \"mf2005\"\n\npaktest = \"maw\"\n\nrequire_failure = [True for i in range(len(exdirs))]\nrequire_failure[0] = False\n\n# set travis to True when version 1.13.0 is released\ncontinuous_integration = [True for n in ex]\n\n# set replace_exe to None to use default executable\nreplace_exe = None\n\n# temporal discretization\nnper = 2\nperlen = [0.0, 365.0]\nnstp = [1, 25]\ntsmult = [1.0, 1.1]\nsteady = [True, False]\n\n# spatial discretization\nnlay, nrow, ncol = 2, 101, 101\nshape3d = (nlay, nrow, ncol)\nsize3d = nlay * nrow * ncol\n\nxlen = 1000.0\ncommon_ratio = 1.01\nnhalf = int(0.5 * ncol) + 1\nfirst_term = 0.5 * xlen / ((1 - common_ratio**nhalf) / (1 - common_ratio))\ndelr = np.zeros((ncol), dtype=float)\nfor n in range(nhalf):\n if n == 0:\n v = first_term\n else:\n v = first_term * common_ratio**n\n delr[nhalf + n - 1] = v\ndelr[: nhalf - 1] = delr[-1 : nhalf - 1 : -1]\n\n# add error to edge cells\nerr = xlen - delr.sum()\ndelr[0] += 0.5 * err\ndelr[-1] += 0.5 * err\n\ntop = 0.0\nbotm = [-175, -350.0]\nstrt = 0.0\n\n# hydraulic data\nhk = 1.0\nss = 1e-5\nconfined = 0\n\nchd_spd = []\nchd5_spd = []\nfor i in range(nrow):\n if i == 0 or i == ncol - 1:\n for j in range(ncol):\n chd_spd.append([(0, i, j), strt])\n chd5_spd.append([0, i, j, strt, strt])\n else:\n chd_spd.append([(0, i, 0), strt])\n chd_spd.append([(0, i, ncol - 1), strt])\n chd5_spd.append([0, i, 0, strt, strt])\n chd5_spd.append([0, i, ncol - 1, strt, strt])\n\n# maw data\nradius0 = np.sqrt(delr[nhalf] * delr[nhalf] / (8.0 * np.pi))\nradius = 0.25\nsradius0 = radius + 0.1\nwellq = -100.0\nskin_mult = [0.1, 10.0, 1.0, 0.0, -1.0, 100.0]\ncondeqn = [\"CUMULATIVE\", \"SKIN\", \"SKIN\", \"SKIN\", \"SPECIFIED\", \"CUMULATIVE\"]\nsradius = [sradius0, sradius0, sradius0, sradius0, sradius0, radius0 * 1.5]\n\ntdis_rc = []\nfor idx in range(nper):\n tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))\n\nhclose, rclose = 1e-9, 1e-6\n\n\ndef build_model(idx, dir):\n name = ex[idx]\n ws = dir\n\n # build MODFLOW 6 files\n sim = flopy.mf6.MFSimulation(\n sim_name=name, version=\"mf6\", exe_name=\"mf6\", sim_ws=ws\n )\n # create tdis package\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n # create iterative model solution\n ims = flopy.mf6.ModflowIms(\n sim, inner_dvclose=hclose, rcloserecord=rclose, outer_dvclose=hclose\n )\n\n # create gwf model\n gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)\n\n # discretization\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delr,\n top=top,\n botm=botm,\n )\n # initial conditions\n ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)\n\n # node property flow\n npf = 
flopy.mf6.ModflowGwfnpf(\n gwf, save_flows=False, icelltype=confined, k=hk\n )\n # storage\n sto = flopy.mf6.ModflowGwfsto(\n gwf,\n save_flows=False,\n iconvert=confined,\n ss=ss,\n steady_state={0: True},\n transient={1: True},\n )\n # constant head\n chd = flopy.mf6.ModflowGwfchd(\n gwf, stress_period_data=chd_spd, save_flows=False\n )\n # multi-aquifer well\n hks = hk * skin_mult[idx]\n mpd = [[0, radius, botm[-1], strt, condeqn[idx], 2]]\n mcd = [\n [0, 0, (0, nhalf, nhalf), top, botm[0], hks, sradius[idx]],\n [0, 1, (1, nhalf, nhalf), botm[0], botm[1], hks, sradius[idx]],\n ]\n perioddata = {1: [[0, \"RATE\", wellq]]}\n maw = flopy.mf6.ModflowGwfmaw(\n gwf,\n print_input=True,\n no_well_storage=True,\n packagedata=mpd,\n connectiondata=mcd,\n perioddata=perioddata,\n )\n # output control\n oc = flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.cbc\".format(name),\n head_filerecord=\"{}.hds\".format(name),\n saverecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n )\n # build MODFLOW-2005 files\n if require_failure[idx]:\n mc = None\n else:\n ws = os.path.join(dir, cmppth)\n mc = flopy.modflow.Modflow(name, model_ws=ws, version=cmppth)\n dis = flopy.modflow.ModflowDis(\n mc,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n nper=nper,\n perlen=perlen,\n nstp=nstp,\n tsmult=tsmult,\n steady=steady,\n delr=delr,\n delc=delr,\n top=top,\n botm=botm,\n )\n bas = flopy.modflow.ModflowBas(mc, strt=strt)\n lpf = flopy.modflow.ModflowLpf(\n mc, laytyp=confined, hk=hk, vka=hk, ss=ss, sy=0\n )\n chd = flopy.modflow.ModflowChd(mc, stress_period_data=chd5_spd)\n # mnw2\n # empty mnw2 file to create recarrays\n mnw2 = flopy.modflow.ModflowMnw2(mc)\n node_data = mnw2.get_empty_node_data(2)\n node_data[\"ztop\"] = np.array([top, botm[0]])\n node_data[\"zbotm\"] = np.array([botm[0], botm[1]])\n node_data[\"i\"] = np.array([nhalf, nhalf])\n node_data[\"j\"] = np.array([nhalf, nhalf])\n node_data[\"wellid\"] = np.array([\"well1\", \"well1\"])\n node_data[\"losstype\"] = np.array([\"skin\", \"skin\"])\n node_data[\"rw\"] = np.array([radius, radius])\n node_data[\"rskin\"] = np.array([sradius[idx], sradius[idx]])\n node_data[\"kskin\"] = np.array([hks, hks])\n dtype = [(\"wellid\", np.unicode_, 20), (\"qdes\", \"<f8\")]\n spd0 = np.zeros(1, dtype=dtype)\n spd0[\"wellid\"] = \"well1\"\n spd1 = np.zeros(1, dtype=dtype)\n spd1[\"wellid\"] = \"well1\"\n spd1[\"qdes\"] = wellq\n spd = {0: spd0, 1: spd1}\n mnw2 = flopy.modflow.ModflowMnw2(\n mc,\n mnwmax=1,\n node_data=node_data,\n stress_period_data=spd,\n itmp=[1, 1],\n mnwprnt=2,\n )\n oc = flopy.modflow.ModflowOc(\n mc,\n stress_period_data=None,\n save_every=1,\n save_types=[\"save head\", \"save budget\"],\n )\n pcg = flopy.modflow.ModflowPcg(mc, hclose=hclose, rclose=rclose)\n\n return sim, mc\n\n\n# - No need to change any code below\n@pytest.mark.parametrize(\n \"idx, dir\",\n list(enumerate(exdirs)),\n)\ndef test_mf6model(idx, dir):\n # determine if running on CI infrastructure\n is_CI = running_on_CI()\n\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n test.build_mf6_models_legacy(build_model, idx, dir)\n\n # run the test model\n if is_CI and not continuous_integration[idx]:\n return\n test.run_mf6(Simulation(dir, require_failure=require_failure[idx]))\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n # run the test model\n for idx, dir in enumerate(exdirs):\n test.build_mf6_models_legacy(build_model, idx, dir)\n sim = Simulation(dir, 
require_failure=require_failure[idx])\n test.run_mf6(sim)\n\n return\n\n\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run main routine\n main()\n"
] | [
[
"numpy.sqrt",
"numpy.array",
"numpy.zeros"
]
] |
deep-spin/SIGMORPHON2019 | [
"60cf3b53be42e76238e7928405b2916cd9aed6c4"
] | [
"onmt/tests/test_attention.py"
] | [
"\"\"\"\nHere come the tests for attention types and their compatibility\n\"\"\"\nimport unittest\nimport torch\nfrom torch.autograd import Variable\n\nimport onmt\n\n\nclass TestAttention(unittest.TestCase):\n\n def test_masked_global_attention(self):\n\n source_lengths = torch.IntTensor([7, 3, 5, 2])\n # illegal_weights_mask = torch.ByteTensor([\n # [0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 0, 1, 1, 1, 1],\n # [0, 0, 0, 0, 0, 1, 1],\n # [0, 0, 1, 1, 1, 1, 1]])\n\n batch_size = source_lengths.size(0)\n dim = 20\n\n memory_bank = Variable(torch.randn(batch_size,\n source_lengths.max(), dim))\n hidden = Variable(torch.randn(batch_size, dim))\n\n attn = onmt.modules.Attention(dim)\n\n _, alignments = attn(hidden, memory_bank,\n memory_lengths=source_lengths)\n # TODO: fix for pytorch 0.3\n # illegal_weights = alignments.masked_select(illegal_weights_mask)\n\n # self.assertEqual(0.0, illegal_weights.data.sum())\n"
] | [
[
"torch.IntTensor",
"torch.randn"
]
] |
GingerBear/texar | [
"46e006f9349893a3015cd937bee9914c516e26af"
] | [
"texar/tf/data/data/tfrecord_data_test.py"
] | [
"# -*- coding: utf-8 -*-\n#\n\"\"\"\nUnit tests for data related operations.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport copy\nimport shutil\nimport tempfile\nimport ssl\nimport tensorflow as tf\nimport texar.tf as tx\n\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nclass TFRecordDataTest(tf.test.TestCase):\n \"\"\"Tests tfrecord data class.\n \"\"\"\n # pylint: disable=too-many-locals\n def setUp(self):\n tf.test.TestCase.setUp(self)\n\n # Create test data\n # pylint: disable=no-member\n self._test_dir = tempfile.mkdtemp()\n\n cat_in_snow = tf.keras.utils.get_file(\n os.path.join(self._test_dir, 'cat_0.jpg'),\n 'https://storage.googleapis.com/download.tensorflow.org/'\n 'example_images/320px-Felis_catus-cat_on_snow.jpg')\n williamsburg_bridge = tf.keras.utils.get_file(\n os.path.join(self._test_dir, 'bridge_0.jpg'),\n 'https://storage.googleapis.com/download.tensorflow.org/'\n 'example_images/194px-New_East_River_Bridge_from_Brooklyn_'\n 'det.4a09796u.jpg')\n\n def _bytes_feature(value=None):\n \"\"\"Returns a bytes_list from a string / byte.\n \"\"\"\n # pylint: disable=undefined-loop-variable\n value = tf.compat.as_bytes(\n value,\n encoding='utf-8'\n )\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value]))\n\n def _int64_feature(value=None):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\n \"\"\"\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=[value]))\n\n _feature_original_types = {\n 'height': ['tf.int64', 'FixedLenFeature'],\n 'width': ['tf.int64', 'FixedLenFeature'],\n 'label': ['tf.int64', 'FixedLenFeature'],\n 'shape': [tf.int64, 'VarLenFeature'],\n 'image_raw': ['tf.string', 'FixedLenFeature'],\n 'variable1': [tf.string, 'FixedLenFeature'],\n 'variable2': ['tf.int64', 'FixedLenFeature'],\n }\n self._feature_convert_types = {\n 'variable1': 'tf.float32',\n 'variable2': 'tf.string',\n }\n _image_options = {}\n self._unconvert_features = ['height', 'width', 'label']\n def _image_example(image_string, image_shape, label):\n \"\"\"Create data example with image\n \"\"\"\n feature = {\n 'height': _int64_feature(image_shape[0]),\n 'width': _int64_feature(image_shape[1]),\n 'shape': tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(image_shape))),\n 'label': _int64_feature(label),\n 'image_raw': _bytes_feature(image_string),\n 'variable1': _bytes_feature('1234567890'),\n 'variable2': _int64_feature(9876543210),\n }\n return tf.train.Example(\n features=tf.train.Features(feature=feature))\n\n self._dataset_valid = {\n 'height': [],\n 'width': [],\n 'shape': [],\n 'label': [],\n 'image_raw': [],\n 'variable1': [],\n 'variable2': [],\n }\n _toy_image_labels_valid = {\n cat_in_snow : 0,\n williamsburg_bridge : 1,\n }\n _toy_image_shapes = {\n cat_in_snow: (213, 320, 3),\n williamsburg_bridge: (239, 194),\n }\n _tfrecord_filepath = os.path.join(\n self._test_dir,\n 'test.tfrecord')\n # Prepare Validation data\n with tf.python_io.TFRecordWriter(_tfrecord_filepath) as writer:\n for image_path, label in _toy_image_labels_valid.items():\n\n with open(image_path, 'rb') as fid:\n image_data = fid.read()\n image_shape = _toy_image_shapes[image_path]\n\n tf_example = _image_example(image_data, image_shape, label)\n writer.write(tf_example.SerializeToString())\n\n #_construct_dataset_valid(\"\", shape, label)\n single_data = {\n 'height': image_shape[0],\n 
'width': image_shape[1],\n 'shape': image_shape,\n 'label': label,\n 'image_raw': image_data,\n 'variable1': \"1234567890\",\n 'variable2': int(9876543210),\n }\n for key, value in single_data.items():\n self._dataset_valid[key].append(value)\n\n self._hparams = {\n \"num_epochs\": 1,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"dataset\": {\n \"files\": _tfrecord_filepath,\n \"feature_original_types\": _feature_original_types,\n \"feature_convert_types\": self._feature_convert_types,\n \"image_options\": [_image_options],\n }\n }\n\n def tearDown(self):\n \"\"\"Remove the downloaded files after the test\n \"\"\"\n shutil.rmtree(self._test_dir)\n\n def _run_and_test(self, hparams):\n # Construct database\n tfrecord_data = tx.data.TFRecordData(hparams)\n iterator = tfrecord_data.dataset.make_initializable_iterator()\n data_batch = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n sess.run(iterator.initializer)\n i = 0\n def _prod(lst):\n res = 1\n for i in lst:\n res *= i\n return res\n while True:\n try:\n # Run the logics\n data_batch_ = sess.run(data_batch)\n self.assertEqual(\n set(data_batch_.keys()),\n set(tfrecord_data.list_items()))\n\n # Check data consistency\n for key in self._unconvert_features:\n value = data_batch_[key][0]\n self.assertEqual(value, self._dataset_valid[key][i])\n self.assertEqual(\n list(data_batch_['shape'].values),\n list(self._dataset_valid['shape'][i]))\n\n # Check data type conversion\n for key, item in self._feature_convert_types.items():\n value = data_batch_[key][0]\n if item == 'tf.string' or item is tf.string:\n self.assertTrue(isinstance(value, bytes))\n else:\n dtype_matched = (\n tx.utils.dtypes.get_tf_dtype(str(value.dtype))\n is tx.utils.dtypes.get_tf_dtype(item))\n self.assertTrue(dtype_matched)\n\n # Check image decoding and resize\n if hparams[\"dataset\"].get(\"image_options\"):\n image_options = hparams[\"dataset\"].get(\"image_options\")\n if isinstance(image_options, dict):\n image_options = [image_options]\n for image_option_feature in image_options:\n image_key = image_option_feature.get(\n \"image_feature_name\")\n if image_key is None:\n continue\n image_gen = data_batch_[image_key][0]\n image_valid_shape = self._dataset_valid[\"shape\"][i]\n resize_height = image_option_feature.get(\n \"resize_height\")\n resize_width = image_option_feature.get(\n \"resize_width\")\n if resize_height and resize_width:\n self.assertEqual(\n image_gen.shape[0] * image_gen.shape[1],\n resize_height * resize_width)\n else:\n self.assertEqual(\n _prod(image_gen.shape),\n _prod(image_valid_shape))\n i += 1\n except tf.errors.OutOfRangeError:\n print('Done -- epoch limit reached')\n break\n\n def test_default_setting(self):\n \"\"\"Tests the logics of TFRecordData.\n \"\"\"\n self._run_and_test(self._hparams)\n\n def test_image_resize(self):\n \"\"\"Tests the image resize function\n \"\"\"\n hparams = copy.copy(self._hparams)\n _image_options = {\n 'image_feature_name': 'image_raw',\n 'resize_height': 512,\n 'resize_width': 512,\n }\n hparams[\"dataset\"].update({\"image_options\": _image_options})\n self._run_and_test(hparams)\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.compat.as_bytes",
"tensorflow.train.Int64List",
"tensorflow.test.TestCase.setUp",
"tensorflow.train.Features",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.BytesList",
"tensorflow.local_variables_initializer",
"tensorflow.test.main",
"tensorflow.tables_initializer"
]
] |
Jeasonlee313/paperdev_Phy_SORT- | [
"24c9ee5d3fc18ed6d3d85e4f95195d39bdf527e2"
] | [
"deep_sort/sort/tracker.py"
] | [
"# vim: expandtab:ts=4:sw=4\nfrom __future__ import absolute_import\nimport numpy as np\nfrom . import kalman_filter\nfrom . import linear_assignment\nfrom . import iou_matching\nfrom .track import Track\n\n\nclass Tracker:\n \"\"\"\n This is the multi-target tracker.\n\n Parameters\n ----------\n metric : nn_matching.NearestNeighborDistanceMetric\n A distance metric for measurement-to-track association.\n max_age : int\n Maximum number of missed misses before a track is deleted.\n n_init : int\n Number of consecutive detections before the track is confirmed. The\n track state is set to `Deleted` if a miss occurs within the first\n `n_init` frames.\n\n Attributes\n ----------\n metric : nn_matching.NearestNeighborDistanceMetric\n The distance metric used for measurement to track association.\n max_age : int\n Maximum number of missed misses before a track is deleted.\n n_init : int\n Number of frames that a track remains in initialization phase.\n kf : kalman_filter.KalmanFilter\n A Kalman filter to filter target trajectories in image space.\n tracks : List[Track]\n The list of active tracks at the current time step.\n\n \"\"\"\n\n def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3, h = np.identity(3, float)):\n self.metric = metric\n self.max_iou_distance = max_iou_distance\n self.max_age = max_age\n self.n_init = n_init\n\n self.kf = kalman_filter.KalmanFilter()\n self.tracks = []\n self._next_id = 1\n\n self.H = h\n\n def predict(self):\n \"\"\"Propagate track state distributions one time step forward.\n\n This function should be called once every time step, before `update`.\n \"\"\"\n for track in self.tracks:\n track.predict(self.kf)\n\n def update(self, detections, h=np.identity(3)):\n \"\"\"Perform measurement update and track management.\n\n Parameters\n ----------\n detections : List[deep_sort.detection.Detection]\n A list of detections at the current time step.\n\n \"\"\"\n # Run matching cascade.\n matches, unmatched_tracks, unmatched_detections = \\\n self._match(detections)\n\n # Update track set.\n for track_idx, detection_idx in matches:\n self.tracks[track_idx].update(\n self.kf, detections[detection_idx])\n for track_idx in unmatched_tracks:\n self.tracks[track_idx].mark_missed()\n for detection_idx in unmatched_detections:\n self._initiate_track(detections[detection_idx])\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\n\n # Update distance metric.\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\n features, targets = [], []\n for track in self.tracks:\n if not track.is_confirmed():\n continue\n features += track.features\n targets += [track.track_id for _ in track.features]\n track.features = []\n self.metric.partial_fit(\n np.asarray(features), np.asarray(targets), active_targets)\n\n def _match(self, detections):\n\n def gated_metric(tracks, dets, track_indices, detection_indices):\n features = np.array([dets[i].feature for i in detection_indices])\n targets = np.array([tracks[i].track_id for i in track_indices])\n cost_matrix = self.metric.distance(features, targets)\n print(\"cost_matrix1:\\n\", cost_matrix)\n cost_matrix = linear_assignment.gate_cost_matrix(\n self.kf, cost_matrix, tracks, dets, track_indices,\n detection_indices, only_position=True)\n print(\"cost_matrix2:\\n\", cost_matrix)\n return cost_matrix\n\n # Split track set into confirmed and unconfirmed tracks.\n confirmed_tracks = [\n i for i, t in enumerate(self.tracks) if t.is_confirmed()]\n unconfirmed_tracks = [\n i for i, t in 
enumerate(self.tracks) if not t.is_confirmed()]\n\n # Associate confirmed tracks using appearance features.\n matches_a, unmatched_tracks_a, unmatched_detections = \\\n linear_assignment.matching_cascade(\n gated_metric, self.metric.matching_threshold, self.max_age,\n self.tracks, detections, confirmed_tracks)\n\n # Associate remaining tracks together with unconfirmed tracks using IOU.\n iou_track_candidates = unconfirmed_tracks + [\n k for k in unmatched_tracks_a if\n self.tracks[k].time_since_update == 1]\n unmatched_tracks_a = [\n k for k in unmatched_tracks_a if\n self.tracks[k].time_since_update != 1]\n matches_b, unmatched_tracks_b, unmatched_detections = \\\n linear_assignment.min_cost_matching(\n iou_matching.iou_cost, self.max_iou_distance, self.tracks,\n detections, iou_track_candidates, unmatched_detections)\n\n matches = matches_a + matches_b\n unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))\n return matches, unmatched_tracks, unmatched_detections\n\n def _initiate_track(self, detection):\n mean, covariance = self.kf.initiate(detection.to_toppoint())\n self.tracks.append(Track(\n mean, covariance, self._next_id, self.n_init, self.max_age,\n detection.feature, h=self.H))\n self._next_id += 1\n"
] | [
[
"numpy.array",
"numpy.identity",
"numpy.asarray"
]
] |
moojink/drq | [
"e05c337aeb6fcae30c2db6e4afaca65e94511bbd"
] | [
"meta_logger.py"
] | [
"import csv\nimport json\nimport os\nimport shutil\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport torch\nimport torchvision\nfrom termcolor import colored\nfrom torch.utils.tensorboard import SummaryWriter\n\nCOMMON_TRAIN_FORMAT = [('episode', 'E', 'int'), ('step', 'S', 'int'),\n ('episode_reward', 'R', 'float'),\n ('duration', 'D', 'time')]\n\nCOMMON_EVAL_FORMAT = [('episode', 'E', 'int'), ('step', 'S', 'int'),\n ('episode_reward', 'R', 'float')]\n\nAGENT_TRAIN_FORMAT = {\n 'drq': [('batch_reward', 'BR', 'float'), ('actor_loss', 'ALOSS', 'float'),\n ('critic_loss', 'CLOSS', 'float'),\n ('alpha_loss', 'TLOSS', 'float'), ('alpha_value', 'TVAL', 'float'),\n ('actor_entropy', 'AENT', 'float')]\n}\n\n\nclass AverageMeter(object):\n def __init__(self):\n self._sum = 0\n self._count = 0\n\n def update(self, value, n=1):\n self._sum += value\n self._count += n\n\n def value(self):\n return self._sum / max(1, self._count)\n\n\nclass MetersGroup(object):\n def __init__(self, file_name, formating):\n self._csv_file_name = self._prepare_file(file_name, 'csv')\n self._formating = formating\n self._meters = defaultdict(AverageMeter)\n self._csv_file = open(self._csv_file_name, 'w')\n self._csv_writer = None\n\n def _prepare_file(self, prefix, suffix):\n file_name = f'{prefix}.{suffix}'\n if os.path.exists(file_name):\n os.remove(file_name)\n return file_name\n\n def log(self, key, value, n=1):\n self._meters[key].update(value, n)\n\n def _prime_meters(self):\n data = dict()\n for key, meter in self._meters.items():\n if key.startswith('train'):\n key = key[len('train') + 1:]\n else:\n key = key[len('eval') + 1:]\n key = key.replace('/', '_')\n data[key] = meter.value()\n return data\n\n def _dump_to_csv(self, data):\n if self._csv_writer is None:\n self._csv_writer = csv.DictWriter(self._csv_file,\n fieldnames=sorted(data.keys()),\n restval=0.0)\n self._csv_writer.writeheader()\n self._csv_writer.writerow(data)\n self._csv_file.flush()\n\n def _format(self, key, value, ty):\n if ty == 'int':\n value = int(value)\n return f'{key}: {value}'\n elif ty == 'float':\n return f'{key}: {value:.04f}'\n elif ty == 'time':\n return f'{key}: {value:04.1f} s'\n else:\n raise f'invalid format type: {ty}'\n\n def _dump_to_console(self, data, prefix):\n prefix = colored(prefix, 'yellow' if prefix == 'train' else 'green')\n pieces = [f'| {prefix: <14}']\n for key, disp_key, ty in self._formating:\n value = data.get(key, 0)\n pieces.append(self._format(disp_key, value, ty))\n print(' | '.join(pieces))\n\n def dump(self, step, prefix, save=True):\n if len(self._meters) == 0:\n return\n if save:\n data = self._prime_meters()\n data['step'] = step\n self._dump_to_csv(data)\n self._dump_to_console(data, prefix)\n self._meters.clear()\n\n\nclass Logger(object):\n def __init__(self,\n log_dir,\n save_tb=False,\n log_frequency=10000,\n action_repeat=1,\n agent='drq'):\n self._log_dir = log_dir\n self._log_frequency = log_frequency\n self._action_repeat = action_repeat\n if save_tb:\n tb_dir = os.path.join(log_dir, 'tb')\n if os.path.exists(tb_dir):\n try:\n shutil.rmtree(tb_dir)\n except:\n print(\"logger.py warning: Unable to remove tb directory\")\n pass\n self._sw = SummaryWriter(tb_dir)\n else:\n self._sw = None\n # each agent has specific output format for training\n assert agent in AGENT_TRAIN_FORMAT\n train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]\n self._train_mg = MetersGroup(os.path.join(log_dir, 'train'),\n formating=train_format)\n self._eval_mg = 
MetersGroup(os.path.join(log_dir, 'eval'),\n formating=COMMON_EVAL_FORMAT)\n\n def _should_log(self, step, log_frequency):\n log_frequency = log_frequency or self._log_frequency\n return step % log_frequency == 0\n\n def _update_step(self, step):\n return step * self._action_repeat\n\n def _try_sw_log(self, key, value, step):\n step = self._update_step(step)\n if self._sw is not None:\n self._sw.add_scalar(key, value, step)\n\n def _try_sw_log_image(self, key, image, step):\n step = self._update_step(step)\n if self._sw is not None:\n assert image.dim() == 3\n grid = torchvision.utils.make_grid(image.unsqueeze(1))\n self._sw.add_image(key, grid, step)\n\n def _try_sw_log_video(self, key, frames, step):\n step = self._update_step(step)\n if self._sw is not None:\n frames = torch.from_numpy(np.array(frames))\n frames = frames.unsqueeze(0)\n self._sw.add_video(key, frames, step, fps=30)\n\n def _try_sw_log_histogram(self, key, histogram, step):\n step = self._update_step(step)\n if self._sw is not None:\n self._sw.add_histogram(key, histogram, step)\n\n def log(self, key, value, step, n=1, log_frequency=1):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n \n def eval_log(self, key, value, step, n=1, log_frequency=1):\n \"\"\"Same as self.log(), except we don't call self._should_log().\n In other words, we always log.\"\"\"\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n\n\n def log_param(self, key, param, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n self.log_histogram(key + '_w', param.weight.data, step)\n if hasattr(param.weight, 'grad') and param.weight.grad is not None:\n self.log_histogram(key + '_w_g', param.weight.grad.data, step)\n if hasattr(param, 'bias') and hasattr(param.bias, 'data'):\n self.log_histogram(key + '_b', param.bias.data, step)\n if hasattr(param.bias, 'grad') and param.bias.grad is not None:\n self.log_histogram(key + '_b_g', param.bias.grad.data, step)\n\n def log_image(self, key, image, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_image(key, image, step)\n\n def log_video(self, key, frames, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_video(key, frames, step)\n\n def log_histogram(self, key, histogram, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_histogram(key, histogram, step)\n\n def dump(self, step, save=True, ty=None):\n step = self._update_step(step)\n if ty is None:\n self._train_mg.dump(step, 'train', save)\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'eval':\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'train':\n self._train_mg.dump(step, 'train', save)\n else:\n raise ValueError(f'invalid log type: {ty}')\n"
] | [
[
"torch.utils.tensorboard.SummaryWriter",
"numpy.array"
]
] |
G-Thor/merlin | [
"33fa6e65ddb903ed5633ccb66c74d3e7c128667f"
] | [
"src/logplot/logging_plotting.py"
] | [
"################################################################################\n# The Neural Network (NN) based Speech Synthesis System\n# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/\n#\n# Centre for Speech Technology Research\n# University of Edinburgh, UK\n# Copyright (c) 2014-2015\n# All Rights Reserved.\n#\n# The system as a whole and most of the files in it are distributed\n# under the following copyright and conditions\n#\n# Permission is hereby granted, free of charge, to use and distribute\n# this software and its documentation without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of this work, and to\n# permit persons to whom this work is furnished to do so, subject to\n# the following conditions:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# - The authors' names may not be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK\n# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING\n# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT\n# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE\n# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN\n# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,\n# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF\n# THIS SOFTWARE.\n################################################################################\n\n# NOTES\n# still to consider: pygal, for HTML5 SVG plotting\n\nimport math\nimport string\nimport os\n\n# this module provides the base classes that we specialise here\nimport logging # as logging\n\n# for plotting\nimport matplotlib\n\n# should make this user-configurable - TO DO later\n# this line has to come before the import of matplotlib.pyplot\nmatplotlib.use('PDF')\n\nimport matplotlib.pyplot as plt\nimport pylab\n\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\n# matplotlib needs to be passed numpy arrays\nimport numpy\n\n# for sorting tuples\nfrom operator import itemgetter, attrgetter\n\n\n# TO DO - this needs to be attached to the logging module so that it's available via config options\n# class PlotHandler(logging.FileHandler):\n# \"\"\"A handler for saving plots to disk\"\"\"\n# def __init__(self,filename):\n# logging.FileHandler.__init__(self,filename, mode='a', encoding=None, delay=False)\n\n\n\nclass PlotWithData(object):\n # a generic plot object that contains both the underlying data and the plot itself\n # this class needs to be subclassed for each specialised type of plot that we want\n\n # the underlying data for the plot - a dictionary of data series\n # each series is a list of data points of arbitrary type (e.g., tuples, arrays, ..)\n data=None\n # the plot generated from these data\n plot=None\n\n def __init__(self,name):\n # clear the data series\n self.data={}\n\n def add_data_point(self,series_name,data_point):\n # if there is no data series with this name yet, create an empty one\n if series_name not in self.data:\n 
self.data[series_name]=[]\n # append this data point (e.g., it might be a tuple (x,y) )\n # don't worry about data type or sorting - that is not our concern here\n self.data[series_name].append(data_point)\n\n def sort_and_validate(self):\n # only applied if the data points are tuples, such as (x,y) values\n\n # TO DO: first check that each series is a list of tuples, and that they have the same number of elements\n\n # this method checks that all data series\n # 1. have the same length\n # 2. are sorted in ascending order of x\n # 3. have identical values in their x series\n\n logger = logging.getLogger(\"plotting\")\n\n # there has to be at least one data series\n try:\n assert len(self.data) > 0\n except AssertionError:\n logger.critical('No data series found in plot')\n raise\n\n # check lengths are consistent, sort, then check x values are identical\n l=-1\n reference_x=None\n # print \"starting with self.data=\",self.data\n for series_name,data_points in self.data.items():\n if l > 0:\n assert l == len(data_points)\n else:\n l = len(data_points)\n # sort by ascending x value\n data_points.sort(key=itemgetter(0))\n\n if reference_x:\n assert reference_x == [seq[0] for seq in data_points]\n else:\n # extract a list of just the x values\n reference_x = [seq[0] for seq in data_points]\n\n\n # print \"ending with self.data=\",self.data\n\n def generate_plot(self,**kwargs):\n logger = logging.getLogger(\"plotting\")\n logger.error('Cannot generate a plot from abstract class: PlotWithData')\n # raise an exception here?\n\nclass MultipleSeriesPlot(PlotWithData):\n\n def generate_plot(self,filename,title='',xlabel='',ylabel='',xlim=None,ylim=None):\n\n logger = logging.getLogger(\"plotting\")\n logger.debug('MultipleSeriesPlot.generate_plot')\n\n # a plot with one or more time series sharing a common x axis:\n # e.g., the training error and the validation error plotted against epochs\n\n # sort the data series and make sure they are consistent\n self.sort_and_validate()\n\n # if there is a plot already in existence, we will clear it and re-use it;\n # this avoids creating extraneous figures which will stay in memory\n # (even if we are no longer referencing them)\n if self.plot:\n self.plot.clf()\n else:\n # create a plot\n self.plot = plt.figure()\n\n splt = self.plot.add_subplot(1, 1, 1)\n splt.set_title(title)\n splt.set_xlabel(xlabel)\n splt.set_ylabel(ylabel)\n\n if xlim:\n pylab.xlim(xlim)\n if ylim:\n pylab.ylim(ylim)\n\n for series_name,data_points in self.data.items():\n xpoints=numpy.asarray([seq[0] for seq in data_points])\n ypoints=numpy.asarray([seq[1] for seq in data_points])\n line, = splt.plot(xpoints, ypoints, '-', linewidth=2)\n logger.debug('set_label for %s' % series_name)\n line.set_label(series_name)\n\n splt.legend()\n\n # TO DO - better filename configuration for plots\n self.plot.savefig(filename)\n\nclass SingleWeightMatrixPlot(PlotWithData):\n\n def generate_plot(self, filename, title='', xlabel='', ylabel=''):\n\n data_keys = list(self.data.keys())\n key_num = len(data_keys)\n\n self.plot = plt.figure()\n if key_num == 1:\n splt = self.plot.add_subplot(1, 1, 1)\n im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')\n splt.set_xlabel(xlabel)\n splt.set_ylabel(ylabel)\n splt.set_title(title)\n else: ## plotting multiple images in one figure is not supported yet\n logger = logging.getLogger(\"plotting\")\n
the visualization is not good\n            logger = logging.getLogger(\"plotting\")\n            logger.error('plotting multiple weight matrices in one figure is not supported yet')\n            return\n\n        self.plot.colorbar(im_data)\n        self.plot.savefig(filename) #, bbox_inches='tight'\n\n#class MultipleLinesPlot(PlotWithData):\n#    def generate_plot(self, filename, title='', xlabel='', ylabel=''):\n\nclass LoggerPlotter(logging.getLoggerClass()):\n    \"\"\"Based on the built-in logging class, with added capabilities including plotting\"\"\"\n\n    # a dictionary to store all generated plots\n    # keys are plot names\n    # values are plot objects (instances of PlotWithData subclasses)\n    # deliberately class-level, so plots are shared by all LoggerPlotter instances\n    plots ={}\n    # where the plots will be saved - a directory\n    plot_path='/tmp' # default location\n\n    def __init__(self,name):\n        # initialise the logging parent class\n        # (should really use 'super' here I think, but that fails - perhaps because the built in logger class is not derived from 'object' ?)\n        logging.Logger.__init__(self,name)\n\n    def set_plot_path(self,path):\n        self.plot_path = path\n\n    def remove_all_plots(self):\n        self.plots={}\n\n    def create_plot(self,plot_name,plot_object):\n        self.plots[plot_name] = plot_object(plot_name)\n\n    def add_plot_point(self,plot_name,series_name,data_point):\n        # add a data point to a named plot\n        if plot_name not in self.plots:\n            self.plots[plot_name] = PlotWithData(plot_name)\n        self.plots[plot_name].add_data_point(series_name,data_point)\n\n    def save_plot(self,plot_name,**kwargs):\n        logger = logging.getLogger(\"plotting\")\n        if plot_name not in self.plots:\n            logger.warning('Tried to generate a plot called %s that does not exist' % plot_name)\n            # raise an exception here?\n        else:\n            # # the filename to save to is known by the handler, which needs to be assigned to this logger\n            # # look at the handlers attached to this logger instance\n            # ph=None\n            # for h in self.handlers:\n            #     # we want an instance of a PlotHandler - we'll take the first one we find\n            #     # (behaviour will be unpredictable if there is more than one handler of this type)\n            #     if isinstance(h,PlotHandler):\n            #         ph=h\n            #         break\n            # if ph:\n            # TO DO - need to be sure of safe file names\n            if not os.path.isdir(self.plot_path):\n                os.makedirs(self.plot_path)\n            filename = self.plot_path + \"/\" + plot_name.replace(\" \", \"_\") + \".pdf\"\n            logger.info('Generating a plot in file %s' % filename)\n            self.plots[plot_name].generate_plot(filename,**kwargs)\n            # else:\n            #     logger.warning('No handler of type PlotHandler is attached to this logger - cannot save plots')\n\n\n\n\nclass ColouredFormatter(logging.Formatter):\n\n    # colourising formatter adapted from an answer to this question on Stack Overflow\n    # http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output\n\n    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))\n\n    COLOURS = {\n        'DEBUG': BLUE,\n        'INFO': GREEN,\n        'WARNING': YELLOW,\n        'ERROR': RED,\n        'CRITICAL': MAGENTA\n    }\n\n    max_level_name_width = '8'\n\n    # terminal escape sequences\n    RESET_SEQ = \"\\033[0m\"\n    COLOUR_SEQ = \"\\033[1;%dm\"\n    BOLD_SEQ = \"\\033[1m\"\n\n    def format(self, record):\n        if record.levelname in self.COLOURS:\n            # pad to fixed width - currently hardwired, should make this dynamic\n            # maximum width of level names, which is the 8 characters of \"CRITICAL\"\n            fixed_width_levelname = '{0:8s}'.format(record.levelname)\n            record.name = '{0:8s}'.format(record.name)\n            # The background is set with 40 plus the number of the color, and the foreground with 30\n            record.levelname = self.COLOUR_SEQ % (30 + self.COLOURS[record.levelname]) + fixed_width_levelname + self.RESET_SEQ\n        return logging.Formatter.format(self, record)\n\n    @staticmethod\n    def factory(fmt, datefmt):\n        
return ColouredFormatter(fmt, datefmt)\n\nif __name__ == '__main__':\n    # some simple tests\n\n    # tell the built-in logger module to use our custom class when instantiating any new logger\n    logging.setLoggerClass(LoggerPlotter)\n\n\n    logger = logging.getLogger(\"test_logger\")\n    logger.setLevel(logging.DEBUG)\n\n    # a console handler\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)\n    formatter = ColouredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n\n\n\n    print(\"testing the logging code\")\n    logger.debug('A DEBUG message')\n    logger.info('An INFO message')\n    logger.warning('A WARN message')\n    logger.error('An ERROR message')\n    logger.critical('A CRITICAL message')\n\n\n    plotlogger = logging.getLogger(\"plotting\")\n    plotlogger.setLevel(logging.DEBUG)\n    # handler for plotting logger - will write only to console\n    plotlogger.addHandler(ch)\n\n\n    # # need a handler which will control where to save plots\n    # ph = PlotHandler(\"/tmp/plot_test/testing.pdf\")\n    # logger.addHandler(ph)\n\n\n    print(\"testing the plotting code\")\n\n    # the first argument is just a key for referring to this plot within the code\n    # the second argument says what kind of plot we will be making\n\n\n    plotlogger.set_plot_path(\"./tmp\")\n\n    logger.create_plot('test plot',MultipleSeriesPlot)\n\n    plotlogger.add_plot_point('test plot','validation',(1,4))\n    plotlogger.add_plot_point('test plot','validation',(3,2))\n    plotlogger.add_plot_point('test plot','validation',(2,3))\n    plotlogger.add_plot_point('test plot','validation',(4,3))\n\n    plotlogger.add_plot_point('test plot','training',(1,3))\n    plotlogger.add_plot_point('test plot','training',(3,1))\n    plotlogger.add_plot_point('test plot','training',(2,2))\n    plotlogger.add_plot_point('test plot','training',(4,4))\n\n    plotlogger.save_plot('test plot',title='Training and validation error',xlabel='epochs',ylabel='error')\n\n    weights = [[1, 2, 3, 3], [1, 1, 2, 1], [2, 1, 2, 2]]\n    logger.create_plot('activation weight', SingleWeightMatrixPlot)\n    plotlogger.add_plot_point('activation weight', 'weight1', weights)\n    plotlogger.add_plot_point('activation weight', 'weight2', weights)\n    plotlogger.add_plot_point('activation weight', 'weight3', weights)\n\n    plotlogger.save_plot('activation weight', title='weight', xlabel='dimension', ylabel='dimension')\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.figure",
"numpy.asarray",
"numpy.flipud"
]
] |
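A note on the plotting classes in the record above: MultipleSeriesPlot.generate_plot deliberately clears and re-uses one Figure object rather than allocating a new one per save, so repeated saves during a long training run do not accumulate live figures in pyplot's registry. Below is a minimal standalone sketch of that clear-and-reuse pattern; the class and file names are illustrative, not taken from the record.

import matplotlib
matplotlib.use('PDF')  # must precede the pyplot import, as in the module above
import matplotlib.pyplot as plt

class ReusablePlot(object):
    """Keep one Figure alive; wipe it with clf() before each redraw."""

    def __init__(self):
        self.fig = None

    def save(self, xs, ys, filename):
        if self.fig is None:
            self.fig = plt.figure()  # allocate exactly once
        else:
            self.fig.clf()           # re-use: clear instead of re-allocating
        ax = self.fig.add_subplot(1, 1, 1)
        ax.plot(xs, ys, '-', linewidth=2)
        self.fig.savefig(filename)

p = ReusablePlot()
p.save([1, 2, 3], [4, 2, 3], '/tmp/first.pdf')
p.save([1, 2, 3], [3, 1, 4], '/tmp/second.pdf')  # same Figure object, no leak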
Mirwaisse/tutorials | [
"18ec63ce8c85ef11af92685cc1436fd3034efc74"
] | [
"intermediate_source/model_parallel_tutorial.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nModel Parallel Best Practices\n*************************************************************\n**Author**: `Shen Li <https://mrshenli.github.io/>`_\n\nData parallel and model parallel are widely-used in distributed training\ntechniques. Previous posts have explained how to use\n`DataParallel <https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html>`_\nto train a neural network on multiple GPUs. ``DataParallel`` replicates the\nsame model to all GPUs, where each GPU consumes a different partition of the\ninput data. Although it can significantly accelerate the training process, it\ndoes not work for some use cases where the model is too large to fit into a\nsingle GPU. This post shows how to solve that problem by using model parallel\nand also shares some insights on how to speed up model parallel training.\n\nThe high-level idea of model parallel is to place different sub-networks of a\nmodel onto different devices, and implement the ``forward`` method accordingly\nto move intermediate outputs across devices. As only part of a model operates\non any individual device, a set of devices can collectively serve a larger\nmodel. In this post, we will not try to construct huge models and squeeze them\ninto a limited number of GPUs. Instead, this post focuses on showing the idea\nof model parallel. It is up to the readers to apply the ideas to real-world\napplications.\n\n**Recommended Reading:**\n\n- https://pytorch.org/ For installation instructions\n- :doc:`/beginner/blitz/data_parallel_tutorial` Single-Machine Data Parallel\n- :doc:`/intermediate/ddp_tutorial` Combine Distributed Data Parallel and Model Parallel\n\"\"\"\n\n######################################################################\n# Basic Usage\n# =======================\n#\n# Let us start with a toy model that contains two linear layers. To run this\n# model on two GPUs, simply put each linear layer on a different GPU, and move\n# inputs and intermediate outputs to match the layer devices accordingly.\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = torch.nn.Linear(10, 10).to('cuda:0')\n self.relu = torch.nn.ReLU()\n self.net2 = torch.nn.Linear(10, 5).to('cuda:1')\n\n def forward(self, x):\n x = self.relu(self.net1(x.to('cuda:0')))\n return self.net2(x.to('cuda:1'))\n\n######################################################################\n# Note that, the above ``ToyModel`` looks very similar to how one would\n# implement it on a single GPU, except the five ``to(device)`` calls which\n# place linear layers and tensors on proper devices. That is the only place in\n# the model that requires changes. The ``backward()`` and ``torch.optim`` will\n# automatically take care of gradients as if the model is on one GPU. You only\n# need to make sure that the labels are on the same device as the outputs when\n# calling the loss function.\n\n\nmodel = ToyModel()\nloss_fn = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.001)\n\noptimizer.zero_grad()\noutputs = model(torch.randn(20, 10))\nlabels = torch.randn(20, 5).to('cuda:1')\nloss_fn(outputs, labels).backward()\noptimizer.step()\n\n######################################################################\n# Apply Model Parallel to Existing Modules\n# =======================\n#\n# It is also possible to run an existing single-GPU module on multiple GPUs\n# with just a few lines of changes. 
The code below shows how to decompose\n# ``torchvision.models.resnet50()`` across two GPUs. The idea is to inherit from\n# the existing ``ResNet`` module, and split the layers across two GPUs during\n# construction. Then, override the ``forward`` method to stitch the two\n# sub-networks together by moving the intermediate outputs accordingly.\n\n\nfrom torchvision.models.resnet import ResNet, Bottleneck\n\nnum_classes = 1000\n\n\nclass ModelParallelResNet50(ResNet):\n    def __init__(self, *args, **kwargs):\n        super(ModelParallelResNet50, self).__init__(\n            Bottleneck, [3, 4, 6, 3], num_classes=num_classes, *args, **kwargs)\n\n        self.seq1 = nn.Sequential(\n            self.conv1,\n            self.bn1,\n            self.relu,\n            self.maxpool,\n\n            self.layer1,\n            self.layer2\n        ).to('cuda:0')\n\n        self.seq2 = nn.Sequential(\n            self.layer3,\n            self.layer4,\n            self.avgpool,\n        ).to('cuda:1')\n\n        self.fc.to('cuda:1')\n\n    def forward(self, x):\n        x = self.seq2(self.seq1(x).to('cuda:1'))\n        return self.fc(x.view(x.size(0), -1))\n\n\n######################################################################\n# The above implementation solves the problem for cases where the model is too\n# large to fit into a single GPU. However, you might have already noticed that\n# it will be slower than running it on a single GPU if your model fits. It is\n# because, at any point in time, only one of the two GPUs is working, while\n# the other one is sitting there doing nothing. The performance further\n# deteriorates as the intermediate outputs need to be copied from ``cuda:0`` to\n# ``cuda:1`` between ``layer2`` and ``layer3``.\n#\n# Let us run an experiment to get a more quantitative view of the execution\n# time. In this experiment, we train ``ModelParallelResNet50`` and the existing\n# ``torchvision.models.resnet50()`` by running random inputs and labels through\n# them. After the training, the models will not produce any useful predictions,\n# but we can get a reasonable understanding of the execution times.\n\n\nimport torchvision.models as models\n\nnum_batches = 3\nbatch_size = 120\nimage_w = 128\nimage_h = 128\n\n\ndef train(model):\n    model.train(True)\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(model.parameters(), lr=0.001)\n\n    one_hot_indices = torch.LongTensor(batch_size) \\\n                           .random_(0, num_classes) \\\n                           .view(batch_size, 1)\n\n    for _ in range(num_batches):\n        # generate random inputs and labels\n        inputs = torch.randn(batch_size, 3, image_w, image_h)\n        labels = torch.zeros(batch_size, num_classes) \\\n                      .scatter_(1, one_hot_indices, 1)\n\n        # run forward pass\n        optimizer.zero_grad()\n        outputs = model(inputs.to('cuda:0'))\n\n        # run backward pass\n        labels = labels.to(outputs.device)\n        loss_fn(outputs, labels).backward()\n        optimizer.step()\n\n\n######################################################################\n# The ``train(model)`` method above uses ``nn.MSELoss`` as the loss function,\n# and ``optim.SGD`` as the optimizer. It mimics training on ``128 x 128``\n# images which are organized into 3 batches where each batch contains 120\n# images. Then, we use ``timeit`` to run the ``train(model)`` method 10 times\n# and plot the execution times with standard deviations.\n\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('Agg')\nimport numpy as np\nimport timeit\n\nnum_repeat = 10\n\nstmt = \"train(model)\"\n\nsetup = \"model = ModelParallelResNet50()\"\n# globals arg is only available in Python 3. 
In Python 2, use the following\n# import __builtin__\n# __builtin__.__dict__.update(locals())\nmp_run_times = timeit.repeat(\n    stmt, setup, number=1, repeat=num_repeat, globals=globals())\nmp_mean, mp_std = np.mean(mp_run_times), np.std(mp_run_times)\n\nsetup = \"import torchvision.models as models;\" + \\\n        \"model = models.resnet50(num_classes=num_classes).to('cuda:0')\"\nrn_run_times = timeit.repeat(\n    stmt, setup, number=1, repeat=num_repeat, globals=globals())\nrn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)\n\n\ndef plot(means, stds, labels, fig_name):\n    fig, ax = plt.subplots()\n    ax.bar(np.arange(len(means)), means, yerr=stds,\n           align='center', alpha=0.5, ecolor='red', capsize=10, width=0.6)\n    ax.set_ylabel('ResNet50 Execution Time (Second)')\n    ax.set_xticks(np.arange(len(means)))\n    ax.set_xticklabels(labels)\n    ax.yaxis.grid(True)\n    plt.tight_layout()\n    plt.savefig(fig_name)\n    plt.close(fig)\n\n\nplot([mp_mean, rn_mean],\n     [mp_std, rn_std],\n     ['Model Parallel', 'Single GPU'],\n     'mp_vs_rn.png')\n\n\n######################################################################\n#\n# .. figure:: /_static/img/model-parallel-images/mp_vs_rn.png\n#    :alt:\n#\n# The result shows that the execution time of the model parallel implementation is\n# ``4.02/3.75-1=7%`` longer than the existing single-GPU implementation. So we\n# can conclude there is roughly 7% overhead in copying tensors back and forth\n# across the GPUs. There is room for improvement, as we know one of the two\n# GPUs sits idle throughout the execution. One option is to further\n# divide each batch into a pipeline of splits, such that when one split reaches\n# the second sub-network, the following split can be fed into the first\n# sub-network. In this way, two consecutive splits can run concurrently on two\n# GPUs.\n\n######################################################################\n# Speed Up by Pipelining Inputs\n# =======================\n#\n# In the following experiments, we further divide each 120-image batch into\n# 20-image splits. As PyTorch launches CUDA operations asynchronously, the\n# implementation does not need to spawn multiple threads to achieve\n# concurrency.\n\n\nclass PipelineParallelResNet50(ModelParallelResNet50):\n    def __init__(self, split_size=20, *args, **kwargs):\n        super(PipelineParallelResNet50, self).__init__(*args, **kwargs)\n        self.split_size = split_size\n\n    def forward(self, x):\n        splits = iter(x.split(self.split_size, dim=0))\n        s_next = next(splits)\n        s_prev = self.seq1(s_next).to('cuda:1')\n        ret = []\n\n        for s_next in splits:\n            # A. s_prev runs on cuda:1\n            s_prev = self.seq2(s_prev)\n            ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))\n\n            # B. s_next runs on cuda:0, which can run concurrently with A\n            s_prev = self.seq1(s_next).to('cuda:1')\n\n        s_prev = self.seq2(s_prev)\n        ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))\n\n        return torch.cat(ret)\n\n\nsetup = \"model = PipelineParallelResNet50()\"\npp_run_times = timeit.repeat(\n    stmt, setup, number=1, repeat=num_repeat, globals=globals())\npp_mean, pp_std = np.mean(pp_run_times), np.std(pp_run_times)\n\nplot([mp_mean, rn_mean, pp_mean],\n     [mp_std, rn_std, pp_std],\n     ['Model Parallel', 'Single GPU', 'Pipelining Model Parallel'],\n     'mp_vs_rn_vs_pp.png')\n\n######################################################################\n# Please note that device-to-device tensor copy operations are synchronized on\n# the current streams on the source and the destination devices. 
If you create\n# multiple streams, you have to make sure that copy operations are properly\n# synchronized. Writing the source tensor or reading/writing the destination\n# tensor before finishing the copy operation can lead to undefined behavior.\n# The above implementation only uses default streams on both source and\n# destination devices, hence it is not necessary to enforce additional\n# synchronizations.\n#\n# .. figure:: /_static/img/model-parallel-images/mp_vs_rn_vs_pp.png\n#    :alt:\n#\n# The experiment result shows that pipelining inputs to model parallel\n# ResNet50 speeds up the training process by roughly ``3.75/2.51-1=49%``. It is\n# still quite far away from the ideal 100% speedup. As we have introduced a new\n# parameter ``split_size`` in our pipeline parallel implementation, it is\n# unclear how the new parameter affects the overall training time. Intuitively\n# speaking, using a small ``split_size`` leads to many tiny CUDA kernel launches,\n# while using a large ``split_size`` results in relatively long idle times during\n# the first and last splits. Neither is optimal. There might be an optimal\n# ``split_size`` configuration for this specific experiment. Let us try to find\n# it by running experiments using several different ``split_size`` values.\n\n\nmeans = []\nstds = []\nsplit_sizes = [1, 3, 5, 8, 10, 12, 20, 40, 60]\n\nfor split_size in split_sizes:\n    setup = \"model = PipelineParallelResNet50(split_size=%d)\" % split_size\n    pp_run_times = timeit.repeat(\n        stmt, setup, number=1, repeat=num_repeat, globals=globals())\n    means.append(np.mean(pp_run_times))\n    stds.append(np.std(pp_run_times))\n\nfig, ax = plt.subplots()\nax.plot(split_sizes, means)\nax.errorbar(split_sizes, means, yerr=stds, ecolor='red', fmt='ro')\nax.set_ylabel('ResNet50 Execution Time (Second)')\nax.set_xlabel('Pipeline Split Size')\nax.set_xticks(split_sizes)\nax.yaxis.grid(True)\nplt.tight_layout()\nplt.savefig(\"split_size_tradeoff.png\")\nplt.close(fig)\n\n######################################################################\n#\n# .. figure:: /_static/img/model-parallel-images/split_size_tradeoff.png\n#    :alt:\n#\n# The result shows that setting ``split_size`` to 12 achieves the fastest\n# training speed, which leads to a ``3.75/2.43-1=54%`` speedup. There are\n# still opportunities to further accelerate the training process. For example,\n# all operations on ``cuda:0`` are placed on its default stream. It means that\n# computations on the next split cannot overlap with the copy operation of the\n# previous split. However, as the previous and next splits are different\n# tensors, there is no problem with overlapping one's computation with the\n# other's copy. The implementation needs to use multiple streams on both GPUs,\n# and different sub-network structures require different stream management\n# strategies. As no general multi-stream solution works for all model parallel\n# use cases, we will not discuss it in this tutorial.\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.randn",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.close",
"torch.nn.Sequential",
"torch.zeros",
"numpy.std",
"torch.LongTensor",
"torch.nn.ReLU",
"torch.cat",
"numpy.mean"
]
] |
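The closing paragraph of the tutorial above points at multi-stream overlap but deliberately stops short of showing code. Below is a minimal sketch of what overlapping the cuda:0 -> cuda:1 copy of one split with cuda:0 computation on the next split could look like; the helper name is hypothetical, two visible GPUs are assumed, and the synchronization shown is one illustrative choice, not the tutorial's method.

import torch

# A side stream on the source device carries the copies, leaving the
# default stream on cuda:0 free to compute on the next split.
copy_stream = torch.cuda.Stream(device='cuda:0')

def copy_to_cuda1_async(t):
    # Order the copy after all work already queued on cuda:0's current stream.
    copy_stream.wait_stream(torch.cuda.current_stream('cuda:0'))
    with torch.cuda.stream(copy_stream):
        u = t.to('cuda:1', non_blocking=True)
    # Keep t's memory alive until copy_stream has finished reading it.
    t.record_stream(copy_stream)
    # Work queued later on cuda:1's current stream must wait for the copy.
    torch.cuda.current_stream('cuda:1').wait_stream(copy_stream)
    return u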
kichiro09/object-detection | [
"e498d28503fd4a12d1fa9ade41891f2f9601c674"
] | [
"official/recommendation/ncf_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests NCF.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport mock\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom absl import flags\nfrom absl.testing import flagsaver\nfrom official.recommendation import constants as rconst\nfrom official.recommendation import data_preprocessing\nfrom official.recommendation import neumf_model\nfrom official.recommendation import ncf_main\nfrom official.recommendation import stat_utils\n\n\nNUM_TRAIN_NEG = 4\n\n\nclass NcfTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(NcfTest, cls).setUpClass()\n ncf_main.define_ncf_flags()\n\n def setUp(self):\n self.top_k_old = rconst.TOP_K\n self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES\n rconst.NUM_EVAL_NEGATIVES = 2\n\n def tearDown(self):\n rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old\n rconst.TOP_K = self.top_k_old\n\n def get_hit_rate_and_ndcg(self, predicted_scores_by_user, items_by_user,\n top_k=rconst.TOP_K, match_mlperf=False):\n rconst.TOP_K = top_k\n rconst.NUM_EVAL_NEGATIVES = predicted_scores_by_user.shape[1] - 1\n\n g = tf.Graph()\n with g.as_default():\n logits = tf.convert_to_tensor(\n predicted_scores_by_user.reshape((-1, 1)), tf.float32)\n softmax_logits = tf.concat([tf.zeros(logits.shape, dtype=logits.dtype),\n logits], axis=1)\n duplicate_mask = tf.convert_to_tensor(\n stat_utils.mask_duplicates(items_by_user, axis=1), tf.float32)\n\n metric_ops = neumf_model.compute_eval_loss_and_metrics(\n logits=logits, softmax_logits=softmax_logits,\n duplicate_mask=duplicate_mask, num_training_neg=NUM_TRAIN_NEG,\n match_mlperf=match_mlperf).eval_metric_ops\n\n hr = metric_ops[rconst.HR_KEY]\n ndcg = metric_ops[rconst.NDCG_KEY]\n\n init = [tf.global_variables_initializer(),\n tf.local_variables_initializer()]\n\n with self.test_session(graph=g) as sess:\n sess.run(init)\n return sess.run([hr[1], ndcg[1]])\n\n\n\n def test_hit_rate_and_ndcg(self):\n # Test with no duplicate items\n predictions = np.array([\n [1., 2., 0.], # In top 2\n [2., 1., 0.], # In top 1\n [0., 2., 1.], # In top 3\n [2., 3., 4.] 
# In top 3\n ])\n items = np.array([\n [1, 2, 3],\n [2, 3, 1],\n [3, 2, 1],\n [2, 1, 3],\n ])\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n # Test with duplicate items. In the MLPerf case, we treat the duplicates as\n # a single item. Otherwise, we treat the duplicates as separate items.\n predictions = np.array([\n [1., 2., 2., 3.], # In top 4. MLPerf: In top 3\n [3., 1., 0., 2.], # In top 1. MLPerf: In top 1\n [0., 2., 3., 2.], # In top 4. MLPerf: In top 3\n [3., 2., 4., 2.] # In top 2. MLPerf: In top 2\n ])\n items = np.array([\n [1, 2, 2, 3],\n [1, 2, 3, 4],\n [1, 2, 3, 2],\n [4, 3, 2, 1],\n ])\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(5)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n # Test with duplicate items, where the predictions for the same item can\n # differ. In the MLPerf case, we should take the first prediction.\n predictions = np.array([\n [3., 2., 4., 4.], # In top 3. MLPerf: In top 2\n [3., 4., 2., 4.], # In top 3. MLPerf: In top 3\n [2., 3., 4., 1.], # In top 3. MLPerf: In top 2\n [4., 3., 5., 2.] # In top 2. 
MLPerf: In top 1\n ])\n items = np.array([\n [1, 2, 2, 3],\n [4, 3, 3, 2],\n [2, 1, 1, 1],\n [4, 2, 2, 1],\n ])\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 0 / 4)\n self.assertAlmostEqual(ndcg, 0 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +\n 3 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +\n 3 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 3 / 4)\n self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +\n math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +\n math.log(2) / math.log(4)) / 4)\n\n _BASE_END_TO_END_FLAGS = {\n \"batch_size\": 1024,\n \"train_epochs\": 1,\n \"use_synthetic_data\": True\n }\n\n @flagsaver.flagsaver(**_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end(self):\n ncf_main.main(None)\n\n @flagsaver.flagsaver(ml_perf=True, **_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_mlperf(self):\n ncf_main.main(None)\n\n @flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_no_estimator(self):\n ncf_main.main(None)\n flags.FLAGS.ml_perf = True\n ncf_main.main(None)\n\n @flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_while_loop(self):\n # We cannot set use_while_loop = True in the flagsaver constructor, because\n # if the flagsaver sets it to True before setting use_estimator to False,\n # the flag validator will throw an error.\n flags.FLAGS.use_while_loop = True\n ncf_main.main(None)\n flags.FLAGS.ml_perf = True\n ncf_main.main(None)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.test.main()\n"
] | [
[
"tensorflow.zeros",
"tensorflow.global_variables_initializer",
"tensorflow.logging.set_verbosity",
"tensorflow.Graph",
"numpy.array",
"tensorflow.local_variables_initializer",
"tensorflow.test.main"
]
] |
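The expected values asserted in test_hit_rate_and_ndcg above follow directly from the rank of the true item among the scored candidates: HR@K is the fraction of users whose true item ranks in the top K, and each hit at rank r contributes 1/log2(r+1) to NDCG. A numpy-only sketch of that arithmetic (it assumes the true item's score sits in column 0, matching the fixtures, and ignores the duplicate-item handling the tests also exercise):

import numpy as np

def hr_and_ndcg(scores, top_k):
    # scores: [num_users, 1 + num_negatives]; column 0 holds the true item.
    # Rank of the true item among all candidates (1 = highest score).
    ranks = (scores > scores[:, :1]).sum(axis=1) + 1
    hits = ranks <= top_k
    hr = hits.mean()
    ndcg = np.where(hits, 1.0 / np.log2(ranks + 1), 0.0).mean()
    return hr, ndcg

scores = np.array([[1., 2., 0.],   # true item ranks 2nd
                   [2., 1., 0.],   # 1st
                   [0., 2., 1.],   # 3rd
                   [2., 3., 4.]])  # 3rd
print(hr_and_ndcg(scores, 2))  # -> (0.5, (1 + log(2)/log(3)) / 4)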
End-of-an-Era/PCN | [
"043c3063014166d831c07197d4e6748e824a5587"
] | [
"PCN/PyPCN.py"
] | [
"#!/usr/bin/python3\nfrom ctypes import *\nimport cv2\nimport numpy as np\nimport sys\nimport os\nimport time\nfrom ipdb import set_trace as dbg\nfrom enum import IntEnum\n\nclass CPoint(Structure):\n _fields_ = [(\"x\", c_int),\n (\"y\", c_int)]\n\nFEAT_POINTS = 14\nclass CWindow(Structure):\n _fields_ = [(\"x\", c_int),\n (\"y\", c_int),\n (\"width\", c_int),\n (\"angle\", c_int),\n (\"score\", c_float),\n (\"points\",CPoint*FEAT_POINTS)]\n\nclass FeatEnam(IntEnum):\n CHIN_0 = 0\n CHIN_1 = 1\n CHIN_2 = 2\n CHIN_3 = 3\n CHIN_4 = 4\n CHIN_5 = 5\n CHIN_6 = 6\n CHIN_7 = 7\n CHIN_8 = 8\n NOSE = 9\n EYE_LEFT = 10\n EYE_RIGHT = 11\n MOUTH_LEFT = 12\n MOUTH_RIGHT = 13\n FEAT_POINTS = 14\n\nlib = CDLL(\"/usr/local/lib/libPCN.so\")\n\ninit_detector = lib.init_detector\n#void *init_detector(const char *detection_model_path, \n# const char *pcn1_proto, const char *pcn2_proto, const char *pcn3_proto, \n# const char *tracking_model_path, const char *tracking_proto,\n# int min_face_size, float pyramid_scale_factor, float detection_thresh_stage1,\n# float detection_thresh_stage2, float detection_thresh_stage3, int tracking_period,\n# float tracking_thresh, int do_smooth)\ninit_detector.argtypes = [\n c_char_p, c_char_p, c_char_p, \n c_char_p, c_char_p, c_char_p,\n c_int,c_float,c_float,c_float,\n c_float,c_int,c_float,c_int]\ninit_detector.restype = c_void_p\n\n#CWindow* detect_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)\ndetect_faces = lib.detect_faces\ndetect_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]\ndetect_faces.restype = POINTER(CWindow)\n\n#CWindow* detect_track_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)\ndetect_track_faces = lib.detect_track_faces\ndetect_track_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]\ndetect_track_faces.restype = POINTER(CWindow)\n\n#void free_faces(CWindow* wins)\nfree_faces = lib.free_faces\nfree_faces.argtypes= [c_void_p]\n\n# void free_detector(void *pcn)\nfree_detector = lib.free_detector\nfree_detector.argtypes= [c_void_p]\n\nCYAN=(255,255,0)\nBLUE=(255,0,0)\nRED=(0,0,255)\nGREEN=(0,255,0)\nYELLOW=(0,255,255)\n\ndef DrawFace(win,img):\n width = 2\n x1 = win.x\n y1 = win.y\n x2 = win.width + win.x - 1\n y2 = win.width + win.y - 1\n centerX = (x1 + x2) / 2\n centerY = (y1 + y2) / 2\n angle = win.angle\n R = cv2.getRotationMatrix2D((centerX,centerY),angle,1)\n pts = np.array([[x1,y1,1],[x1,y2,1],[x2,y2,1],[x2,y1,1]], np.int32)\n pts = (pts @ R.T).astype(int) #Rotate points\n pts = pts.reshape((-1,1,2))\n cv2.polylines(img,[pts],True,CYAN,width)\n cv2.line(img, (pts[0][0][0],pts[0][0][1]), (pts[3][0][0],pts[3][0][1]), BLUE, width)\n \ndef DrawPoints(win,img):\n width = 2\n f = FeatEnam.NOSE\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,GREEN,-1)\n f = FeatEnam.EYE_LEFT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)\n f = FeatEnam.EYE_RIGHT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)\n f = FeatEnam.MOUTH_LEFT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)\n f = FeatEnam.MOUTH_RIGHT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)\n for i in range(8):\n cv2.circle(img,(win.points[i].x,win.points[i].y),width,BLUE,-1)\n\ndef SetThreadCount(threads):\n os.environ['OMP_NUM_THREADS'] = str(threads)\n\ndef c_str(str_in):\n return c_char_p(str_in.encode('utf-8'))\n\nvideo_flag = 0\n\nif __name__==\"__main__\":\n\n SetThreadCount(1)\n path = 
'/usr/local/share/pcn/'\n    detection_model_path = c_str(path + \"PCN.caffemodel\")\n    pcn1_proto = c_str(path + \"PCN-1.prototxt\")\n    pcn2_proto = c_str(path + \"PCN-2.prototxt\")\n    pcn3_proto = c_str(path + \"PCN-3.prototxt\")\n    tracking_model_path = c_str(path + \"PCN-Tracking.caffemodel\")\n    tracking_proto = c_str(path + \"PCN-Tracking.prototxt\")\n    if video_flag:\n        cap = cv2.VideoCapture(0)\n        detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,\n                                 tracking_model_path,tracking_proto,\n                                 40,1.45,0.5,0.5,0.98,30,0.9,1)\n        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n        fps = cap.get(cv2.CAP_PROP_FPS)\n        while cap.isOpened():\n            ret, frame = cap.read()\n            if not ret:\n                break\n            start = time.time()\n            face_count = c_int(0)\n            raw_data = frame.ctypes.data_as(POINTER(c_ubyte))\n            windows = detect_track_faces(detector, raw_data,\n                                         int(height), int(width),\n                                         pointer(face_count))\n            end = time.time()\n            for i in range(face_count.value):\n                DrawFace(windows[i],frame)\n                DrawPoints(windows[i],frame)\n            free_faces(windows)\n            fps = int(1 / (end - start))\n            cv2.putText(frame, str(fps) + \"fps\", (20, 45), 4, 1, (0, 0, 125))\n            cv2.imshow('PCN', frame)\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n        cap.release()\n    else:\n        detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,\n                                 tracking_model_path,tracking_proto,\n                                 40,1.45,0.5,0.5,0.98,30,0.9,0)\n        for i in range(1, 27):\n            frame = cv2.imread(\"imgs/\" + str(i) + \".jpg\")\n            start = time.time()\n            face_count = c_int(0)\n            raw_data = frame.ctypes.data_as(POINTER(c_ubyte))\n            windows = detect_faces(detector, raw_data,\n                                   frame.shape[0], frame.shape[1],\n                                   pointer(face_count))\n            end = time.time()\n            print(i, end - start, \"s\")\n            # use a separate loop variable so the image index i is not clobbered\n            for j in range(face_count.value):\n                DrawFace(windows[j],frame)\n                DrawPoints(windows[j],frame)\n            free_faces(windows)\n            cv2.imshow('PCN', frame)\n            cv2.waitKey()\n\n    free_detector(detector)\n\n"
] | [
[
"numpy.array"
]
] |
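The script in the record above frees the C-side windows and the detector by hand at the end of each branch. Below is a small sketch of wrapping those ctypes calls so cleanup also happens when an exception interrupts the loop; the wrapper names are hypothetical, and the binding symbols (init_detector, detect_faces, free_faces, free_detector) are assumed to be in scope from the module above.

from contextlib import contextmanager
from ctypes import POINTER, c_int, c_ubyte, pointer

@contextmanager
def pcn_detector(*init_args):
    det = init_detector(*init_args)
    try:
        yield det
    finally:
        free_detector(det)  # runs even if the body raises

def detect(det, frame):
    face_count = c_int(0)
    raw = frame.ctypes.data_as(POINTER(c_ubyte))
    wins = detect_faces(det, raw, frame.shape[0], frame.shape[1],
                        pointer(face_count))
    try:
        # Copy plain Python values out before freeing the C array, so no
        # ctypes view into freed memory can escape this function.
        return [(wins[k].x, wins[k].y, wins[k].width, wins[k].angle, wins[k].score)
                for k in range(face_count.value)]
    finally:
        free_faces(wins)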