repo_name (string, 8-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
---|---|---|---|---|
adcrn/knest | [
"a274dc9ddb642cc30f837e225f000bf33430eb43"
] | [
"utils/compare.py"
] | [
"# UCF Senior Design 2017-18\n# Group 38\n\nfrom PIL import Image\nimport cv2\nimport imagehash\nimport math\nimport numpy as np\n\nDIFF_THRES = 20\nLIMIT = 2\nRESIZE = 1000\n\n\ndef calc_hash(img):\n \"\"\"\n Calculate the wavelet hash of the image\n img: (ndarray) image file\n \"\"\"\n # resize image if height > 1000\n img = resize(img)\n return imagehash.whash(Image.fromarray(img))\n\n\ndef compare(hash1, hash2):\n \"\"\"\n Calculate the difference between two images\n hash1: (array) first wavelet hash\n hash2: (array) second wavelet hash\n \"\"\"\n return hash1 - hash2\n\n\ndef limit(img, std_hash, count):\n \"\"\"\n Determine whether image should be removed from image dictionary in main.py\n img: (ndarray) image file\n std_hash: (array) wavelet hash of comparison standard\n count: (int) global count of images similar to comparison standard\n \"\"\"\n # calculate hash for given image\n cmp_hash = calc_hash(img)\n\n # compare to standard\n diff = compare(std_hash, cmp_hash)\n\n # image is similar to standard\n if diff <= DIFF_THRES:\n # if there are 3 similar images already, remove image\n if count >= LIMIT:\n return 'remove'\n\n # non-similar image found\n else:\n # update comparison standard\n return 'update_std'\n\n # else continue reading images with same standard\n return 'continue'\n\n\ndef resize(img):\n \"\"\"\n Resize an image\n img: (ndarray) RGB color image\n \"\"\"\n # get dimensions of image\n width = np.shape(img)[1]\n height = np.shape(img)[0]\n\n # if height of image is greater than 1000, resize it to 1000\n if width > RESIZE:\n # keep resize proportional\n scale = RESIZE / width\n resized_img = cv2.resize(\n img, (RESIZE, math.floor(height / scale)), cv2.INTER_AREA)\n # return resized image\n return resized_img\n\n # if height of image is less than 1000, return image unresized\n return img\n\n\ndef set_standard(images, filename):\n \"\"\"\n Set new comparison standard and update information\n images: (dictionary) dictionary containing all the image data\n filename: (String) name of the image file\n \"\"\"\n return filename, calc_hash(images[filename]), 0\n"
] | [
[
"numpy.shape"
]
] |
dongmengshi/easylearn | [
"df528aaa69c3cf61f5459a04671642eb49421dfb",
"df528aaa69c3cf61f5459a04671642eb49421dfb"
] | [
"eslearn/utils/lc_featureSelection_variance.py",
"eslearn/machine_learning/test/GCNNCourseCodes/metrics.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 14:38:20 2018\ndimension reduction with VarianceThreshold using sklearn.\nFeature selector that removes all low-variance features.\n@author: lenovo\n\"\"\"\nfrom sklearn.feature_selection import VarianceThreshold\nimport numpy as np\n#\nnp.random.seed(1)\nX = np.random.randn(100, 10)\nX = np.hstack([X, np.zeros([100, 5])])\n#\n\n\ndef featureSelection_variance(X, thrd):\n sel = VarianceThreshold(threshold=thrd)\n X_selected = sel.fit_transform(X)\n mask = sel.get_support()\n return X_selected, mask\n\n\nX = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]\nselector = VarianceThreshold()\nselector.fit_transform(X)\nselector.variances_\n",
"import tensorflow as tf\n\n\ndef masked_softmax_cross_entropy(preds, labels, mask):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) \n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)\n\ndef sigmoid_cross_entropy(preds, labels):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels) \n return tf.reduce_mean(loss)\n\ndef softmax_cross_entropy(preds, labels):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) \n return tf.reduce_mean(loss)\n\ndef masked_accuracy(preds, labels, mask):\n \"\"\"Accuracy with masking.\"\"\"\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))\n accuracy_all = tf.cast(correct_prediction, tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n accuracy_all *= mask\n return tf.reduce_mean(accuracy_all)\n\ndef inductive_multiaccuracy(preds, labels):\n \"\"\"Accuracy with masking.\"\"\"\n\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)) \n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\ndef inductive_accuracy(preds, labels):\n \"\"\"Accuracy with masking.\"\"\"\n\n predicted = tf.nn.sigmoid(preds)\n correct_pred = tf.equal(tf.round(predicted), labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return accuracy\n"
] | [
[
"numpy.random.randn",
"numpy.random.seed",
"numpy.zeros",
"sklearn.feature_selection.VarianceThreshold"
],
[
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.nn.sigmoid",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.round",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.argmax"
]
] |
silent567/examples | [
"e9de12549125ecd93a4924f6b8e2bbf66d7635d9"
] | [
"mnist/my_multi_tune3.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\nfrom my_multi_main3 import main\nimport numpy as np\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\nparser.add_argument('--norm-flag', type=bool, default=False,\n help='Triggering the Layer Normalization flag for attention scores')\nparser.add_argument('--gamma', type=float, default=None,\n help='Controlling the sparisty of gfusedmax/sparsemax, the smaller, the more sparse')\nparser.add_argument('--lam', type=float, default=1.0,\n help='Lambda: Controlling the smoothness of gfusedmax, the larger, the smoother')\nparser.add_argument('--max-type', type=str, default='softmax',choices=['softmax','sparsemax','gfusedmax'],\n help='mapping function in attention')\nparser.add_argument('--optim-type', type=str, default='SGD',choices=['SGD','Adam'],\n help='mapping function in attention')\nparser.add_argument('--head-cnt', type=int, default=2, metavar='S', choices=[1,2,4,5,10],\n help='Number of heads for attention (default: 1)')\n\nargs = parser.parse_args()\n\nhyperparameter_choices = {\n 'lr':list(10**np.arange(-4,-1,0.5)),\n 'norm_flag': [True,False],\n 'gamma':list(10**np.arange(-1,3,0.5))+[None,],\n 'lam':list(10**np.arange(-2,2,0.5)),\n 'max_type':['softmax','sparsemax','gfusedmax'],\n # 'max_type':['sparsemax'],\n 'optim_type':['SGD','Adam'],\n 'head_cnt':[1,2,4,5,10,20]\n}\n\nparam_num = 25\nrecord = np.zeros([param_num,len(hyperparameter_choices)+1])\nrecord_name = 'record3_multi_%s.csv'%time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime())\nfor n in range(param_num):\n for param_index,(k,v) in enumerate(hyperparameter_choices.items()):\n print(param_index,k)\n value_index = np.random.choice(len(v))\n if isinstance(v[value_index],str) or isinstance(v[value_index],bool) or v[value_index] is None:\n record[n,param_index] = value_index\n else:\n record[n,param_index] = v[value_index]\n setattr(args,k,v[value_index])\n record[n,-1] = main(args)\n np.savetxt(record_name, record, delimiter=',')\n\n\n\n"
] | [
[
"numpy.arange",
"numpy.savetxt"
]
] |
neonbjb/DL-Art-School | [
"a6f0f854b987ac724e258af8b042ea4459a571bc"
] | [
"codes/data/image_corruptor.py"
] | [
"import functools\nimport random\nfrom math import cos, pi\n\nimport cv2\nimport kornia\nimport numpy as np\nimport torch\nfrom kornia.augmentation import ColorJitter\n\nfrom data.util import read_img\nfrom PIL import Image\nfrom io import BytesIO\n\n\n# Get a rough visualization of the above distribution. (Y-axis is meaningless, just spreads data)\nfrom utils.util import opt_get\n\n'''\nif __name__ == '__main__':\n import numpy as np\n import matplotlib.pyplot as plt\n data = np.asarray([get_rand() for _ in range(5000)])\n plt.plot(data, np.random.uniform(size=(5000,)), 'x')\n plt.show()\n'''\n\n\ndef kornia_color_jitter_numpy(img, setting):\n if setting * 255 > 1:\n # I'm using Kornia's ColorJitter, which requires pytorch arrays in b,c,h,w format.\n img = torch.from_numpy(img).permute(2,0,1).unsqueeze(0)\n img = ColorJitter(setting, setting, setting, setting)(img)\n img = img.squeeze(0).permute(1,2,0).numpy()\n return img\n\n\n# Performs image corruption on a list of images from a configurable set of corruption\n# options.\nclass ImageCorruptor:\n def __init__(self, opt):\n self.opt = opt\n self.reset_random()\n self.blur_scale = opt['corruption_blur_scale'] if 'corruption_blur_scale' in opt.keys() else 1\n self.fixed_corruptions = opt['fixed_corruptions'] if 'fixed_corruptions' in opt.keys() else []\n self.num_corrupts = opt['num_corrupts_per_image'] if 'num_corrupts_per_image' in opt.keys() else 0\n self.cosine_bias = opt_get(opt, ['cosine_bias'], True)\n if self.num_corrupts == 0:\n return\n else:\n self.random_corruptions = opt['random_corruptions'] if 'random_corruptions' in opt.keys() else []\n\n def reset_random(self):\n if 'random_seed' in self.opt.keys():\n self.rand = random.Random(self.opt['random_seed'])\n else:\n self.rand = random.Random()\n\n # Feeds a random uniform through a cosine distribution to slightly bias corruptions towards \"uncorrupted\".\n # Return is on [0,1] with a bias towards 0.\n def get_rand(self):\n r = self.rand.random()\n if self.cosine_bias:\n return 1 - cos(r * pi / 2)\n else:\n return r\n\n def corrupt_images(self, imgs, return_entropy=False):\n if self.num_corrupts == 0 and not self.fixed_corruptions:\n if return_entropy:\n return imgs, []\n else:\n return imgs\n\n if self.num_corrupts == 0:\n augmentations = []\n else:\n augmentations = random.choices(self.random_corruptions, k=self.num_corrupts)\n\n # Sources of entropy\n corrupted_imgs = []\n entropy = []\n undo_fns = []\n applied_augs = augmentations + self.fixed_corruptions\n for img in imgs:\n for aug in augmentations:\n r = self.get_rand()\n img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)\n if undo_fn is not None:\n undo_fns.append(undo_fn)\n for aug in self.fixed_corruptions:\n r = self.get_rand()\n img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)\n entropy.append(r)\n if undo_fn is not None:\n undo_fns.append(undo_fn)\n # Apply undo_fns after all corruptions are finished, in same order.\n for ufn in undo_fns:\n img = ufn(img)\n corrupted_imgs.append(img)\n\n\n if return_entropy:\n return corrupted_imgs, entropy\n else:\n return corrupted_imgs\n\n def apply_corruption(self, img, aug, rand_val, applied_augmentations):\n undo_fn = None\n if 'color_quantization' in aug:\n # Color quantization\n quant_div = 2 ** (int(rand_val * 10 / 3) + 2)\n img = img * 255\n img = (img // quant_div) * quant_div\n img = img / 255\n elif 'color_jitter' in aug:\n lo_end = 0\n hi_end = .2\n setting = rand_val * (hi_end - lo_end) + lo_end\n img = kornia_color_jitter_numpy(img, 
setting)\n elif 'gaussian_blur' in aug:\n img = cv2.GaussianBlur(img, (0,0), self.blur_scale*rand_val*1.5)\n elif 'motion_blur' in aug:\n # Motion blur\n intensity = self.blur_scale*rand_val * 3 + 1\n angle = random.randint(0,360)\n k = np.zeros((intensity, intensity), dtype=np.float32)\n k[(intensity - 1) // 2, :] = np.ones(intensity, dtype=np.float32)\n k = cv2.warpAffine(k, cv2.getRotationMatrix2D((intensity / 2 - 0.5, intensity / 2 - 0.5), angle, 1.0),\n (intensity, intensity))\n k = k * (1.0 / np.sum(k))\n img = cv2.filter2D(img, -1, k)\n elif 'block_noise' in aug:\n # Large distortion blocks in part of an img, such as is used to mask out a face.\n pass\n elif 'lq_resampling' in aug:\n # Random mode interpolation HR->LR->HR\n if 'lq_resampling4x' == aug:\n scale = 4\n else:\n if rand_val < .3:\n scale = 1\n elif rand_val < .7:\n scale = 2\n else:\n scale = 4\n if scale > 1:\n interpolation_modes = [cv2.INTER_NEAREST, cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_LANCZOS4]\n mode = random.randint(0,4) % len(interpolation_modes)\n # Downsample first, then upsample using the random mode.\n img = cv2.resize(img, dsize=(img.shape[1]//scale, img.shape[0]//scale), interpolation=mode)\n def lq_resampling_undo_fn(scale, img):\n return cv2.resize(img, dsize=(img.shape[1]*scale, img.shape[0]*scale), interpolation=cv2.INTER_LINEAR)\n undo_fn = functools.partial(lq_resampling_undo_fn, scale)\n elif 'color_shift' in aug:\n # Color shift\n pass\n elif 'interlacing' in aug:\n # Interlacing distortion\n pass\n elif 'chromatic_aberration' in aug:\n # Chromatic aberration\n pass\n elif 'noise' in aug:\n # Random noise\n if 'noise-5' == aug:\n noise_intensity = 5 / 255.0\n else:\n noise_intensity = (rand_val*6) / 255.0\n img += np.random.rand(*img.shape) * noise_intensity\n elif 'jpeg' in aug:\n if 'noise' not in applied_augmentations and 'noise-5' not in applied_augmentations:\n if aug == 'jpeg':\n lo=10\n range=20\n elif aug == 'jpeg-low':\n lo=15\n range=10\n elif aug == 'jpeg-medium':\n lo=23\n range=25\n elif aug == 'jpeg-broad':\n lo=15\n range=60\n elif aug == 'jpeg-normal':\n lo=47\n range=35\n else:\n raise NotImplementedError(\"specified jpeg corruption doesn't exist\")\n # JPEG compression\n qf = (int((1-rand_val)*range) + lo)\n # Use PIL to perform a mock compression to a data buffer, then swap back to cv2.\n img = (img * 255).astype(np.uint8)\n img = Image.fromarray(img)\n buffer = BytesIO()\n img.save(buffer, \"JPEG\", quality=qf, optimize=True)\n buffer.seek(0)\n jpeg_img_bytes = np.asarray(bytearray(buffer.read()), dtype=\"uint8\")\n img = read_img(\"buffer\", jpeg_img_bytes, rgb=True)\n elif 'saturation' in aug:\n # Lightening / saturation\n saturation = rand_val * .3\n img = np.clip(img + saturation, a_max=1, a_min=0)\n elif 'greyscale' in aug:\n img = np.tile(np.mean(img, axis=2, keepdims=True), [1,1,3])\n elif 'none' not in aug:\n raise NotImplementedError(\"Augmentation doesn't exist\")\n\n return img, undo_fn\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.zeros",
"torch.from_numpy",
"numpy.clip",
"numpy.random.rand",
"numpy.mean"
]
] |
pclucas14/continuum | [
"09034db1371e9646ca660fd4d4df73e61bf77067"
] | [
"tests/test_background_swap.py"
] | [
"import os\n\nfrom torch.utils.data import DataLoader\nfrom continuum.datasets import CIFAR10, InMemoryDataset\nfrom continuum.datasets import MNIST\nimport torchvision\nfrom continuum.scenarios import TransformationIncremental\nimport pytest\nimport numpy as np\n\nfrom continuum.transforms.bg_swap import BackgroundSwap\n\nDATA_PATH = os.environ.get(\"CONTINUUM_DATA_PATH\")\n\n# Uncomment for debugging via image output\n# import matplotlib.pyplot as plt\n\n\ndef test_bg_swap_fast():\n \"\"\"\n Fast test for background swap.\n \"\"\"\n bg_x = np.ones(shape=[2, 5, 5, 3]) * -1\n bg_y = np.random.rand(2)\n\n fg = np.random.normal(loc=.5, scale=.1, size=[5, 5])\n bg = InMemoryDataset(bg_x, bg_y)\n\n bg_swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)\n\n spliced_1_channel = bg_swap(fg)[:, :, 0]\n\n assert np.array_equal((spliced_1_channel <= -1), (fg <= .5))\n\n\n@pytest.mark.slow\ndef test_background_swap_numpy():\n \"\"\"\n Test background swap on a single ndarray input.\n \"\"\"\n mnist = MNIST(DATA_PATH, download=True, train=True)\n cifar = CIFAR10(DATA_PATH, download=True, train=True)\n\n bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))\n\n im = mnist.get_data()[0][0]\n im = bg_swap(im)\n\n # Uncomment for debugging\n # plt.imshow(im, interpolation='nearest')\n # plt.show()\n\n\n@pytest.mark.slow\ndef test_background_swap_torch():\n \"\"\"\n Test background swap on a single tensor input.\n \"\"\"\n cifar = CIFAR10(DATA_PATH, download=True, train=True)\n\n mnist = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor()\n ]))\n\n bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))\n im = mnist[0][0]\n\n im = bg_swap(im)\n\n # Uncomment for debugging\n # plt.imshow(im.permute(1, 2, 0), interpolation='nearest')\n # plt.show()\n\n\n@pytest.mark.slow\ndef test_background_tranformation():\n \"\"\"\n Example code using TransformationIncremental to create a setting with 3 tasks.\n \"\"\"\n cifar = CIFAR10(DATA_PATH, train=True)\n mnist = MNIST(DATA_PATH, download=False, train=True)\n nb_task = 3\n list_trsf = []\n for i in range(nb_task):\n list_trsf.append([torchvision.transforms.ToTensor(), BackgroundSwap(cifar, bg_label=i, input_dim=(28, 28)),\n torchvision.transforms.ToPILImage()])\n scenario = TransformationIncremental(mnist, base_transformations=[torchvision.transforms.ToTensor()],\n incremental_transformations=list_trsf)\n folder = \"tests/samples/background_trsf/\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n for task_id, task_data in enumerate(scenario):\n task_data.plot(path=folder, title=f\"background_{task_id}.jpg\", nb_samples=100, shape=[28, 28, 3])\n loader = DataLoader(task_data)\n _, _, _ = next(iter(loader))\n"
] | [
[
"numpy.ones",
"torch.utils.data.DataLoader",
"numpy.random.rand",
"numpy.array_equal",
"numpy.random.normal"
]
] |
g-nightingale/tox_examples | [
"d7714375c764580b4b8af9db61332ced4e851def"
] | [
"packaging/squarer/ml_squarer.py"
] | [
"import numpy as np\n\n\ndef train_ml_squarer() -> None:\n print(\"Training!\")\n\n\ndef square() -> int:\n \"\"\"Square a number...maybe\"\"\"\n return np.random.randint(1, 100)\n\n\nif __name__ == '__main__':\n train_ml_squarer()"
] | [
[
"numpy.random.randint"
]
] |
GOOGLE-M/SGC | [
"78ad8d02b80808302e38559e2d0f430f66a809bd"
] | [
"venv/lib/python3.7/site-packages/torch/utils/benchmark/utils/timer.py"
] | [
"\"\"\"Timer class based on the timeit.Timer class, but torch aware.\"\"\"\nimport enum\nimport timeit\nimport textwrap\nfrom typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.benchmark.utils import common, cpp_jit\nfrom torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType\nfrom torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface\n\n\n__all__ = [\"Timer\", \"timer\", \"Language\"]\n\n\nif torch.has_cuda and torch.cuda.is_available():\n def timer() -> float:\n torch.cuda.synchronize()\n return timeit.default_timer()\nelse:\n timer = timeit.default_timer\n\n\nclass Language(enum.Enum):\n PYTHON = 0\n CPP = 1\n\n\nclass CPPTimer:\n def __init__(\n self,\n stmt: str,\n setup: str,\n timer: Callable[[], float],\n globals: Dict[str, Any],\n ) -> None:\n if timer is not timeit.default_timer:\n raise NotImplementedError(\n \"PyTorch was built with CUDA and a GPU is present; however \"\n \"Timer does not yet support GPU measurements. If your \"\n \"code is CPU only, pass `timer=timeit.default_timer` to the \"\n \"Timer's constructor to indicate this. (Note that this will \"\n \"produce incorrect results if the GPU is in fact used, as \"\n \"Timer will not synchronize CUDA.)\"\n )\n\n if globals:\n raise ValueError(\"C++ timing does not support globals.\")\n\n self._stmt: str = textwrap.dedent(stmt)\n self._setup: str = textwrap.dedent(setup)\n self._timeit_module: Optional[TimeitModuleType] = None\n\n def timeit(self, number: int) -> float:\n if self._timeit_module is None:\n self._timeit_module = cpp_jit.compile_timeit_template(\n self._stmt,\n self._setup,\n )\n\n return self._timeit_module.timeit(number)\n\n\nclass Timer(object):\n \"\"\"Helper class for measuring execution time of PyTorch statements.\n\n For a full tutorial on how to use this class, see:\n https://pytorch.org/tutorials/recipes/recipes/benchmark.html\n\n The PyTorch Timer is based on `timeit.Timer` (and in fact uses\n `timeit.Timer` internally), but with several key differences:\n\n 1) Runtime aware:\n Timer will perform warmups (important as some elements of PyTorch are\n lazily initialized), set threadpool size so that comparisons are\n apples-to-apples, and synchronize asynchronous CUDA functions when\n necessary.\n\n 2) Focus on replicates:\n When measuring code, and particularly complex kernels / models,\n run-to-run variation is a significant confounding factor. It is\n expected that all measurements should include replicates to quantify\n noise and allow median computation, which is more robust than mean.\n To that effect, this class deviates from the `timeit` API by\n conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.\n (Exact algorithms are discussed in method docstrings.) The `timeit`\n method is replicated for cases where an adaptive strategy is not\n desired.\n\n 3) Optional metadata:\n When defining a Timer, one can optionally specify `label`, `sub_label`,\n `description`, and `env`. 
(Defined later) These fields are included in\n the representation of result object and by the `Compare` class to group\n and display results for comparison.\n\n 4) Instruction counts\n In addition to wall times, Timer can run a statement under Callgrind\n and report instructions executed.\n\n Directly analogous to `timeit.Timer` constructor arguments:\n\n `stmt`, `setup`, `timer`, `globals`\n\n PyTorch Timer specific constructor arguments:\n\n `label`, `sub_label`, `description`, `env`, `num_threads`\n\n Args:\n stmt: Code snippet to be run in a loop and timed.\n\n setup: Optional setup code. Used to define variables used in `stmt`\n\n timer:\n Callable which returns the current time. If PyTorch was built\n without CUDA or there is no GPU present, this defaults to\n `timeit.default_timer`; otherwise it will synchronize CUDA before\n measuring the time.\n\n globals:\n A dict which defines the global variables when `stmt` is being\n executed. This is the other method for providing variables which\n `stmt` needs.\n\n label:\n String which summarizes `stmt`. For instance, if `stmt` is\n \"torch.nn.functional.relu(torch.add(x, 1, out=out))\"\n one might set label to \"ReLU(x + 1)\" to improve readability.\n\n sub_label:\n Provide supplemental information to disambiguate measurements\n with identical stmt or label. For instance, in our example\n above sub_label might be \"float\" or \"int\", so that it is easy\n to differentiate:\n \"ReLU(x + 1): (float)\"\n\n \"ReLU(x + 1): (int)\"\n when printing Measurements or summarizing using `Compare`.\n\n description:\n String to distinguish measurements with identical label and\n sub_label. The principal use of `description` is to signal to\n `Compare` the columns of data. For instance one might set it\n based on the input size to create a table of the form: ::\n\n | n=1 | n=4 | ...\n ------------- ...\n ReLU(x + 1): (float) | ... | ... | ...\n ReLU(x + 1): (int) | ... | ... | ...\n\n\n using `Compare`. It is also included when printing a Measurement.\n\n env:\n This tag indicates that otherwise identical tasks were run in\n different environments, and are therefore not equivilent, for\n instance when A/B testing a change to a kernel. `Compare` will\n treat Measurements with different `env` specification as distinct\n when merging replicate runs.\n\n num_threads:\n The size of the PyTorch threadpool when executing `stmt`. Single\n threaded performace is important as both a key inference workload\n and a good indicator of intrinsic algorithmic efficiency, so the\n default is set to one. 
This is in contrast to the default PyTorch\n threadpool size which tries to utilize all cores.\n \"\"\"\n\n _timer_cls: Type[TimerClass] = timeit.Timer\n\n def __init__(\n self,\n stmt: str = \"pass\",\n setup: str = \"pass\",\n timer: Callable[[], float] = timer,\n globals: Optional[Dict[str, Any]] = None,\n label: Optional[str] = None,\n sub_label: Optional[str] = None,\n description: Optional[str] = None,\n env: Optional[str] = None,\n num_threads: int = 1,\n language: Union[Language, str] = Language.PYTHON,\n ):\n if not isinstance(stmt, str):\n raise ValueError(\"Currently only a `str` stmt is supported.\")\n\n # We copy `globals` to prevent mutations from leaking.\n # (For instance, `eval` adds the `__builtins__` key)\n self._globals = dict(globals or {})\n if language in (Language.PYTHON, \"py\", \"python\"):\n # Include `torch` if not specified as a convenience feature.\n self._globals.setdefault(\"torch\", torch)\n self._language: Language = Language.PYTHON\n\n elif language in (Language.CPP, \"cpp\", \"c++\"):\n assert self._timer_cls is timeit.Timer, \"_timer_cls has already been swapped.\"\n self._timer_cls = CPPTimer\n setup = (\"\" if setup == \"pass\" else setup)\n self._language = Language.CPP\n\n else:\n raise ValueError(f\"Invalid language `{language}`.\")\n\n # Convenience adjustment so that multi-line code snippets defined in\n # functions do not IndentationError (Python) or look odd (C++). The\n # leading newline removal is for the initial newline that appears when\n # defining block strings. For instance:\n # textwrap.dedent(\"\"\"\n # print(\"This is a stmt\")\n # \"\"\")\n # produces '\\nprint(\"This is a stmt\")\\n'.\n #\n # Stripping this down to 'print(\"This is a stmt\")' doesn't change\n # what gets executed, but it makes __repr__'s nicer.\n stmt = textwrap.dedent(stmt)\n stmt = (stmt[1:] if stmt and stmt[0] == \"\\n\" else stmt).rstrip()\n setup = textwrap.dedent(setup)\n setup = (setup[1:] if setup and setup[0] == \"\\n\" else setup).rstrip()\n\n self._timer = self._timer_cls(\n stmt=stmt,\n setup=setup,\n timer=timer,\n globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),\n )\n self._task_spec = common.TaskSpec(\n stmt=stmt,\n setup=setup,\n label=label,\n sub_label=sub_label,\n description=description,\n env=env,\n num_threads=num_threads,\n )\n\n def timeit(self, number: int = 1000000) -> common.Measurement:\n \"\"\"Mirrors the semantics of timeit.Timer.timeit().\n\n Execute the main statement (`stmt`) `number` times.\n https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit\n \"\"\"\n with common.set_torch_threads(self._task_spec.num_threads):\n # Warmup\n self._timer.timeit(number=max(int(number // 100), 1))\n\n return common.Measurement(\n number_per_run=number,\n raw_times=[self._timer.timeit(number=number)],\n task_spec=self._task_spec\n )\n\n def repeat(self, repeat: int = -1, number: int = -1) -> None:\n raise NotImplementedError(\"See `Timer.blocked_autorange.`\")\n\n def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:\n raise NotImplementedError(\"See `Timer.blocked_autorange.`\")\n\n def _threaded_measurement_loop(\n self,\n number: int,\n time_hook: Callable[[], float],\n stop_hook: Callable[[List[float]], bool],\n min_run_time: float,\n max_run_time: Optional[float] = None,\n callback: Optional[Callable[[int, float], NoReturn]] = None\n ) -> List[float]:\n total_time = 0.0\n can_stop = False\n times: List[float] = []\n with 
common.set_torch_threads(self._task_spec.num_threads):\n while (total_time < min_run_time) or (not can_stop):\n time_spent = time_hook()\n times.append(time_spent)\n total_time += time_spent\n if callback:\n callback(number, time_spent)\n can_stop = stop_hook(times)\n if max_run_time and total_time > max_run_time:\n break\n return times\n\n def _estimate_block_size(self, min_run_time: float) -> int:\n with common.set_torch_threads(self._task_spec.num_threads):\n # Estimate the block size needed for measurement to be negligible\n # compared to the inner loop. This also serves as a warmup.\n overhead = np.median([self._timer.timeit(0) for _ in range(5)])\n number = 1\n while True:\n time_taken = self._timer.timeit(number)\n relative_overhead = overhead / time_taken\n if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:\n break\n if time_taken > min_run_time:\n break\n number *= 10\n return number\n\n def adaptive_autorange(\n self,\n threshold: float = 0.1,\n *,\n min_run_time: float = 0.01,\n max_run_time: float = 10.0,\n callback: Optional[Callable[[int, float], NoReturn]] = None,\n ) -> common.Measurement:\n number = self._estimate_block_size(min_run_time=0.05)\n\n def time_hook() -> float:\n return self._timer.timeit(number)\n\n def stop_hook(times: List[float]) -> bool:\n if len(times) > 3:\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n ).meets_confidence(threshold=threshold)\n return False\n times = self._threaded_measurement_loop(\n number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)\n\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n )\n\n def blocked_autorange(\n self,\n callback: Optional[Callable[[int, float], NoReturn]] = None,\n min_run_time: float = 0.2,\n ) -> common.Measurement:\n \"\"\"Measure many replicates while keeping timer overhead to a minimum.\n\n At a high level, blocked_autorange executes the following pseudo-code::\n\n `setup`\n\n total_time = 0\n while total_time < min_run_time\n start = timer()\n for _ in range(block_size):\n `stmt`\n total_time += (timer() - start)\n\n Note the variable `block_size` in the inner loop. The choice of block\n size is important to measurement quality, and must balance two\n competing objectives:\n\n 1) A small block size results in more replicates and generally\n better statistics.\n\n 2) A large block size better amortizes the cost of `timer`\n invocation, and results in a less biased measurement. This is\n important because CUDA syncronization time is non-trivial\n (order single to low double digit microseconds) and would\n otherwise bias the measurement.\n\n blocked_autorange sets block_size by running a warmup period,\n increasing block size until timer overhead is less than 0.1% of\n the overall computation. 
This value is then used for the main\n measurement loop.\n\n Returns:\n A `Measurement` object that contains measured runtimes and\n repetition counts, and can be used to compute statistics.\n (mean, median, etc.)\n \"\"\"\n number = self._estimate_block_size(min_run_time)\n\n def time_hook() -> float:\n return self._timer.timeit(number)\n\n def stop_hook(times: List[float]) -> bool:\n return True\n\n times = self._threaded_measurement_loop(\n number, time_hook, stop_hook,\n min_run_time=min_run_time,\n callback=callback)\n\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n )\n\n def collect_callgrind(\n self,\n number: int = 100,\n collect_baseline: bool = True\n ) -> valgrind_timer_interface.CallgrindStats:\n \"\"\"Collect instruction counts using Callgrind.\n\n Unlike wall times, instruction counts are deterministic\n (modulo non-determinism in the program itself and small amounts of\n jitter from the Python interpreter.) This makes them ideal for detailed\n performance analysis. This method runs `stmt` in a separate process\n so that Valgrind can instrument the program. Performance is severely\n degraded due to the instrumentation, howevever this is ameliorated by\n the fact that a small number of iterations is generally sufficient to\n obtain good measurements.\n\n In order to to use this method `valgrind`, `callgrind_control`, and\n `callgrind_annotate` must be installed.\n\n Because there is a process boundary between the caller (this process)\n and the `stmt` execution, `globals` cannot contain arbitrary in-memory\n data structures. (Unlike timing methods) Instead, globals are\n restricted to builtins, `nn.Modules`'s, and TorchScripted functions/modules\n to reduce the surprise factor from serialization and subsequent\n deserialization. The `GlobalsBridge` class provides more detail on this\n subject. Take particular care with nn.Modules: they rely on pickle and\n you may need to add an import to `setup` for them to transfer properly.\n\n By default, a profile for an empty statement will be collected and\n cached to indicate how many instructions are from the Python loop which\n drives `stmt`.\n\n Returns:\n A `CallgrindStats` object which provides instruction counts and\n some basic facilities for analyzing and manipulating results.\n \"\"\"\n if not isinstance(self._task_spec.stmt, str):\n raise ValueError(\"`collect_callgrind` currently only supports string `stmt`\")\n\n # Check that the statement is valid. It doesn't guarantee success, but it's much\n # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in\n # the parent process rather than the valgrind subprocess.\n self._timer.timeit(1)\n is_python = (self._language == Language.PYTHON)\n assert is_python or not self._globals\n return valgrind_timer_interface.wrapper_singleton().collect_callgrind(\n task_spec=self._task_spec,\n globals=self._globals,\n number=number,\n collect_baseline=collect_baseline and is_python,\n is_python=is_python)\n"
] | [
[
"torch.utils.benchmark.utils.common.TaskSpec",
"torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.wrapper_singleton",
"torch.utils.benchmark.utils.common.Measurement",
"torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.CopyIfCallgrind.unwrap_all",
"torch.cuda.synchronize",
"torch.utils.benchmark.utils.common.set_torch_threads",
"torch.utils.benchmark.utils.cpp_jit.compile_timeit_template",
"torch.cuda.is_available"
]
] |
mohammedshariqnawaz/Pedestron | [
"9785feb94f00e07ae24a662525b4678f12d0fdc8"
] | [
"mmdet/models/detectors/csp.py"
] | [
"\nfrom .single_stage import SingleStageDetector\nfrom ..registry import DETECTORS\nfrom mmdet.core import bbox2result\nimport torch.nn as nn\nimport torch\nfrom .. import builder\nimport numpy as np\nimport cv2\nfrom mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler\n\n@DETECTORS.register_module\nclass CSP(SingleStageDetector):\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n refine_roi_extractor=None,\n refine_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n detached=True,\n return_feature_maps=False):\n super(CSP, self).__init__(backbone, neck, bbox_head, train_cfg,\n test_cfg, pretrained)\n if refine_head is not None:\n self.refine_roi_extractor = builder.build_roi_extractor(\n refine_roi_extractor)\n self.refine_head = builder.build_head(refine_head)\n self.return_feature_maps = return_feature_maps\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.detached = detached\n\n def show_input_debug(self, img, classification_maps, scale_maps, offset_maps):\n img_numpy = img.cpu().numpy().copy()[0]\n # img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [8, 16, 32, 64, 128]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n # cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]\n cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][:80]\n scale_numpy = scale_maps[0][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[0][i].cpu().numpy().copy()[0][:2]\n cs, ys, xs = cls_numpy.nonzero()\n print(len(ys))\n for c, x, y in zip(cs, xs, ys):\n cv2.imshow(str(c), classification_maps[0][i].cpu().numpy().copy()[0][80+c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_input_debug_caltech(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]\n cv2.imshow('ignore', ignore_numpy)\n scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n print(len(ys))\n for x, y in zip(xs, ys):\n # cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = 
int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_input_debug_head(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]\n cv2.imshow('ignore', ignore_numpy)\n scale_numpy = scale_maps[j][i].exp().cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n for x, y in zip(xs, ys):\n # cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy)\n top_x = int(realx)\n down_y = int(realy + height)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.41/2), int(top_y))\n down_right = (int(down_x + height * 0.41/2), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_mot_input_debug(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n # img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n # cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n instance_numpy = classification_maps[j][i].cpu().numpy().copy()[0][3]\n scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n for x, y in zip(xs, ys):\n c=0\n cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][2])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)\n instance = instance_numpy[y, x]\n cv2.putText(img_now, str(instance), top_left, cv2.FONT_HERSHEY_COMPLEX, 1, 255)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n @property\n def refine(self):\n return hasattr(self, 'refine_head') and 
self.refine_head is not None\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n classification_maps=None,\n scale_maps=None,\n offset_maps=None):\n # for tracking data which batch is produced by dataset instead of data loader\n if type(img) == list:\n img=img[0]\n img_metas=img_metas[0]\n gt_bboxes=gt_bboxes[0]\n gt_labels=gt_labels[0]\n gt_bboxes_ignore = gt_bboxes_ignore[0]\n classification_maps = classification_maps[0]\n scale_maps = scale_maps[0]\n offset_maps = offset_maps[0]\n\n losses = dict()\n x = self.extract_feat(img)\n # self.show_input_debug(img, classification_maps, scale_maps, offset_maps)\n # self.show_input_debug_caltech(img, classification_maps, scale_maps, offset_maps)\n # self.show_mot_input_debug(img, classification_maps, scale_maps, offset_maps)\n # self.show_input_debug_head(img, classification_maps, scale_maps, offset_maps)\n\n outs = self.bbox_head(x)\n loss_inputs = outs + (gt_bboxes, gt_labels, classification_maps, scale_maps, offset_maps, img_metas, self.train_cfg.csp_head if self.refine else self.train_cfg)\n losses_bbox = self.bbox_head.loss(\n *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(losses_bbox)\n \n if self.refine:\n if self.detached:\n x = tuple([i.detach() for i in x])\n bbox_inputs = outs + (img_metas, self.train_cfg.csp_head, False)\n bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False) # no_strides to not upscale yet\n \n bbox_list = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]\n for det_bboxes, det_labels in bbox_list\n ]\n\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(\n self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n \n for i in range(num_imgs):\n if bbox_list[i].shape[0] == 0 or gt_bboxes[i].shape[0] == 0:\n continue\n bbox = torch.tensor(bbox_list[i]).float().cuda()\n assign_result = bbox_assigner.assign(\n bbox, gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = bbox_sampler.sample(\n assign_result,\n bbox,\n gt_bboxes[i],\n gt_labels[i])\n sampling_results.append(sampling_result)\n\n samp_list = [res.bboxes for res in sampling_results]\n if len(samp_list) == 0:\n losses.update(dict(loss_refine_cls=torch.tensor(0).float().cuda(), acc=torch.tensor(0).float().cuda()))\n return losses\n rois = bbox2roi(samp_list).float()\n if self.refine_head.loss_opinion is not None:\n pred_scores = torch.cat([torch.tensor(bbox[:, 4]).float().cuda() for bbox in bbox_list], dim=0)\n pred_rois = bbox2roi([torch.tensor(bbox).float().cuda() for bbox in bbox_list])\n pred_feats = self.refine_roi_extractor(\n x, pred_rois)\n pred_scores_refine = self.refine_head(pred_feats)\n loss_opinion = self.refine_head.compute_opinion_loss(pred_scores, pred_scores_refine)\n losses.update(loss_opinion)\n bbox_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head(bbox_feats)\n bbox_targets = self.refine_head.get_target(\n sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)\n loss_refine = self.refine_head.loss(cls_score,\n *bbox_targets[:2])\n losses.update(dict(loss_refine_cls=loss_refine[\"loss_cls\"], distL1=loss_refine[\"dist\"]))\n\n return losses\n\n def simple_test_accuracy(self, img, img_meta):\n gts = img_meta[0][\"gts\"]\n x = self.extract_feat(img)\n if self.detached:\n x = (x[0].detach(),)\n\n rois = bbox2roi(gts)\n if rois.shape[0] 
== 0:\n return 0, 0\n\n roi_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head.get_scores(roi_feats)\n\n return (cls_score > 0.5).float().sum(), rois.size(0)\n\n def simple_test(self, img, img_meta, rescale=False, return_id=False):\n x = self.extract_feat(img)\n outs = self.bbox_head(x)\n bbox_inputs = outs + (img_meta, self.test_cfg.csp_head if self.refine else self.test_cfg, False) # TODO://Handle rescalling\n if self.return_feature_maps:\n return self.bbox_head.get_bboxes_features(*bbox_inputs)\n bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False)\n im_scale = img_meta[0][\"scale_factor\"]\n if \"id\" in img_meta[0]:\n img_id = img_meta[0][\"id\"]\n else:\n img_id = 0\n if self.refine:\n if self.detached:\n x = (x[0].detach(),)\n bbox_list = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]\n for det_bboxes, det_labels in bbox_list\n ]\n refine_cfg = self.test_cfg.get('rcnn', None)\n bbox_list = [torch.tensor(bbox).float().cuda() for bbox in bbox_list]\n rois = bbox2roi(bbox_list)\n bbox_list = [bbox/im_scale for bbox in bbox_list]\n if rois.shape[0] == 0:\n cls_score = None\n else:\n roi_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head.get_scores(roi_feats)\n\n res_buffer = []\n if cls_score is not None:\n if refine_cfg is not None:\n res_buffer = self.refine_head.suppress_boxes(rois, cls_score, img_meta, cfg=refine_cfg)\n else:\n res_buffer = self.refine_head.combine_scores(bbox_list, cls_score)\n if return_id:\n return res_buffer, img_id\n return res_buffer\n\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n if return_id:\n return bbox_results[0], img_id\n return bbox_results[0]\n\n def foward_features(self, features):\n bbox_list = self.bbox_head.get_bboxes(*features)\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n return bbox_results[0]\n"
] | [
[
"torch.tensor",
"numpy.transpose"
]
] |
MichaelAllen1966/stroke_outcome_algorithm | [
"99050bf4e0b19c38c8973fe10234fee4f230a172"
] | [
"clinical_outcome.py"
] | [
"\"\"\"\nClass to hold clinical outcome model.\nPredicts probability of good outcome of patient(s) or group(s) of patients.\n\nCall `calculate_outcome_for_all(args)` from outside of the object\n\nInputs\n======\n\nAll inputs take np arrays (for multiple groups of patients).\n\nmimic: proportion of patients with stroke mimic\n\nich: proportion of patients with intracerebral haemorrhage (ICH). \nOr probability of a patient having an ICH, when using for a single patient.\n\nnlvo: proportion of patients with non-large vessel occlusions (nLVO). \nOr probability of a patient having an NLVO, when using for a single patient.\n\nlvo: proportion of patients with large vessel occlusions (LVO). \nOr probability of a patient having a LVO, when using for a single patient.\n\nonset_to_needle: minutes from onset to thrombolysis\n\nonset_to_ouncture: minutes from onset to thrombectomy\n\nnlvo_eligible_for_treatment: proportion of patients with NLVO suitable for \ntreatment with thrombolysis. Or probability of a patient with NVLO being \neligible for treatment.\n\nlvo_eligible_for_treatment: proportion of patients with LVO suitable for \ntreatment with thrombolysis and/or thrombectomy. Or probability of a patient \nwith LVO being eligible for treatment.\n\nReturns\n=======\n\nProbability of good outcome: The probability of having a good outcome (modified\nRankin Scale 0-1) for the patient or group of patients (np array).\n\n\nReferences for decay of effect of thrombolysis and thrombectomy\n===============================================================\n\nDecay of effect of thrombolysis without image selection of patients taken from:\nEmberson, Jonathan, Kennedy R. Lees, Patrick Lyden, Lisa Blackwell, \nGregory Albers, Erich Bluhmki, Thomas Brott, et al (2014). “Effect of Treatment \nDelay, Age, and Stroke Severity on the Effects of Intravenous Thrombolysis with\nAlteplase for Acute Ischaemic Stroke: A Meta-Analysis of Individual Patient\nData from Randomised Trials.” The Lancet 384: 1929–1935.\nhttps://doi.org/10.1016/S0140-6736(14)60584-5.\n\n* Time to no effect = 6.3hrs\n\nDecay of effect of thrombectomy without image selection of patients taken from:\nFransen, Puck S. S., Olvert A. Berkhemer, Hester F. Lingsma, Debbie Beumer, \nLucie A. van den Berg, Albert J. Yoo, Wouter J. Schonewille, et al. (2016)\n“Time to Reperfusion and Treatment Effect for Acute Ischemic Stroke: A \nRandomized Clinical Trial.” JAMA Neurology 73: 190–96. \nhttps://doi.org/10.1001/jamaneurol.2015.3886.\n\n* Time to no effect = 8hrs\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass Clinical_outcome:\n def __init__(self):\n \"\"\"Constructor for clinical outcome model\n \"\"\"\n self.name = \"Clinical outcome model\"\n self.thrombectomy_time_no_effect = 8 * 60\n self.thrombolysis_time_no_effect = 6.3 * 60\n self.maximum_permitted_time_to_thrombectomy = 360\n self.maximum_permitted_time_to_thrombolysis = 270\n\n def calculate_outcome_for_all(self,\n mimic,\n ich,\n nlvo,\n lvo,\n onset_to_needle,\n onset_to_puncture,\n nlvo_eligible_for_treatment,\n lvo_eligible_for_treatment,\n prop_thrombolysed_lvo_receiving_thrombectomy):\n \"\"\"\n Calculates the probability of good outcome for all patients admitted\n with acute stroke. \n\n Based on:\n Holodinsky JK, Williamson TS, Demchuk AM, et al. Modeling Stroke Patient\n Transport for All Patients With Suspected Large-Vessel Occlusion. JAMA \n Neurol. 2018;75(12):1477-1486. 
doi:10.1001/jamaneurol.2018.2424\n \n Sums outcomes for:\n\n 1) mimics\n 2) ICH\n 3) non-LVO\n 4) LVO treated with thrombolysis\n 5) LVO treated with thrombectomy (if thrombolysis not successful in a\n drip and ship configuration)\n\n arguments\n ---------\n\n np arrays (each row is a given geographic area with different \n characteristics)\n\n mimic: proportion of patients with stroke mimic\n ich: proportion of patients with ICH\n nlvo: proportion of patients with non-lvo\n lvo: proportion of patients with lvo\n onset_to_needle: minutes from onset to thrombolysis\n onset_to_ounctureL minutes from onset to thrombectomy\n nlvo_eligible_for_treatment: proportion of nlvo suitable for treatment\n lvo_eligible_for_treatment: proportion of lvo suitable for treatment\n\n returns\n -------\n\n probability of good outcome for all (np array)\n \"\"\"\n \n # Get outcomes\n # ------------\n \n outcomes = pd.DataFrame()\n\n # Calculate good outcomes for mimics\n outcomes['mimic'] = self._calculate_outcome_for_stroke_mimics(\n mimic.shape)\n\n # Calculate good outcomes for ich \n outcomes['ich'] = self._calculate_outcome_for_ICH(mimic.shape)\n\n # Calculate good outcomes for nlvo without treatment\n outcomes['nlvo_base'] = \\\n np.full(nlvo.shape, 0.4622)\n \n # Calculate good outcomes for nlvo with thrombolysis\n outcomes['nlvo_add_ivt'] = \\\n self._calculate_thrombolysis_outcome_for_nlvo(onset_to_needle)\n\n # Calculate good outcomes for lvo without treatment\n outcomes['lvo_base'] = \\\n np.full(nlvo.shape, 0.1328)\n \n # Calculate good outcomes for lvo with thrombolysis\n outcomes['lvo_add_ivt'] = \\\n self._calculate_thrombolysis_outcome_for_lvo(onset_to_needle)\n\n # Calculate good outcomes for lvo with thrombolysis\n outcomes['lvo_add_et'] = \\\n self._calculate_thrombectomy_outcome_for_lvo(onset_to_puncture)\n\n \n # Weight outcome results by proportion of patients\n # ------------------------------------------------\n \n # 'Results' are good outcomes\n results = pd.DataFrame()\n \n # Results for mimic\n results['mimic'] = outcomes['mimic'] * mimic\n \n # Results for ich\n results['ich'] = outcomes['ich'] * ich\n \n # Results for nlvo\n results['nlvo_base'] = nlvo * outcomes['nlvo_base']\n \n results['nlvo_ivt'] = \\\n nlvo * outcomes['nlvo_add_ivt'] * nlvo_eligible_for_treatment\n \n # Results for lvo\n results['lvo_base'] = lvo * outcomes['lvo_base']\n \n results['lvo_ivt'] = \\\n lvo * outcomes['lvo_add_ivt'] * lvo_eligible_for_treatment\n \n # Adjust thrombectomy/thrombolysis ratio for LVO \n # Reduce thrombectomy treatment by LVO responding to IVT\n lvo_receiving_et = ((lvo * lvo_eligible_for_treatment * \n prop_thrombolysed_lvo_receiving_thrombectomy) - \n results['lvo_ivt'])\n\n results['lvo_et'] = lvo_receiving_et * outcomes['lvo_add_et']\n\n p_good = results.sum(axis=1).values\n\n return p_good\n\n @staticmethod\n def _calculate_outcome_for_ICH(array_shape):\n \"\"\"\n Calculates the probability of good outcome for patients with intra-\n cranial haemorrhage (ICH).\n\n Sets all values to 0.24 \n\n Based on Holodinsky et al. (2018) Drip-and-Ship vs. Mothership: \n Modelling Stroke Patient Transport for All Suspected Large Vessel\n Occlusion Patients. 
JAMA Neuro (in press)\n\n arguments\n ---------\n\n array size\n\n returns\n -------\n\n probability of good outcome for ICH (np array)\n \"\"\"\n\n # Create an array of required length and set all values to 0.24\n p_good = np.zeros(array_shape)\n p_good[:] = 0.24\n\n return p_good \n\n @staticmethod\n def _calculate_outcome_for_stroke_mimics(array_shape):\n \"\"\"\n Calculates the probability of good outcome for patients with stroke\n mimic\n\n Sets all values to 1\n\n Based on Holodinsky et al. (2018) Drip-and-Ship vs. Mothership: \n Modelling Stroke Patient Transport for All Suspected Large Vessel\n Occlusion Patients. JAMA Neuro (in press)\n\n arguments\n ---------\n\n array size\n\n returns\n -------\n\n probability of good outcome for stroke mimiccs (np array)\n \"\"\"\n\n # Create an array of required length and set all values to 0.9\n p_good = np.zeros(array_shape)\n p_good[:] = 1\n\n return p_good\n \n def _calculate_thrombectomy_outcome_for_lvo(self, onset_to_puncture):\n \"\"\"\n Calculates the probability of additional good outcome for LVO patients\n receiving thrombectomy.\n\n arguments\n ---------\n\n onset_to_puncture : np array in minutes\n\n returns\n -------\n\n probability of additional good outcome if given thrombectomy (np array)\n \"\"\"\n\n p_good_max = 0.5208\n p_good_min = 0.1328\n \n # Convert probability to odds\n odds_good_max = p_good_max / (1 - p_good_max)\n odds_good_min = p_good_min / (1 - p_good_min)\n \n # Calculate fraction of effective time used\n fraction_max_effect_time_used = \\\n onset_to_puncture / self.thrombectomy_time_no_effect\n \n # Calculate odds of good outcome with treatment\n odds_good = np.exp(np.log(odds_good_max) - \n ((np.log(odds_good_max) - np.log(odds_good_min)) \n * fraction_max_effect_time_used))\n \n # Convert odds to probability\n prob_good = odds_good / (1 + odds_good)\n prob_good[prob_good < p_good_min] = p_good_min\n \n # Calculate probability of additional good outcome\n p_good_add = prob_good - p_good_min\n \n # Set additional good outcomes to zero if past permitted treatment time\n mask = onset_to_puncture > self.maximum_permitted_time_to_thrombectomy\n p_good_add[mask] = 0 \n \n # Ensure no negative outcomes\n mask = p_good_add < 0\n p_good_add[mask] = 0 \n\n return p_good_add \n\n def _calculate_thrombolysis_outcome_for_lvo(self, onset_to_needle):\n \"\"\"\n Calculates the probability of additional good outcome for LVO patients\n receiving thrombolysis. 
Does not include baseline untreated good\n comes.\n\n arguments\n ---------\n \n onset_to_needle : np array in minutes\n\n\n returns\n -------\n\n probability of additional good outcome if given thrombolysis \n (np array)\n \"\"\"\n \n p_good_max = 0.2441\n p_good_min = 0.1328\n \n # Convert probability to odds\n odds_good_max = p_good_max / (1 - p_good_max)\n odds_good_min = p_good_min / (1 - p_good_min)\n \n # Calculate fraction of effective time used \n fraction_max_effect_time_used = \\\n onset_to_needle / self.thrombolysis_time_no_effect\n\n # Calculate odds of good outcome with treatment\n odds_good = np.exp(np.log(odds_good_max) - \n ((np.log(odds_good_max) - np.log(odds_good_min)) \n * fraction_max_effect_time_used))\n\n # Convert odds to probability\n prob_good = odds_good / (1 + odds_good)\n prob_good[prob_good < p_good_min] = p_good_min\n \n # Calculate probability of additional good outcome\n p_good_add = prob_good - p_good_min\n \n # Set additional good outcomes to zero if past permitted treatment time\n mask = onset_to_needle> self.maximum_permitted_time_to_thrombolysis\n p_good_add[mask] = 0 \n \n # Ensure no negative outcomes\n mask = p_good_add < 0\n p_good_add[mask] = 0 \n\n # return outcome and proportion of treated who respond\n return p_good_add\n\n def _calculate_thrombolysis_outcome_for_nlvo(self, onset_to_needle):\n \"\"\"\n Calculates the probability of good outcome for non-LVO patients\n receiving thrombolysis.\n\n arguments\n ---------\n\n onset_to_needle : np array in minutes\n\n returns\n -------\n\n probability of good outcome if given thrombolysis (np array)\n \"\"\"\n\n p_good_max = 0.6444\n p_good_min = 0.4622\n \n # Convert probability to odds\n odds_good_max = p_good_max / (1 - p_good_max)\n odds_good_min = p_good_min / (1 - p_good_min)\n \n # Calculate fraction of effective time used \n fraction_max_effect_time_used = (onset_to_needle / \n self.thrombolysis_time_no_effect)\n \n # Calculate odds of good outcome with treatment\n odds_good = np.exp(np.log(odds_good_max) - \n ((np.log(odds_good_max) - np.log(odds_good_min)) \n * fraction_max_effect_time_used))\n \n # Convert odds to probability\n prob_good = odds_good / (1 + odds_good)\n prob_good[prob_good < p_good_min] = p_good_min\n \n # Calculate probability of additional good outcome\n p_good_add = prob_good - p_good_min\n \n mask = onset_to_needle> self.maximum_permitted_time_to_thrombolysis\n p_good_add[mask] = 0 \n \n # Ensure no negative outcomes\n mask = p_good_add < 0\n p_good_add[mask] = 0 \n\n # return outcome and proportion of treated who respond\n return p_good_add\n"
] | [
[
"numpy.log",
"pandas.DataFrame",
"numpy.zeros",
"numpy.full"
]
] |
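The stroke-outcome code in the record above decays treatment benefit by interpolating between a best-case probability and the untreated baseline on the log-odds scale. The snippet below is an editorial sketch of that technique only, not part of the dataset: the `p_good_max`/`p_good_min` constants are copied from the record's thrombolysis-for-LVO function, while `time_no_effect_mins`, `max_permitted_mins`, and the function name `additional_good_outcome` are assumptions chosen for illustration.

```python
import numpy as np

def additional_good_outcome(onset_to_treatment_mins,
                            p_good_max=0.2441,        # best case, treated at onset (from record)
                            p_good_min=0.1328,        # untreated baseline (from record)
                            time_no_effect_mins=378,  # assumed time at which benefit reaches zero
                            max_permitted_mins=270):  # assumed latest permitted treatment time
    """Probability of additional good outcome for an array of treatment times."""
    t = np.asarray(onset_to_treatment_mins, dtype=float)

    # Convert best-case and baseline probabilities to odds
    odds_max = p_good_max / (1 - p_good_max)
    odds_min = p_good_min / (1 - p_good_min)

    # Interpolate linearly on the log-odds scale as a fraction of the no-effect time
    frac = t / time_no_effect_mins
    odds = np.exp(np.log(odds_max) - (np.log(odds_max) - np.log(odds_min)) * frac)

    # Back to probability, floored at the untreated baseline
    p_good = np.maximum(odds / (1 + odds), p_good_min)

    # Additional benefit over baseline; no benefit past the permitted window
    p_add = p_good - p_good_min
    p_add[t > max_permitted_mins] = 0
    return p_add

print(additional_good_outcome([60, 180, 300]))  # benefit shrinks with time, then drops to zero
```

Flooring the treated probability at the untreated baseline before subtracting keeps the additional benefit non-negative, mirroring the masking steps in the record's code.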
brettelliot/event-study | [
"cffc6a80dbc4b33e68e863488428996af51cc991"
] | [
"examples/earnings_surprises/earnings-converter.py"
] | [
"import pandas as pd\nfrom pandas.compat import StringIO\nimport numpy\nnumpy.set_printoptions(threshold=numpy.nan)\n\n\ndef main():\n df = pd.read_csv(StringIO(earnings), sep=\",\", header=None,\n names=['symbol', 'exchange', 'eps_pct_diff_surp', 'asof_date'])\n df = df.sort_values(by=['asof_date'])\n print(df.head())\n print(len(df))\n df.to_csv('../../data/events/nyse_earnings_surprises_2013.csv', index=False)\n\n myString = ', '.join('\"{0}\"'.format(s) for s in df.symbol.unique())\n myString = myString.replace(\" \", \"\")\n print(myString)\n\n#earnings = 'CFN, NYSE, -21.82, 2013-02-09\\nNDZ, NYSE, 30.77, 2013-01-29\\nAZZ, NYSE, -1.64, 2013-01-10'\nearnings = 'CFN, NYSE, -21.82, 2013-02-09\\n NDZ, NYSE, 30.77, 2013-01-29\\n AZZ, NYSE, -1.64, 2013-01-10\\n CLC, NYSE, 2.86, 2013-01-17\\n CMC, NYSE, 64.71, 2013-01-08\\n FC, NYSE, 15.38, 2013-01-04\\n FDO, NYSE, -6.76, 2013-01-04\\n FUL, NYSE, 14.29, 2013-01-17\\n LEN, NYSE, 30.23, 2013-01-16\\n LNN, NYSE, 53.33, 2013-01-09\\n MKC, NYSE, -3.48, 2013-01-25\\n RT, NYSE, 0.00, 2013-01-10\\n MSM, NYSE, 1.00, 2013-01-11\\n RPM, NYSE, -4.76, 2013-01-09\\n SVU, NYSE, -50.00, 2013-01-11\\n TISI, NYSE, 10.00, 2013-01-08\\n TXI, NYSE, -5.88, 2013-01-10\\n UNF, NYSE, 15.79, 2013-01-04\\n WOR, NYSE, 12.20, 2013-01-04\\n GBX, NYSE, 12.90, 2013-01-10\\n SJR, NYSE, 11.11, 2013-01-10\\n OMN, NYSE, -50.00, 2013-01-23\\n MON, NYSE, 67.57, 2013-01-09\\n GPN, NYSE, 6.90, 2013-01-09\\n AYI, NYSE, -13.75, 2013-01-09\\n STZ, NYSE, 14.55, 2013-01-10\\n SNX, NYSE, 11.54, 2013-01-11\\n TAL, NYSE, 600.00, 2013-01-23\\n IHS, NYSE, 12.35, 2013-01-09\\n EDU, NYSE, -150.00, 2013-01-30\\n SAR, NYSE, 28.57, 2013-01-15\\n ZEP, NYSE, 11.11, 2013-01-08\\n MG, NYSE, 0.00, 2013-01-09\\n MOS, NYSE, 7.14, 2013-01-04\\n ABT, NYSE, 1.33, 2013-01-24\\n ABX, NYSE, 1.83, 2013-02-15\\n AB, NYSE, 21.21, 2013-02-13\\n TAP, NYSE, 7.81, 2013-02-15\\n ACO, NYSE, -15.91, 2013-01-26\\n ADM, NYSE, -26.83, 2013-02-05\\n AEM, NYSE, -13.33, 2013-02-14\\n AEP, NYSE, 11.11, 2013-02-16\\n AES, NYSE, 6.67, 2013-02-28\\n AET, NYSE, -2.08, 2013-02-01\\n AFL, NYSE, 0.00, 2013-02-06\\n AGCO, NYSE, 1.02, 2013-02-06\\n HES, NYSE, -2.44, 2013-01-31\\n AIG, NYSE, 322.22, 2013-02-22\\n AIN, NYSE, -9.68, 2013-02-07\\n AJG, NYSE, 2.63, 2013-01-30\\n ALU, NYSE, 0.00, 2013-02-08\\n MATX, NYSE, 24.14, 2013-02-08\\n ALK, NYSE, -4.11, 2013-01-25\\n ALX, NYSE, -11.52, 2013-02-27\\n BEAM, NYSE, 0.00, 2013-02-02\\n AME, NYSE, 2.08, 2013-01-25\\n TWX, NYSE, 6.36, 2013-02-07\\n AVD, NYSE, 11.43, 2013-03-01\\n AMN, NYSE, 36.36, 2013-02-22\\n AN, NYSE, 3.08, 2013-02-01\\n AON, NYSE, 1.60, 2013-02-02\\n AP, NYSE, 77.78, 2013-02-05\\n APA, NYSE, -1.30, 2013-02-15\\n APC, NYSE, 30.00, 2013-02-05\\n APD, NYSE, 0.78, 2013-01-24\\n APH, NYSE, 4.44, 2013-01-18\\n ARG, NYSE, -3.70, 2013-01-25\\n AAN, NYSE, -4.00, 2013-02-08\\n ARW, NYSE, 13.89, 2013-02-08\\n ASGN, NYSE, -25.00, 2013-02-15\\n ASH, NYSE, -17.65, 2013-01-30\\n ASR, NYSE, 56.88, 2013-02-26\\n GAS, NYSE, -9.90, 2013-02-07\\n ATO, NYSE, -5.13, 2013-02-07\\n ATW, NYSE, 17.02, 2013-01-31\\n AU, NYSE, -67.44, 2013-02-21\\n AVP, NYSE, 37.04, 2013-02-13\\n AVT, NYSE, 21.69, 2013-01-25\\n AVY, NYSE, 10.20, 2013-01-31\\n AXP, NYSE, 0.00, 2013-01-18\\n B, NYSE, 7.84, 2013-02-23\\n BA, NYSE, 7.56, 2013-01-31\\n BAC, NYSE, 50.00, 2013-01-18\\n BAX, NYSE, 0.00, 2013-01-25\\n BC, NYSE, 122.22, 2013-01-25\\n OMX, NYSE, 6.67, 2013-02-21\\n BCE, NYSE, -2.99, 2013-02-08\\n BCR, NYSE, 1.80, 2013-02-01\\n BCS, NYSE, 40.74, 2013-02-13\\n BDX, NYSE, 9.76, 2013-02-06\\n BEN, NYSE, 1.68, 
2013-02-02\\n BGG, NYSE, 250.00, 2013-01-25\\n BHE, NYSE, 10.00, 2013-02-05\\n BHI, NYSE, 1.64, 2013-01-24\\n BID, NYSE, 0.92, 2013-03-01\\n BIO, NYSE, 15.67, 2013-02-27\\n BK, NYSE, 0.00, 2013-01-16\\n BKH, NYSE, 9.68, 2013-02-01\\n WRB, NYSE, 28.00, 2013-01-29\\n BLC, NYSE, 5.71, 2013-02-09\\n BLL, NYSE, -3.03, 2013-02-01\\n BLX, NYSE, 20.75, 2013-02-08\\n BMI, NYSE, -11.36, 2013-02-07\\n BMS, NYSE, 4.00, 2013-02-01\\n BMY, NYSE, 9.30, 2013-01-25\\n BOH, NYSE, 1.12, 2013-01-31\\n BXS, NYSE, -25.00, 2013-01-24\\n BPL, NYSE, 18.52, 2013-02-09\\nBRK.A, NYSE, 175.73, 2013-03-02\\n BRO, NYSE, 7.41, 2013-02-02\\n BSX, NYSE, 63.64, 2013-01-30\\n BT, NYSE, -89.22, 2013-02-02\\n MTRN, NYSE, 17.14, 2013-03-01\\n CACI, NYSE, 3.66, 2013-01-31\\n CAT, NYSE, -13.10, 2013-01-29\\n CB, NYSE, 10.00, 2013-01-30\\n CBI, NYSE, 9.64, 2013-02-28\\n CBM, NYSE, 100.00, 2013-02-07\\n CBU, NYSE, -3.70, 2013-01-23\\n CBT, NYSE, -28.57, 2013-01-31\\n CCC, NYSE, 35.71, 2013-02-22\\n CCE, NYSE, 4.65, 2013-02-08\\n C, NYSE, -20.69, 2013-01-18\\n CCK, NYSE, -7.27, 2013-01-31\\n CCU, NYSE, -12.21, 2013-02-01\\n CDE, NYSE, -15.15, 2013-02-22\\n CDI, NYSE, 8.70, 2013-02-27\\n CAH, NYSE, 9.41, 2013-02-06\\n CFR, NYSE, 5.38, 2013-01-31\\n CHD, NYSE, 0.00, 2013-02-06\\n CKP, NYSE, -50.00, 2013-03-06\\n CPK, NYSE, 18.60, 2013-03-08\\n CI, NYSE, 6.08, 2013-02-08\\n CIA, NYSE, -100.00, 2013-03-12\\n CKH, NYSE, -93.55, 2013-02-28\\n CL, NYSE, 0.71, 2013-02-01\\n CLF, NYSE, -25.45, 2013-02-13\\n CLH, NYSE, -25.00, 2013-02-21\\n CLX, NYSE, 11.11, 2013-02-05\\n CMA, NYSE, 7.81, 2013-01-17\\n CMO, NYSE, -6.06, 2013-01-31\\n CRK, NYSE, -77.42, 2013-02-12\\n CMS, NYSE, 4.17, 2013-02-22\\n CNA, NYSE, -150.00, 2013-02-12\\n CNW, NYSE, -10.34, 2013-02-07\\n CHG, NYSE, -4.12, 2013-02-27\\n CNL, NYSE, 12.50, 2013-02-20\\n COG, NYSE, 14.29, 2013-02-22\\n COT, NYSE, -66.67, 2013-02-16\\n CP, NYSE, -0.78, 2013-01-30\\n CPF, NYSE, 11.54, 2013-02-01\\n CQB, NYSE, -17.65, 2013-03-12\\n CR, NYSE, -5.15, 2013-01-29\\nCRD.B, NYSE, 52.38, 2013-02-14\\n CRS, NYSE, 1.64, 2013-02-01\\n CSC, NYSE, 22.22, 2013-02-06\\n CSL, NYSE, 6.49, 2013-02-09\\n CTB, NYSE, 35.29, 2013-02-26\\n CTL, NYSE, -1.47, 2013-02-14\\n CTS, NYSE, -21.74, 2013-01-29\\n CUB, NYSE, -32.86, 2013-02-12\\n CMI, NYSE, 14.94, 2013-02-07\\n CUZ, NYSE, 40.00, 2013-02-14\\n CVC, NYSE, -400.00, 2013-03-01\\n CVH, NYSE, 35.82, 2013-02-07\\n CW, NYSE, 4.40, 2013-02-21\\n CWT, NYSE, 33.33, 2013-02-28\\n CX, NYSE, -258.33, 2013-02-08\\n CYN, NYSE, -13.00, 2013-01-25\\n D, NYSE, 1.47, 2013-02-01\\n DBD, NYSE, -8.16, 2013-02-13\\n DCO, NYSE, -23.81, 2013-03-05\\n DD, NYSE, 22.22, 2013-01-23\\n CVA, NYSE, -13.04, 2013-02-07\\n DHR, NYSE, 0.00, 2013-01-30\\n DIS, NYSE, 2.60, 2013-02-06\\n DLX, NYSE, 11.76, 2013-01-25\\n DNB, NYSE, -1.24, 2013-02-12\\n RRD, NYSE, 16.22, 2013-02-27\\n DOV, NYSE, 1.87, 2013-01-25\\n DOW, NYSE, -2.94, 2013-02-01\\n DRE, NYSE, 0.00, 2013-01-31\\n DHI, NYSE, 42.86, 2013-01-30\\n UFS, NYSE, -7.09, 2013-02-02\\n DTE, NYSE, 0.00, 2013-02-21\\n DUK, NYSE, 7.69, 2013-02-14\\n DVN, NYSE, 2.63, 2013-02-21\\n DV, NYSE, 55.36, 2013-02-07\\n EAT, NYSE, 0.00, 2013-01-23\\n ECL, NYSE, 0.00, 2013-02-27\\n ED, NYSE, -6.85, 2013-02-01\\n EDE, NYSE, 27.78, 2013-02-15\\n EFX, NYSE, 4.00, 2013-02-07\\n EGN, NYSE, -15.58, 2013-01-24\\n EGP, NYSE, 0.00, 2013-02-13\\n ELY, NYSE, 2.00, 2013-01-31\\n EMC, NYSE, 6.98, 2013-01-30\\n EMR, NYSE, 0.00, 2013-02-06\\n EOG, NYSE, 19.26, 2013-02-14\\n EQT, NYSE, 14.29, 2013-01-25\\n ESE, NYSE, -44.44, 2013-02-08\\n ESV, NYSE, 7.87, 2013-02-21\\n ETN, 
NYSE, -10.87, 2013-02-06\\n ETR, NYSE, 21.99, 2013-02-09\\n EXAR, NYSE, -14.29, 2013-01-24\\n F, NYSE, 19.23, 2013-01-30\\n OPY, NYSE, 115.79, 2013-02-02\\n CLGX, NYSE, -3.12, 2013-02-22\\n FNB, NYSE, 4.55, 2013-01-24\\n FCF, NYSE, -18.18, 2013-01-31\\n FBP, NYSE, -30.00, 2013-02-06\\n FICO, NYSE, 6.94, 2013-01-31\\n FLO, NYSE, 12.00, 2013-02-08\\n FMC, NYSE, 0.00, 2013-02-07\\n FOE, NYSE, -250.00, 2013-03-06\\n S, NYSE, 4.35, 2013-02-08\\n NEE, NYSE, 9.57, 2013-01-30\\n FRT, NYSE, 0.91, 2013-02-13\\n FRX, NYSE, -61.54, 2013-01-16\\n FUN, NYSE, -433.33, 2013-02-20\\n FUR, NYSE, -48.15, 2013-03-08\\n GBL, NYSE, -28.72, 2013-02-06\\n GVA, NYSE, -29.03, 2013-03-01\\n BGC, NYSE, -3.45, 2013-02-26\\n GD, NYSE, -26.84, 2013-01-24\\n GE, NYSE, 2.33, 2013-01-19\\n RHP, NYSE, -50.00, 2013-02-13\\n AXLL, NYSE, 95.08, 2013-02-13\\n GGG, NYSE, 13.33, 2013-01-29\\n GHM, NYSE, -22.22, 2013-02-02\\n GIB, NYSE, -4.35, 2013-01-31\\n GLT, NYSE, -25.71, 2013-02-08\\n GLW, NYSE, 3.03, 2013-01-30\\n GSK, NYSE, 8.33, 2013-02-07\\n GLF, NYSE, -160.71, 2013-02-26\\n GNI, NYSE, -14.44, 2013-01-30\\n GPC, NYSE, 0.00, 2013-02-20\\n GRA, NYSE, 4.72, 2013-02-07\\n GTY, NYSE, -10.34, 2013-03-01\\n GWW, NYSE, -7.28, 2013-01-25\\n HAE, NYSE, 4.17, 2013-01-31\\n HAL, NYSE, 3.28, 2013-01-26\\n HAR, NYSE, -32.95, 2013-02-01\\n HVT, NYSE, 30.43, 2013-02-26\\n HRC, NYSE, 6.82, 2013-01-24\\n HCC, NYSE, 43.75, 2013-02-13\\n HCN, NYSE, 1.19, 2013-02-26\\n HCP, NYSE, 1.41, 2013-02-13\\n HOG, NYSE, 0.00, 2013-01-30\\n HE, NYSE, 21.88, 2013-02-16\\n HL, NYSE, -25.00, 2013-02-26\\n HMA, NYSE, -5.00, 2013-02-15\\n HMC, NYSE, -29.58, 2013-02-01\\n HMN, NYSE, 91.43, 2013-02-06\\n HFC, NYSE, -8.97, 2013-02-27\\n HOT, NYSE, 7.69, 2013-02-08\\n HP, NYSE, 8.53, 2013-02-01\\n HLS, NYSE, 40.63, 2013-02-19\\n HRS, NYSE, 4.17, 2013-01-30\\n HSC, NYSE, -3.23, 2013-02-15\\n HSY, NYSE, -1.33, 2013-02-01\\n HUBB, NYSE, 0.00, 2013-01-25\\n HUM, NYSE, 11.21, 2013-02-05\\n HXL, NYSE, -5.26, 2013-01-24\\n IBM, NYSE, 2.67, 2013-01-23\\n IDA, NYSE, 10.00, 2013-02-22\\n IEX, NYSE, 2.99, 2013-02-05\\n IFF, NYSE, -1.19, 2013-02-08\\n DIN, NYSE, 1.22, 2013-02-28\\n INT, NYSE, 0.00, 2013-02-22\\n IP, NYSE, 6.15, 2013-01-30\\n IPG, NYSE, 3.70, 2013-02-23\\n IO, NYSE, 30.77, 2013-02-14\\n IR, NYSE, 8.57, 2013-02-02\\n IRF, NYSE, 6.38, 2013-01-29\\n ITW, NYSE, -1.11, 2013-01-30\\n IVC, NYSE, -56.00, 2013-02-09\\n JEC, NYSE, 0.00, 2013-01-24\\n JNJ, NYSE, 1.71, 2013-01-23\\n JNY, NYSE, 75.00, 2013-02-14\\n K, NYSE, 3.08, 2013-02-06\\n KAMN, NYSE, 0.00, 2013-02-26\\n KDN, NYSE, 0.00, 2013-02-22\\n KEX, NYSE, 9.30, 2013-01-31\\n KEY, NYSE, -4.55, 2013-01-25\\n KIM, NYSE, 6.45, 2013-02-06\\n KMB, NYSE, 0.74, 2013-01-26\\n KEM, NYSE, 53.33, 2013-02-01\\n KMT, NYSE, -21.88, 2013-01-25\\n KO, NYSE, 2.27, 2013-02-13\\n KSU, NYSE, 10.98, 2013-01-23\\n LDL, NYSE, -10.53, 2013-02-27\\n LDR, NYSE, 10.42, 2013-02-12\\n LEE, NYSE, 25.00, 2013-01-23\\n LEG, NYSE, 10.34, 2013-02-05\\n LLY, NYSE, 8.97, 2013-01-30\\n LM, NYSE, 29.63, 2013-02-02\\n LNC, NYSE, 3.77, 2013-02-07\\n LPX, NYSE, -10.00, 2013-02-09\\n LXU, NYSE, 145.00, 2013-03-01\\n LTC, NYSE, -1.72, 2013-02-22\\n L, NYSE, -37.93, 2013-02-12\\n LUK, NYSE, 210.17, 2013-02-26\\n LUV, NYSE, 28.57, 2013-01-25\\n LUX, NYSE, 4.35, 2013-03-01\\n MKL, NYSE, 314.07, 2013-02-05\\n MAN, NYSE, 18.18, 2013-01-31\\n MTW, NYSE, 12.50, 2013-02-01\\n SM, NYSE, 95.65, 2013-02-21\\n MAS, NYSE, 500.00, 2013-02-12\\n MTZ, NYSE, 2.22, 2013-03-01\\n MCD, NYSE, 3.76, 2013-01-24\\n MDC, NYSE, 40.48, 2013-02-01\\n MDP, NYSE, 1.14, 2013-01-25\\n 
MDR, NYSE, 13.04, 2013-03-01\\n MDU, NYSE, 2.56, 2013-02-05\\n MED, NYSE, 12.00, 2013-03-08\\n CVS, NYSE, 2.73, 2013-02-07\\n MFC, NYSE, -12.50, 2013-02-08\\n MGA, NYSE, 36.84, 2013-03-02\\n MGM, NYSE, 0.00, 2013-02-21\\n MLR, NYSE, -11.76, 2013-03-07\\n MLI, NYSE, 14.29, 2013-02-06\\n MMC, NYSE, 0.00, 2013-02-13\\n MMM, NYSE, 0.00, 2013-01-25\\n MSA, NYSE, 3.64, 2013-02-14\\n MNR, NYSE, 38.46, 2013-02-08\\n MO, NYSE, 1.85, 2013-02-01\\n MOD, NYSE, -75.00, 2013-02-02\\nMOG.A, NYSE, -8.54, 2013-01-26\\n MHK, NYSE, 7.45, 2013-02-22\\n MSI, NYSE, 7.61, 2013-01-24\\n MCY, NYSE, -168.00, 2013-02-05\\n MRK, NYSE, 2.47, 2013-02-02\\n MRO, NYSE, -19.12, 2013-02-07\\n POWR, NYSE, 18.18, 2013-03-08\\n MTG, NYSE, -37.87, 2013-03-01\\n MTB, NYSE, 2.76, 2013-01-17\\n MTX, NYSE, 6.38, 2013-02-01\\n MUR, NYSE, 59.23, 2013-01-31\\n MYE, NYSE, -7.14, 2013-02-14\\n NBL, NYSE, 54.21, 2013-02-08\\n NBR, NYSE, 3.45, 2013-02-20\\n NE, NYSE, -19.35, 2013-01-24\\n NEM, NYSE, 13.27, 2013-02-22\\n NFG, NYSE, 6.58, 2013-02-08\\n NHI, NYSE, 1.20, 2013-02-15\\n NI, NYSE, 0.00, 2013-02-20\\n NJR, NYSE, -17.48, 2013-02-08\\n THC, NYSE, -24.64, 2013-02-27\\n NNN, NYSE, 4.55, 2013-02-08\\n NOC, NYSE, 18.39, 2013-01-31\\n NPK, NYSE, -11.23, 2013-02-16\\n NR, NYSE, 0.00, 2013-02-15\\n NSC, NYSE, 9.24, 2013-01-23\\n NUE, NYSE, 55.17, 2013-01-30\\n NVR, NYSE, 8.22, 2013-01-25\\n NWL, NYSE, 2.38, 2013-02-02\\n NWN, NYSE, -4.55, 2013-03-02\\n NYT, NYSE, 3.23, 2013-02-08\\n OCR, NYSE, 1.18, 2013-02-20\\n OGE, NYSE, 14.71, 2013-02-28\\n OHI, NYSE, 3.57, 2013-02-12\\n OI, NYSE, 8.11, 2013-01-31\\n OII, NYSE, 2.78, 2013-02-14\\n OKE, NYSE, 17.78, 2013-02-26\\n OLN, NYSE, 2.94, 2013-01-29\\n BRS, NYSE, 32.95, 2013-02-05\\n OLP, NYSE, 0.00, 2013-03-15\\n OMC, NYSE, 3.67, 2013-02-13\\n OMI, NYSE, -12.77, 2013-02-12\\n ORB, NYSE, 31.82, 2013-02-15\\n ORI, NYSE, -28.57, 2013-01-25\\n OSK, NYSE, 93.55, 2013-01-26\\n OXY, NYSE, 10.24, 2013-02-01\\n PHX, NYSE, -18.75, 2013-02-08\\n FCFS, NYSE, 2.20, 2013-01-24\\n PBI, NYSE, 7.69, 2013-02-01\\n PCG, NYSE, 3.51, 2013-02-22\\n PCL, NYSE, 68.97, 2013-01-29\\n PCP, NYSE, -3.23, 2013-01-25\\n TPC, NYSE, 0.00, 2013-02-22\\n PDS, NYSE, 250.00, 2013-02-15\\n PEG, NYSE, 5.13, 2013-02-22\\n PEI, NYSE, 0.00, 2013-02-26\\n PEP, NYSE, 3.81, 2013-02-15\\n PFE, NYSE, 6.82, 2013-01-30\\n PG, NYSE, 9.91, 2013-01-26\\n PGR, NYSE, 0.00, 2013-01-19\\n PH, NYSE, 6.25, 2013-01-19\\n PHG, NYSE, -4.17, 2013-01-30\\n PHM, NYSE, 9.68, 2013-02-01\\n PKD, NYSE, -150.00, 2013-02-22\\n PKY, NYSE, 17.39, 2013-02-12\\n PNC, NYSE, 24.82, 2013-01-18\\n PNM, NYSE, 18.18, 2013-03-02\\n PNR, NYSE, 6.82, 2013-01-30\\n PNW, NYSE, 41.18, 2013-02-23\\n POM, NYSE, -5.00, 2013-03-02\\n POT, NYSE, -11.86, 2013-02-01\\n PPG, NYSE, -0.65, 2013-01-15\\n PPL, NYSE, 6.52, 2013-02-15\\n PRGO, NYSE, 3.82, 2013-02-02\\n PL, NYSE, 11.36, 2013-02-07\\n PSB, NYSE, 5.04, 2013-02-20\\n CSH, NYSE, 12.61, 2013-01-25\\n PWR, NYSE, 36.11, 2013-02-22\\n PX, NYSE, 0.00, 2013-01-24\\n KWR, NYSE, 26.32, 2013-03-07\\n R, NYSE, 6.36, 2013-02-01\\n RBC, NYSE, 2.70, 2013-02-05\\n RDC, NYSE, 28.57, 2013-03-01\\n HTSI, NYSE, -20.69, 2013-02-01\\n RES, NYSE, 8.33, 2013-01-24\\n RGS, NYSE, -76.92, 2013-02-01\\n RGR, NYSE, 36.99, 2013-02-28\\n RHI, NYSE, 2.44, 2013-01-30\\n RJF, NYSE, 0.00, 2013-01-24\\n RLI, NYSE, 102.27, 2013-01-24\\n ROG, NYSE, -8.62, 2013-02-20\\n ROK, NYSE, -2.38, 2013-01-31\\n ROL, NYSE, -5.88, 2013-01-24\\n ROP, NYSE, 1.37, 2013-01-29\\n RTI, NYSE, 25.00, 2013-02-07\\n RTN, NYSE, 23.08, 2013-01-25\\n RYL, NYSE, 12.00, 2013-01-30\\n BSAC, 
NYSE, -1.96, 2013-02-05\\n T, NYSE, -6.38, 2013-01-25\\n SCG, NYSE, 0.00, 2013-02-22\\n SCHW, NYSE, 0.00, 2013-01-17\\n SCL, NYSE, -5.56, 2013-02-20\\n SMG, NYSE, 0.88, 2013-02-07\\n SEE, NYSE, 17.24, 2013-02-20\\n SF, NYSE, 5.17, 2013-02-26\\n SFE, NYSE, -121.74, 2013-03-08\\n SHW, NYSE, -0.87, 2013-02-01\\n STC, NYSE, 29.27, 2013-02-15\\n SJI, NYSE, -6.67, 2013-03-01\\n JOE, NYSE, -1000.00, 2013-03-01\\n SJW, NYSE, 72.22, 2013-02-20\\n SLB, NYSE, 0.00, 2013-01-19\\n HSH, NYSE, 29.17, 2013-02-01\\n AOS, NYSE, 12.35, 2013-01-25\\n SNA, NYSE, 4.38, 2013-02-08\\n PII, NYSE, 0.81, 2013-01-30\\n SNV, NYSE, 0.00, 2013-01-23\\n SO, NYSE, 12.82, 2013-01-31\\n SON, NYSE, 3.70, 2013-02-14\\n SPA, NYSE, 30.00, 2013-02-06\\n TRV, NYSE, 500.00, 2013-01-23\\n SR, NYSE, 14.68, 2013-02-06\\n NVE, NYSE, 0.00, 2013-02-23\\n SCI, NYSE, 10.00, 2013-02-13\\n SSP, NYSE, -3.85, 2013-02-27\\n STT, NYSE, 11.00, 2013-01-19\\n STI, NYSE, 6.56, 2013-01-19\\n STJ, NYSE, 2.22, 2013-01-24\\n STL, NYSE, 14.29, 2013-01-24\\n STR, NYSE, 8.57, 2013-02-21\\n STE, NYSE, 3.57, 2013-02-07\\n SYK, NYSE, 0.88, 2013-01-24\\n SUN, NYSE, -4.88, 2013-03-30\\n SUP, NYSE, -61.54, 2013-03-02\\n SWK, NYSE, 3.01, 2013-01-25\\n SWN, NYSE, 2.33, 2013-02-21\\n SWS, NYSE, 0.00, 2013-02-07\\n SWX, NYSE, -2.44, 2013-02-27\\n SWY, NYSE, 23.68, 2013-02-22\\n SXI, NYSE, 1.10, 2013-02-02\\n SYY, NYSE, 19.51, 2013-02-05\\n TNC, NYSE, 6.90, 2013-02-20\\n TCB, NYSE, -16.67, 2013-01-31\\n TCO, NYSE, 5.15, 2013-02-14\\n TDS, NYSE, -725.00, 2013-02-27\\n TDW, NYSE, 38.64, 2013-02-02\\n TDY, NYSE, 8.33, 2013-01-25\\n TE, NYSE, 0.00, 2013-02-06\\n TER, NYSE, 600.00, 2013-01-24\\n TEVA, NYSE, -0.75, 2013-02-08\\n TEX, NYSE, -51.28, 2013-02-20\\n TFX, NYSE, 1.79, 2013-02-22\\n TEN, NYSE, -2.94, 2013-02-01\\n TKR, NYSE, 25.00, 2013-01-25\\n TMK, NYSE, 1.53, 2013-02-05\\n TMO, NYSE, 6.25, 2013-02-01\\n TOT, NYSE, -1.12, 2013-02-14\\n TM, NYSE, -44.72, 2013-02-06\\n TR, NYSE, 37.50, 2013-02-14\\n TRN, NYSE, 7.14, 2013-02-21\\n TRP, NYSE, -15.09, 2013-02-13\\n TRR, NYSE, 566.67, 2013-02-07\\n TSO, NYSE, -2.90, 2013-02-07\\n TSS, NYSE, -3.03, 2013-01-23\\n TTI, NYSE, -21.05, 2013-03-01\\n TXT, NYSE, -1.75, 2013-01-24\\n TYL, NYSE, 10.71, 2013-02-07\\n TSN, NYSE, 23.08, 2013-02-02\\n UDR, NYSE, 2.94, 2013-02-06\\n UFI, NYSE, -42.86, 2013-01-23\\n UGI, NYSE, -15.89, 2013-02-01\\n UAM, NYSE, 45.45, 2013-02-20\\n UHS, NYSE, 9.89, 2013-03-01\\n UHT, NYSE, 268.42, 2013-02-28\\n UIL, NYSE, -9.68, 2013-02-22\\n UNH, NYSE, 0.00, 2013-01-18\\n KMPR, NYSE, -250.00, 2013-02-08\\n UNM, NYSE, 5.13, 2013-02-06\\n UNP, NYSE, 1.39, 2013-01-25\\n UNT, NYSE, 2.06, 2013-02-20\\n URS, NYSE, -1.04, 2013-02-26\\n USG, NYSE, -67.86, 2013-02-07\\n MUX, NYSE, -600.00, 2013-03-09\\n USM, NYSE, -1100.00, 2013-02-27\\n USPH, NYSE, 3.03, 2013-03-08\\n UTL, NYSE, 3.13, 2013-01-31\\n UTX, NYSE, 26.47, 2013-01-24\\n VMI, NYSE, 8.48, 2013-02-13\\n VAR, NYSE, 3.49, 2013-01-24\\n VFC, NYSE, 1.32, 2013-02-16\\n CBS, NYSE, -8.57, 2013-02-15\\n VLO, NYSE, 57.98, 2013-01-30\\n VMC, NYSE, -81.82, 2013-02-15\\n VLY, NYSE, 0.00, 2013-01-31\\n VNO, NYSE, 6.09, 2013-02-27\\n VSH, NYSE, 37.50, 2013-02-06\\n WTS, NYSE, 5.17, 2013-02-20\\n WBS, NYSE, 6.12, 2013-01-19\\n WEC, NYSE, 4.88, 2013-01-31\\n WFC, NYSE, 3.41, 2013-01-14\\n WG, NYSE, 57.14, 2013-03-07\\n WGL, NYSE, 9.62, 2013-02-07\\n WHR, NYSE, 3.15, 2013-02-01\\n WMB, NYSE, -3.85, 2013-02-21\\n WMK, NYSE, 20.29, 2013-03-06\\n WNC, NYSE, 3.23, 2013-02-06\\n TEG, NYSE, -5.32, 2013-03-01\\n WR, NYSE, 80.00, 2013-03-01\\n WRE, NYSE, 2.17, 2013-02-14\\n 
WRI, NYSE, 4.44, 2013-02-15\\n WPP, NYSE, -175.00, 2013-02-12\\n WSO, NYSE, -12.77, 2013-02-15\\n WST, NYSE, 8.93, 2013-02-22\\n WWW, NYSE, 200.00, 2013-02-20\\n WY, NYSE, 36.84, 2013-01-26\\n X, NYSE, 45.33, 2013-01-30\\n XL, NYSE, 138.24, 2013-02-08\\n XOM, NYSE, 10.00, 2013-02-02\\n XRX, NYSE, 7.14, 2013-01-25\\n Y, NYSE, 54.64, 2013-02-22\\n HRG, NYSE, -50.00, 2013-02-09\\n CRY, NYSE, 33.33, 2013-02-15\\n CHK, NYSE, 85.71, 2013-02-22\\n DDR, NYSE, 0.00, 2013-02-13\\n ELS, NYSE, 0.00, 2013-01-29\\n ALG, NYSE, 37.93, 2013-03-07\\n ETH, NYSE, 5.41, 2013-01-23\\n ATR, NYSE, 0.00, 2013-02-08\\n GGP, NYSE, 6.90, 2013-02-05\\n MSL, NYSE, -10.00, 2013-01-30\\n RCL, NYSE, 66.67, 2013-02-05\\n CWEI, NYSE, -34.04, 2013-02-22\\n HR, NYSE, 0.00, 2013-02-21\\n RGA, NYSE, 35.56, 2013-02-01\\n RIG, NYSE, 12.35, 2013-03-02\\n SKT, NYSE, 2.22, 2013-02-13\\n TWI, NYSE, -80.85, 2013-02-26\\n BDN, NYSE, 17.86, 2013-02-07\\n KGC, NYSE, -4.55, 2013-02-14\\n YPF, NYSE, 26.67, 2013-03-13\\n CPT, NYSE, 1.04, 2013-02-01\\n SGY, NYSE, 67.27, 2013-02-26\\n BFS, NYSE, -11.48, 2013-03-08\\n BWA, NYSE, 3.57, 2013-02-15\\n EQR, NYSE, 0.00, 2013-02-06\\n CLP, NYSE, -81.25, 2013-02-08\\n KOF, NYSE, -7.78, 2013-02-28\\n OKS, NYSE, 3.13, 2013-02-26\\n SQM, NYSE, -15.63, 2013-03-06\\n BYD, NYSE, -138.46, 2013-03-05\\n CBL, NYSE, 8.77, 2013-02-06\\n DECK, NYSE, 7.36, 2013-03-01\\n IT, NYSE, 6.78, 2013-02-08\\n GFI, NYSE, -36.36, 2013-02-15\\n HST, NYSE, 8.11, 2013-02-22\\n LXP, NYSE, 0.00, 2013-02-22\\n OMG, NYSE, -533.33, 2013-02-20\\n REG, NYSE, 8.62, 2013-01-31\\n TUC, NYSE, -5.56, 2013-03-08\\n AF, NYSE, 7.14, 2013-01-24\\n BFR, NYSE, 13.33, 2013-02-09\\n HHS, NYSE, 26.32, 2013-02-01\\n MHO, NYSE, -3.45, 2013-02-01\\n NFX, NYSE, -36.36, 2013-02-20\\n SPG, NYSE, 13.93, 2013-02-05\\n SU, NYSE, -14.20, 2013-02-06\\n SUI, NYSE, -2.44, 2013-02-22\\n TV, NYSE, 5.13, 2013-02-26\\n CGI, NYSE, 0.00, 2013-01-24\\n CYT, NYSE, 77.42, 2013-02-01\\n EMN, NYSE, 0.00, 2013-02-01\\n GRT, NYSE, 0.00, 2013-02-15\\n MAA, NYSE, -1.74, 2013-02-07\\n PLT, NYSE, 0.00, 2013-01-30\\n BZH, NYSE, 24.27, 2013-01-29\\n ELX, NYSE, 0.00, 2013-02-01\\n AGM, NYSE, -5.41, 2013-03-19\\n MLM, NYSE, -13.21, 2013-02-13\\n AKS, NYSE, 14.29, 2013-01-30\\n ALB, NYSE, 18.18, 2013-01-23\\n VRX, NYSE, -4.00, 2013-03-01\\n CBR, NYSE, 140.00, 2013-02-22\\n MAC, NYSE, 3.45, 2013-02-07\\n RKT, NYSE, 5.47, 2013-01-23\\n RYN, NYSE, 3.51, 2013-01-25\\n ADC, NYSE, 1.96, 2013-02-28\\nBRK.B, NYSE, 0.88, 2013-03-02\\n EXP, NYSE, 0.00, 2013-02-07\\n GGB, NYSE, -66.67, 2013-02-22\\n SSD, NYSE, -100.00, 2013-02-08\\n ESS, NYSE, 4.02, 2013-02-01\\n FR, NYSE, 0.00, 2013-02-21\\n HIW, NYSE, 0.00, 2013-02-13\\n IMAX, NYSE, 58.33, 2013-02-22\\n AIV, NYSE, 4.00, 2013-02-08\\n FCH, NYSE, 50.00, 2013-02-20\\n ITGR, NYSE, 6.00, 2013-02-26\\n GEO, NYSE, 7.32, 2013-02-22\\n CLI, NYSE, 4.76, 2013-02-08\\n DAR, NYSE, -20.00, 2013-02-28\\n RS, NYSE, 9.28, 2013-02-22\\n CPE, NYSE, -66.67, 2013-03-15\\n KNX, NYSE, 4.76, 2013-01-31\\n O, NYSE, 3.70, 2013-02-15\\n PKX, NYSE, -15.35, 2013-03-02\\n COF, NYSE, -12.35, 2013-01-18\\n CYD, NYSE, -23.14, 2013-02-28\\n IRS, NYSE, 57.50, 2013-02-20\\n MCK, NYSE, -13.50, 2013-02-01\\n SWC, NYSE, 116.67, 2013-02-28\\n STM, NYSE, -22.22, 2013-01-31\\n TEO, NYSE, 28.36, 2013-03-01\\n TRK, NYSE, 400.00, 2013-03-07\\n GFF, NYSE, 300.00, 2013-01-31\\n LMT, NYSE, -0.56, 2013-01-25\\n APU, NYSE, -13.89, 2013-02-01\\n AGU, NYSE, 6.93, 2013-02-22\\n LH, NYSE, -4.35, 2013-02-09\\n DDD, NYSE, 0.00, 2013-02-26\\n WEX, NYSE, 0.94, 2013-02-07\\n AFG, NYSE, 3.08, 
2013-02-12\\n RMD, NYSE, 3.92, 2013-01-25\\n WAB, NYSE, 2.29, 2013-02-20\\n CIB, NYSE, 20.39, 2013-03-05\\n CAM, NYSE, -1.04, 2013-02-01\\n FCX, NYSE, 5.41, 2013-01-23\\n RNR, NYSE, 70.27, 2013-02-06\\n AVX, NYSE, -20.00, 2013-01-25\\n RWT, NYSE, 85.19, 2013-02-22\\n AXE, NYSE, 0.76, 2013-01-30\\n CLB, NYSE, 3.54, 2013-01-31\\n MD, NYSE, 1.54, 2013-02-01\\n THG, NYSE, 6.25, 2013-02-07\\n BAP, NYSE, 3.72, 2013-02-06\\n DO, NYSE, 28.18, 2013-02-06\\n RE, NYSE, 175.86, 2013-02-07\\n DST, NYSE, 17.82, 2013-02-01\\n EL, NYSE, 11.54, 2013-02-06\\n ESC, NYSE, -34.88, 2013-03-01\\n MIG, NYSE, -100.00, 2013-02-13\\n WAT, NYSE, 0.63, 2013-01-23\\n EME, NYSE, 11.48, 2013-02-27\\n HIG, NYSE, 80.00, 2013-02-05\\n ITT, NYSE, 2.63, 2013-02-28\\n SPN, NYSE, 4.26, 2013-02-27\\n SWM, NYSE, -9.18, 2013-02-07\\n SCCO, NYSE, 0.00, 2013-02-02\\n RCI, NYSE, 20.55, 2013-02-15\\n EIX, NYSE, 66.04, 2013-02-27\\n IRM, NYSE, -20.00, 2013-03-01\\n REV, NYSE, -19.18, 2013-02-06\\n SPH, NYSE, -17.46, 2013-02-08\\n CCJ, NYSE, 46.34, 2013-02-09\\n PGI, NYSE, -6.67, 2013-02-14\\n CRR, NYSE, 2.30, 2013-02-01\\n BVN, NYSE, -26.67, 2013-03-01\\n FCN, NYSE, 11.67, 2013-03-01\\n RPT, NYSE, 8.00, 2013-02-13\\n TUP, NYSE, 1.79, 2013-01-30\\n ASB, NYSE, 0.00, 2013-01-18\\n GWR, NYSE, -2.47, 2013-02-13\\n TBI, NYSE, 35.71, 2013-02-07\\n FFG, NYSE, 24.00, 2013-02-08\\n USNA, NYSE, 4.96, 2013-02-06\\n CSV, NYSE, 4.35, 2013-02-26\\n LVB, NYSE, 12.77, 2013-03-07\\n ALR, NYSE, 6.25, 2013-02-16\\n OCN, NYSE, -7.84, 2013-03-01\\n PAA, NYSE, 42.03, 2013-02-07\\n DNR, NYSE, 24.14, 2013-02-22\\n HMY, NYSE, 50.00, 2013-02-05\\n TGI, NYSE, 5.80, 2013-01-31\\n PAG, NYSE, 7.55, 2013-02-07\\n GEL, NYSE, -2.86, 2013-02-15\\n IM, NYSE, 23.73, 2013-02-14\\n LIN, NYSE, -21.92, 2013-03-01\\n NUS, NYSE, 2.11, 2013-02-07\\n CNI, NYSE, -0.70, 2013-01-23\\n LAD, NYSE, 10.45, 2013-02-21\\n NSP, NYSE, 4.44, 2013-02-09\\n DEL, NYSE, -29.63, 2013-02-28\\n DGX, NYSE, -3.81, 2013-01-24\\n KRC, NYSE, 3.23, 2013-01-31\\n MTH, NYSE, 50.00, 2013-02-01\\n NCR, NYSE, 4.35, 2013-02-08\\n OFG, NYSE, -50.00, 2013-02-08\\n IVZ, NYSE, -4.26, 2013-02-01\\n DX, NYSE, 9.68, 2013-02-21\\n FBC, NYSE, 38.27, 2013-02-09\\n ALV, NYSE, 9.85, 2013-02-01\\n ARE, NYSE, 0.87, 2013-02-08\\n BBT, NYSE, 2.86, 2013-01-18\\n CGG, NYSE, -59.32, 2013-03-02\\n BXP, NYSE, 2.42, 2013-01-30\\n MS, NYSE, 73.08, 2013-01-19\\n SRT, NYSE, 200.00, 2013-02-28\\n HLX, NYSE, 162.86, 2013-02-21\\n FLS, NYSE, 0.35, 2013-02-22\\n MT, NYSE, -880.00, 2013-02-07\\n PXD, NYSE, -2.35, 2013-02-14\\n SLG, NYSE, 0.87, 2013-01-31\\n NAT, NYSE, 0.00, 2013-02-12\\n CSU, NYSE, -22.22, 2013-03-07\\n DRQ, NYSE, 2.70, 2013-03-01\\n FDP, NYSE, -100.00, 2013-02-20\\n NLY, NYSE, 35.29, 2013-02-07\\n TLM, NYSE, -300.00, 2013-02-18\\n TSM, NYSE, 0.00, 2013-01-18\\n YUM, NYSE, 2.47, 2013-02-05\\n AMG, NYSE, 4.94, 2013-01-30\\n EPR, NYSE, -4.40, 2013-02-27\\n FE, NYSE, 1.27, 2013-02-26\\n LFL, NYSE, -80.00, 2013-05-01\\n MTD, NYSE, 8.44, 2013-02-07\\n SID, NYSE, 57.14, 2013-03-29\\n IN, NYSE, -18.18, 2013-03-12\\n AI, NYSE, 9.91, 2013-02-07\\n URI, NYSE, 23.30, 2013-01-24\\n INGR, NYSE, 4.26, 2013-02-08\\n RAS, NYSE, 153.85, 2013-02-14\\n UNS, NYSE, 12.50, 2013-02-27\\n ASI, NYSE, -17.95, 2013-03-07\\n ANH, NYSE, 7.14, 2013-02-08\\n OFC, NYSE, 4.08, 2013-02-09\\n GPX, NYSE, 6.67, 2013-02-27\\n WAC, NYSE, 11.32, 2013-03-19\\n RBA, NYSE, -12.50, 2013-02-27\\n WDR, NYSE, 5.17, 2013-01-30\\n LHO, NYSE, 4.44, 2013-02-21\\n LNT, NYSE, -1.72, 2013-02-15\\n LVLT, NYSE, 11.11, 2013-02-13\\n MFA, NYSE, 0.00, 2013-03-07\\n OME, NYSE, 
33.33, 2013-03-06\\n EQY, NYSE, 7.14, 2013-02-21\\n FII, NYSE, 10.00, 2013-01-25\\n FMX, NYSE, 39.60, 2013-02-28\\n LLL, NYSE, 6.13, 2013-01-31\\n VTR, NYSE, 2.06, 2013-02-16\\n WCN, NYSE, -7.69, 2013-02-15\\n AVB, NYSE, -0.71, 2013-01-31\\n GIL, NYSE, 6.67, 2013-02-07\\n HZO, NYSE, 10.00, 2013-01-30\\n AWR, NYSE, 43.24, 2013-03-01\\n CLS, NYSE, 46.67, 2013-01-23\\n EPD, NYSE, 7.58, 2013-02-01\\n RSG, NYSE, -13.95, 2013-02-08\\n WM, NYSE, -5.00, 2013-02-15\\n AKR, NYSE, 3.57, 2013-02-06\\n CVG, NYSE, 4.17, 2013-02-08\\n RRC, NYSE, 228.57, 2013-02-27\\n SAP, NYSE, -2.38, 2013-01-24\\n CCI, NYSE, 57.14, 2013-01-24\\n PQ, NYSE, -20.00, 2013-03-01\\n WFT, NYSE, -94.44, 2013-02-27\\n CAA, NYSE, 14.29, 2013-02-01\\n ENB, NYSE, -6.67, 2013-02-16\\n GMK, NYSE, -8.33, 2013-02-28\\n MMR, NYSE, 75.00, 2013-01-19\\n PB, NYSE, 1.19, 2013-01-26\\n VIV, NYSE, -7.25, 2013-02-26\\n AXL, NYSE, -111.76, 2013-02-09\\n BP, NYSE, 19.05, 2013-02-06\\n ETM, NYSE, 13.04, 2013-02-09\\n HT, NYSE, 10.00, 2013-02-21\\n BYI, NYSE, 5.26, 2013-02-01\\n CEB, NYSE, 4.84, 2013-02-07\\n INFY, NYSE, 5.56, 2013-01-12\\n JLL, NYSE, -0.38, 2013-01-30\\n AZN, NYSE, 24.64, 2013-02-01\\n SFG, NYSE, 7.23, 2013-01-30\\n TREX, NYSE, 27.78, 2013-02-20\\n GS, NYSE, 61.38, 2013-01-17\\n SYX, NYSE, -144.44, 2013-03-06\\n WCC, NYSE, -2.75, 2013-02-01\\n JNPR, NYSE, 26.67, 2013-01-25\\n RDN, NYSE, -146.43, 2013-02-12\\n RAI, NYSE, 4.11, 2013-02-13\\n SKX, NYSE, 172.73, 2013-02-14\\n WTM, NYSE, 724.10, 2013-02-06\\n NCI, NYSE, 29.17, 2013-02-15\\n BLT, NYSE, -21.74, 2013-03-08\\n BLK, NYSE, 5.88, 2013-01-18\\n CIR, NYSE, 25.45, 2013-03-01\\n PKG, NYSE, -1.61, 2013-01-23\\n PKI, NYSE, 0.00, 2013-02-01\\n UGP, NYSE, 38.10, 2013-02-21\\n WWE, NYSE, 0.00, 2013-03-01\\n SNN, NYSE, 2.86, 2013-02-08\\n UPS, NYSE, -4.35, 2013-02-01\\n XOXO, NYSE, 62.50, 2013-03-07\\n SLF, NYSE, 36.36, 2013-02-14\\n CDR, NYSE, 33.33, 2013-03-08\\n RLH, NYSE, -21.43, 2013-03-01\\n EW, NYSE, 16.88, 2013-02-05\\n MET, NYSE, 5.93, 2013-02-13\\n FBR, NYSE, -28.57, 2013-01-31\\n VVC, NYSE, 23.81, 2013-02-15\\n BAM, NYSE, 148.28, 2013-02-16\\n NVS, NYSE, 0.00, 2013-01-24\\n VGR, NYSE, -43.75, 2013-02-27\\n BHLB, NYSE, 0.00, 2013-01-29\\n CRL, NYSE, 6.67, 2013-02-14\\n CYH, NYSE, 0.00, 2013-02-22\\n MBT, NYSE, 65.71, 2013-03-20\\n MTOR, NYSE, -375.00, 2013-01-31\\n CNQ, NYSE, -29.55, 2013-03-08\\n ERJ, NYSE, -25.27, 2013-03-13\\n VZ, NYSE, -28.30, 2013-01-23\\n EVC, NYSE, 12.50, 2013-02-28\\n PBR, NYSE, 0.00, 2013-02-05\\n XEL, NYSE, 3.57, 2013-02-01\\n ALE, NYSE, 0.00, 2013-02-16\\n HW, NYSE, -20.00, 2013-01-30\\n POL, NYSE, 0.00, 2013-01-30\\n UMC, NYSE, 0.00, 2013-02-07\\n ASX, NYSE, 41.43, 2013-01-31\\n COH, NYSE, -4.65, 2013-01-23\\n CXW, NYSE, 7.32, 2013-02-14\\n DVA, NYSE, 6.33, 2013-02-15\\n EXC, NYSE, -1.54, 2013-02-08\\n MCO, NYSE, 7.14, 2013-02-09\\n BRFS, NYSE, 43.48, 2013-03-06\\n TU, NYSE, -1.15, 2013-02-16\\n WIT, NYSE, 0.00, 2013-01-18\\n ERF, NYSE, 462.50, 2013-02-22\\n GG, NYSE, -22.22, 2013-02-15\\n HNT, NYSE, -2.70, 2013-01-31\\n NXY, NYSE, -23.44, 2013-02-26\\n NYCB, NYSE, -3.45, 2013-01-31\\n SXT, NYSE, -8.33, 2013-02-08\\n CPG, NYSE, -191.67, 2013-03-15\\n AMX, NYSE, -40.00, 2013-02-13\\n MPX, NYSE, -50.00, 2013-01-24\\n OIS, NYSE, -5.82, 2013-02-20\\n BH, NYSE, -35.35, 2013-01-26\\n MMP, NYSE, 6.15, 2013-02-06\\n PES, NYSE, 250.00, 2013-02-14\\n ABB, NYSE, -18.75, 2013-02-15\\n RDY, NYSE, -27.27, 2013-02-15\\n KMR, NYSE, -19.23, 2013-02-22\\n GEN, NYSE, -20.00, 2013-02-12\\n ADS, NYSE, 2.38, 2013-02-01\\n CVI, NYSE, 5.15, 2013-03-13\\n FTI, NYSE, 
0.00, 2013-02-13\\n PRA, NYSE, 10.64, 2013-02-20\\n STO, NYSE, 26.47, 2013-02-08\\n BEL, NYSE, -266.67, 2013-02-21\\n FIS, NYSE, -8.82, 2013-02-13\\n COL, NYSE, 4.44, 2013-01-19\\n KAI, NYSE, 7.32, 2013-02-27\\n FRM, NYSE, 233.33, 2013-03-09\\n ABC, NYSE, 0.00, 2013-01-25\\n BG, NYSE, -76.15, 2013-02-08\\n FRO, NYSE, 106.52, 2013-02-22\\n ECA, NYSE, -3.12, 2013-02-15\\n CS, NYSE, -54.76, 2013-02-08\\n EEP, NYSE, -30.77, 2013-02-14\\n CVX, NYSE, -1.65, 2013-02-02\\n DB, NYSE, 280.49, 2013-02-01\\n GXP, NYSE, 200.00, 2013-03-01\\n JHX, NYSE, 371.43, 2013-02-28\\n PFG, NYSE, 10.81, 2013-02-01\\n PVR, NYSE, -227.78, 2013-02-21\\n AAP, NYSE, 17.33, 2013-02-08\\n KND, NYSE, 4.55, 2013-02-26\\n WTW, NYSE, 9.09, 2013-02-14\\n CNC, NYSE, 42.42, 2013-02-06\\n PRU, NYSE, -2.87, 2013-02-07\\n BCH, NYSE, 12.94, 2013-02-06\\n NS, NYSE, -19.35, 2013-02-02\\n ITUB, NYSE, -5.00, 2013-02-05\\n SXL, NYSE, 20.88, 2013-02-21\\n VALE, NYSE, -26.00, 2013-02-28\\n TNP, NYSE, -128.57, 2013-04-20\\n LCI, NYSE, 233.33, 2013-02-08\\n AUO, NYSE, -122.73, 2013-02-07\\n GTI, NYSE, 19.05, 2013-02-27\\n HNR, NYSE, -127.27, 2013-05-04\\n MWE, NYSE, -38.89, 2013-02-28\\n NLS, NYSE, 4.55, 2013-03-05\\n RGC, NYSE, 40.00, 2013-02-08\\n SBS, NYSE, 48.25, 2013-03-22\\n JAH, NYSE, 2.40, 2013-02-15\\n NPO, NYSE, 110.71, 2013-02-08\\n TRI, NYSE, 9.09, 2013-02-14\\n CAE, NYSE, 12.50, 2013-02-14\\n LF, NYSE, 971.43, 2013-02-07\\n SNY, NYSE, 1.30, 2013-02-08\\n WHG, NYSE, 15.91, 2013-02-08\\n BANC, NYSE, -300.00, 2013-03-02\\n GTN, NYSE, 4.35, 2013-02-21\\n BAK, NYSE, -150.00, 2013-02-08\\n COP, NYSE, 1.42, 2013-01-31\\n CNP, NYSE, 40.00, 2013-02-28\\n EEQ, NYSE, -18.18, 2013-02-15\\n MRH, NYSE, 60.26, 2013-02-08\\n NGS, NYSE, 26.09, 2013-03-15\\n NRP, NYSE, 34.88, 2013-02-14\\n PXP, NYSE, -22.64, 2013-02-22\\n XEC, NYSE, 9.26, 2013-02-20\\n IAG, NYSE, -11.11, 2013-02-21\\n TS, NYSE, -16.44, 2013-02-22\\n EGO, NYSE, 6.67, 2013-02-23\\n JNS, NYSE, 35.71, 2013-01-25\\n PFS, NYSE, 7.41, 2013-02-02\\n ENH, NYSE, 21.68, 2013-02-08\\n IHG, NYSE, 5.56, 2013-02-20\\n CNX, NYSE, 95.45, 2013-02-01\\n AMT, NYSE, -17.07, 2013-02-27\\n ABG, NYSE, 10.77, 2013-02-20\\n LII, NYSE, 0.00, 2013-02-06\\n SRE, NYSE, 11.34, 2013-02-27\\n AEE, NYSE, -36.36, 2013-02-21\\n PLD, NYSE, 0.00, 2013-02-07\\n SAH, NYSE, 4.00, 2013-02-21\\n GPI, NYSE, -17.50, 2013-02-20\\n FIX, NYSE, -11.11, 2013-03-01\\n MMS, NYSE, 12.50, 2013-02-08\\n SRI, NYSE, -28.57, 2013-03-02\\n RTEC, NYSE, 6.25, 2013-02-05\\n NOV, NYSE, 3.47, 2013-02-02\\n DF, NYSE, 33.33, 2013-02-14\\n SAM, NYSE, 1.63, 2013-02-21\\n RL, NYSE, 8.60, 2013-02-07\\n FLR, NYSE, 132.35, 2013-02-21\\n ALL, NYSE, 942.86, 2013-02-07\\n ATI, NYSE, 5.88, 2013-01-24\\n EE, NYSE, -14.29, 2013-02-20\\n AIT, NYSE, 0.00, 2013-02-01\\n CHH, NYSE, 9.76, 2013-02-12\\n FMS, NYSE, 105.77, 2013-02-27\\n BCO, NYSE, -7.69, 2013-02-02\\n CBB, NYSE, -125.00, 2013-02-28\\n MWW, NYSE, 0.00, 2013-02-08\\n PSA, NYSE, 5.68, 2013-02-22\\n E, NYSE, 2.83, 2013-02-16\\n JPM, NYSE, 15.83, 2013-01-17\\n USB, NYSE, 1.35, 2013-01-17\\n HON, NYSE, 0.92, 2013-01-26\\n ITG, NYSE, 100.00, 2013-02-01\\n ARB, NYSE, 6.25, 2013-02-26\\n APL, NYSE, 0.00, 2013-02-19\\n AVA, NYSE, -42.22, 2013-02-21\\n AXS, NYSE, 64.96, 2013-02-05\\n CHT, NYSE, 5.26, 2013-01-31\\n MOH, NYSE, 145.45, 2013-02-08\\n CVD, NYSE, 2.82, 2013-01-25\\n AHT, NYSE, 2.63, 2013-02-28\\n GPK, NYSE, 12.50, 2013-02-08\\n CNO, NYSE, 8.70, 2013-02-12\\n AUQ, NYSE, -28.57, 2013-03-26\\n JRN, NYSE, 34.62, 2013-03-08\\nGRP.U, NYSE, -14.92, 2013-03-06\\n NFP, NYSE, 11.43, 2013-02-15\\n CRI, 
NYSE, 2.30, 2013-02-28\\n FMD, NYSE, -20.00, 2013-02-08\\n FPO, NYSE, 10.34, 2013-02-22\\n TRQ, NYSE, -350.00, 2013-03-26\\n WLL, NYSE, 9.21, 2013-02-28\\n AEL, NYSE, 14.63, 2013-02-21\\n AHL, NYSE, 87.60, 2013-02-08\\n AUY, NYSE, -3.70, 2013-02-21\\n CMP, NYSE, 0.00, 2013-02-07\\n KRO, NYSE, -400.00, 2013-03-13\\n TPX, NYSE, 9.09, 2013-01-25\\n UTI, NYSE, 75.00, 2013-02-01\\n PJC, NYSE, 31.34, 2013-01-31\\n TRW, NYSE, 14.81, 2013-02-16\\n AIZ, NYSE, 122.58, 2013-02-07\\n HTH, NYSE, 62.50, 2013-03-16\\n ETP, NYSE, 0.00, 2013-02-21\\n SMI, NYSE, 500.00, 2013-02-07\\n LSE, NYSE, -6.25, 2013-02-16\\n BBD, NYSE, -2.63, 2013-01-29\\n NRG, NYSE, 124.14, 2013-02-28\\n HOS, NYSE, 29.17, 2013-02-07\\n ABR, NYSE, 160.00, 2013-02-16\\n FHN, NYSE, 0.00, 2013-01-19\\n AGO, NYSE, 32.39, 2013-02-28\\n HSP, NYSE, 1.85, 2013-02-14\\n HNI, NYSE, -6.98, 2013-02-06\\n GHL, NYSE, -32.43, 2013-01-24\\n XPO, NYSE, -14.00, 2013-02-28\\n CVO, NYSE, 23.08, 2013-02-28\\n CHE, NYSE, 16.92, 2013-02-19\\n GNW, NYSE, 30.77, 2013-02-06\\n CBG, NYSE, 12.24, 2013-02-07\\n SFL, NYSE, -26.67, 2013-02-26\\n NEU, NYSE, -15.57, 2013-01-29\\n GOL, NYSE, -109.09, 2013-03-26\\n CAB, NYSE, 4.17, 2013-02-15\\n LTM, NYSE, 1.82, 2013-02-22\\n VVI, NYSE, 10.53, 2013-02-02\\n WCG, NYSE, 0.00, 2013-02-14\\n HEP, NYSE, -2.63, 2013-02-22\\n DPZ, NYSE, 8.47, 2013-03-01\\n BDC, NYSE, 9.86, 2013-02-08\\n EGY, NYSE, -171.43, 2013-03-15\\n LPL, NYSE, 2.63, 2013-02-22\\n ENS, NYSE, 12.82, 2013-02-07\\n BMR, NYSE, 5.88, 2013-02-06\\n ACC, NYSE, 9.26, 2013-02-13\\n KRG, NYSE, -9.09, 2013-02-08\\n WLK, NYSE, 13.60, 2013-02-20\\n EXR, NYSE, 4.65, 2013-02-22\\n CNS, NYSE, 16.67, 2013-01-24\\n IOC, NYSE, 264.29, 2013-02-28\\n STON, NYSE, -233.33, 2013-03-16\\n CPL, NYSE, 38.10, 2013-03-13\\n TPGI, NYSE, -114.29, 2013-02-14\\n SHO, NYSE, -3.33, 2013-02-20\\n CUBE, NYSE, 5.00, 2013-02-22\\n NRF, NYSE, 170.37, 2013-02-15\\n BBW, NYSE, -68.29, 2013-02-15\\n DLR, NYSE, 4.31, 2013-02-16\\n NWE, NYSE, 2.63, 2013-02-15\\n ORA, NYSE, 200.00, 2013-02-28\\n NP, NYSE, 5.26, 2013-02-21\\n SMA, NYSE, -21.05, 2013-02-22\\n BBG, NYSE, 25.00, 2013-02-22\\n BXC, NYSE, -163.16, 2013-02-14\\n KNL, NYSE, 32.14, 2013-02-06\\n LVS, NYSE, -8.47, 2013-01-31\\n HLF, NYSE, 0.96, 2013-02-20\\n MIC, NYSE, -20.41, 2013-02-21\\n PHH, NYSE, -11.54, 2013-02-07\\n CE, NYSE, 6.35, 2013-01-29\\n EDR, NYSE, 0.00, 2013-02-20\\n WTI, NYSE, 8.33, 2013-02-27\\n ARC, NYSE, -100.00, 2013-03-01\\n PBH, NYSE, 8.82, 2013-02-08\\n HUN, NYSE, 0.00, 2013-02-13\\n DLB, NYSE, 4.44, 2013-01-30\\n DSX, NYSE, -33.33, 2013-03-15\\n LAZ, NYSE, 84.85, 2013-02-08\\n TGP, NYSE, 1.82, 2013-02-22\\n TLP, NYSE, -43.48, 2013-03-13\\n DRH, NYSE, 16.00, 2013-03-01\\n HTGC, NYSE, 8.70, 2013-03-01\\n KFN, NYSE, 5.26, 2013-02-06\\n THS, NYSE, 0.00, 2013-02-22\\n NSR, NYSE, -12.50, 2013-02-06\\n WAL, NYSE, 0.00, 2013-01-25\\n SLW, NYSE, 2.04, 2013-03-22\\n MPW, NYSE, 0.00, 2013-02-08\\nRDS.B, NYSE, 16.00, 2013-02-01\\n GNK, NYSE, -24.71, 2013-02-21\\n MFB, NYSE, 4.76, 2013-03-07\\nRDS.A, NYSE, 9.95, 2013-02-01\\n ITC, NYSE, 0.93, 2013-02-28\\n FTK, NYSE, -158.82, 2013-03-14\\n PIKE, NYSE, 168.00, 2013-02-06\\n ALJ, NYSE, 0.00, 2013-03-07\\n DRC, NYSE, -4.55, 2013-03-01\\n STN, NYSE, 8.06, 2013-02-22\\n SSW, NYSE, -6.90, 2013-03-06\\n CF, NYSE, 3.41, 2013-02-20\\n HPY, NYSE, 0.00, 2013-02-08\\n ACCO, NYSE, 0.00, 2013-02-14\\n ROC, NYSE, -6.25, 2013-02-20\\n WPZ, NYSE, -28.57, 2013-02-20\\n LCC, NYSE, 44.44, 2013-01-24\\n GLP, NYSE, 58.82, 2013-03-15\\n AMP, NYSE, 15.54, 2013-01-31\\n DHT, NYSE, 108.33, 2013-01-30\\n 
FNF, NYSE, 17.86, 2013-02-20\\n NM, NYSE, 20.00, 2013-02-20\\n CCO, NYSE, 25.00, 2013-02-20\\n BWP, NYSE, 0.00, 2013-02-12\\n ICE, NYSE, 5.14, 2013-02-07\\n BKD, NYSE, -57.14, 2013-02-12\\n AAV, NYSE, 350.00, 2013-03-28\\n BAS, NYSE, -42.11, 2013-02-20\\n CPA, NYSE, -9.87, 2013-02-07\\n LYV, NYSE, -147.06, 2013-02-27\\n WNR, NYSE, 5.84, 2013-03-01\\n CMG, NYSE, 0.00, 2013-02-06\\n RGP, NYSE, -180.00, 2013-02-21\\n KOP, NYSE, 11.86, 2013-02-15\\n UAL, NYSE, -7.41, 2013-01-25\\n ETE, NYSE, -90.91, 2013-02-21\\n RSO, NYSE, -17.65, 2013-03-05\\n XCO, NYSE, 6.25, 2013-02-21\\n PAC, NYSE, 41.18, 2013-02-28\\n NYX, NYSE, 10.26, 2013-02-06\\n TDG, NYSE, 51.65, 2013-02-05\\n BMA, NYSE, 18.40, 2013-02-15\\n THI, NYSE, -2.82, 2013-02-22\\n BTE, NYSE, -40.48, 2013-03-08\\n CNH, NYSE, 29.58, 2013-02-01\\n GLA, NYSE, 67.44, 2013-02-14\\n POR, NYSE, -9.52, 2013-02-23\\n HIL, NYSE, -100.00, 2013-03-12\\n HVB, NYSE, -20.00, 2013-02-01\\n KS, NYSE, 0.00, 2013-02-14\\n HK, NYSE, 0.00, 2013-03-01\\n DCP, NYSE, 59.62, 2013-02-28\\n DK, NYSE, 10.10, 2013-03-08\\n CODI, NYSE, 14.81, 2013-03-07\\n VG, NYSE, 25.00, 2013-02-14\\n MA, NYSE, 1.46, 2013-02-01\\n MWA, NYSE, -200.00, 2013-02-06\\n KOG, NYSE, 14.29, 2013-03-01\\n PWE, NYSE, -500.00, 2013-02-15\\n PGTI, NYSE, 100.00, 2013-02-21\\n AWH, NYSE, 16.23, 2013-02-14\\n NSH, NYSE, -65.71, 2013-02-02\\n WYN, NYSE, 5.00, 2013-02-07\\n WNS, NYSE, 0.00, 2013-01-17\\n AYR, NYSE, 36.84, 2013-02-22\\n EVR, NYSE, 55.77, 2013-01-31\\n HBI, NYSE, 7.00, 2013-02-06\\n WU, NYSE, 20.00, 2013-02-13\\n OC, NYSE, -31.25, 2013-02-21\\n MR, NYSE, 2.08, 2013-02-26\\n DAC, NYSE, -21.43, 2013-02-12\\n AWI, NYSE, 3.03, 2013-02-20\\n SUSS, NYSE, 444.44, 2013-02-28\\n DEI, NYSE, 0.00, 2013-02-13\\n OB, NYSE, -200.00, 2013-02-06\\n SBH, NYSE, -5.88, 2013-02-08\\n EBS, NYSE, -4.35, 2013-03-08\\n KBR, NYSE, 122.22, 2013-02-21\\n AER, NYSE, 30.95, 2013-02-21\\n NOA, NYSE, -11.11, 2013-02-06\\n SPR, NYSE, -2.27, 2013-02-13\\n ANW, NYSE, 0.00, 2013-02-28\\n DCT, NYSE, 10.00, 2013-02-08\\n SE, NYSE, -3.03, 2013-02-06\\n TOO, NYSE, 16.67, 2013-02-22\\n TSL, NYSE, -39.77, 2013-02-27\\n TWC, NYSE, 1.95, 2013-02-01\\n MVO, NYSE, -5.06, 2013-03-15\\n CO, NYSE, 40.00, 2013-02-27\\n EXK, NYSE, -45.83, 2013-03-13\\n EIG, NYSE, -25.00, 2013-02-28\\n HF, NYSE, 21.62, 2013-03-07\\n CEL, NYSE, 34.78, 2013-03-05\\n FIG, NYSE, 53.85, 2013-02-28\\n NGLS, NYSE, 0.00, 2013-02-15\\n TCAP, NYSE, 3.64, 2013-03-07\\n GFA, NYSE, -483.33, 2013-03-12\\n BR, NYSE, -5.56, 2013-02-08\\n SCR, NYSE, 85.71, 2013-03-08\\n CNK, NYSE, -12.82, 2013-02-21\\n DAL, NYSE, 0.00, 2013-01-23\\n ORN, NYSE, 250.00, 2013-03-01\\n ACM, NYSE, 9.09, 2013-02-06\\n JMP, NYSE, 62.50, 2013-02-14\\n SLH, NYSE, 1.69, 2013-02-08\\n CLR, NYSE, 16.85, 2013-02-28\\n BGS, NYSE, -17.95, 2013-02-15\\n STAR, NYSE, 12.50, 2013-02-27\\n YGE, NYSE, -74.07, 2013-03-05\\n DFS, NYSE, -9.40, 2013-03-06\\n TEL, NYSE, 1.56, 2013-01-24\\n BX, NYSE, 25.53, 2013-02-01\\n SEP, NYSE, 8.11, 2013-02-06\\n BZ, NYSE, -30.00, 2013-02-27\\n PPO, NYSE, -28.26, 2013-02-21\\n PRO, NYSE, 25.00, 2013-02-13\\n WBC, NYSE, 13.68, 2013-02-16\\n DHX, NYSE, 7.14, 2013-01-31\\n PMC, NYSE, 13.79, 2013-02-08\\n HGG, NYSE, 0.00, 2013-02-01\\n OWW, NYSE, -14.29, 2013-02-15\\n VR, NYSE, 35.58, 2013-02-01\\n CXO, NYSE, -5.88, 2013-02-21\\n G, NYSE, 4.76, 2013-02-08\\n EJ, NYSE, 160.00, 2013-03-13\\n WX, NYSE, 32.00, 2013-03-08\\n CMLP, NYSE, -50.00, 2013-02-06\\n VMW, NYSE, -5.56, 2013-01-29\\n CZZ, NYSE, 63.64, 2013-02-08\\n CGA, NYSE, -3.23, 2013-02-09\\n TDC, NYSE, 5.71, 2013-02-08\\n 
FLY, NYSE, 137.65, 2013-03-08\\n DUF, NYSE, 6.25, 2013-02-26\\n MAIN, NYSE, 12.00, 2013-03-08\\n REN, NYSE, -50.00, 2013-03-08\\n TGH, NYSE, 9.57, 2013-02-13\\n DFT, NYSE, -5.00, 2013-02-07\\n RF, NYSE, 10.00, 2013-01-23\\n PZN, NYSE, -22.22, 2013-02-13\\n LL, NYSE, 19.05, 2013-02-21\\n NMM, NYSE, 0.00, 2013-01-25\\n OZM, NYSE, 5.48, 2013-02-08\\n ES, NYSE, -5.08, 2013-02-20\\n MSCI, NYSE, -1.89, 2013-02-08\\n ARR, NYSE, -18.52, 2013-02-23\\n KW, NYSE, 275.00, 2013-03-13\\n GTS, NYSE, -10.17, 2013-02-07\\n FOR, NYSE, 222.22, 2013-02-14\\n LRN, NYSE, 4.35, 2013-02-06\\n TNK, NYSE, -125.00, 2013-02-22\\n N, NYSE, 21.43, 2013-02-01\\n DAN, NYSE, 5.56, 2013-02-22\\n BIP, NYSE, 12.07, 2013-02-09\\n CPN, NYSE, -500.00, 2013-02-14\\n SOL, NYSE, 2.70, 2013-03-15\\n PM, NYSE, 1.64, 2013-02-08\\n HI, NYSE, 7.89, 2013-02-05\\n V, NYSE, 2.25, 2013-02-07\\n IPI, NYSE, 0.00, 2013-02-14\\n AWK, NYSE, -14.29, 2013-02-27\\n HTS, NYSE, 37.84, 2013-02-13\\n DPS, NYSE, -4.71, 2013-02-14\\n CFX, NYSE, 7.69, 2013-02-07\\n WES, NYSE, -27.91, 2013-02-28\\n SB, NYSE, -10.00, 2013-02-21\\n LO, NYSE, 3.95, 2013-02-14\\n LPS, NYSE, 10.45, 2013-02-08\\n FF, NYSE, -31.82, 2013-03-19\\n NNA, NYSE, 150.00, 2013-02-13\\n EPB, NYSE, 14.55, 2013-01-17\\n JBT, NYSE, 3.23, 2013-03-07\\n DL, NYSE, 33.33, 2013-02-27\\n RAX, NYSE, -4.55, 2013-02-13\\n HCI, NYSE, 67.61, 2013-03-06\\n EC, NYSE, -20.47, 2013-02-16\\n CLW, NYSE, 10.53, 2013-02-21\\n MJN, NYSE, 5.88, 2013-02-01\\n EPC, NYSE, 1.85, 2013-02-01\\n BPI, NYSE, -3.33, 2013-03-13\\n RST, NYSE, 55.56, 2013-03-01\\n DGI, NYSE, 92.31, 2013-02-27\\n SWI, NYSE, 10.34, 2013-02-05\\n CYS, NYSE, -46.15, 2013-02-07\\n IVR, NYSE, 20.31, 2013-02-06\\n BUD, NYSE, -5.08, 2013-02-28\\n PMT, NYSE, -2.35, 2013-02-08\\n STWD, NYSE, 15.38, 2013-02-28\\n CFN, NYSE, -16.98, 2013-02-09\\n SPB, NYSE, 71.43, 2013-02-07\\n ARI, NYSE, -10.34, 2013-02-28\\n CLNY, NYSE, -13.89, 2013-03-07\\n ART, NYSE, 300.00, 2013-02-15\\n SEM, NYSE, 12.00, 2013-02-22\\n BSBR, NYSE, 578.57, 2013-03-28\\n DOLE, NYSE, -6100.00, 2013-03-13\\n VSI, NYSE, 0.00, 2013-02-27\\n TWO, NYSE, -15.15, 2013-02-07\\n CVE, NYSE, -14.29, 2013-02-15\\n H, NYSE, 81.82, 2013-02-14\\n LEA, NYSE, 7.25, 2013-02-02\\n CLD, NYSE, 8.00, 2013-02-14\\n AOL, NYSE, 7.50, 2013-02-09\\n CHSP, NYSE, 5.13, 2013-02-22\\n PEB, NYSE, 0.00, 2013-02-22\\n CIT, NYSE, 60.94, 2013-01-30\\n KAR, NYSE, -4.55, 2013-02-21\\n CIE, NYSE, -66.67, 2013-02-27\\n TMH, NYSE, 8.33, 2013-02-06\\n KRA, NYSE, -300.00, 2013-02-28\\n SYA, NYSE, -29.41, 2013-02-05\\n TRNO, NYSE, -162.50, 2013-02-16\\n PDM, NYSE, -2.70, 2013-02-08\\n GNRC, NYSE, 26.09, 2013-02-15\\n ACW, NYSE, -2.17, 2013-03-07\\n BALT, NYSE, -11.76, 2013-02-21\\n ST, NYSE, 2.17, 2013-01-31\\n SEMG, NYSE, 55.56, 2013-03-01\\n CALX, NYSE, 20.00, 2013-02-06\\n MXL, NYSE, -57.14, 2013-02-06\\n STNG, NYSE, -60.00, 2013-02-26\\n PRI, NYSE, -1.43, 2013-02-08\\n SDRL, NYSE, -93.65, 2013-03-01\\n CLDT, NYSE, 0.00, 2013-02-20\\n EXL, NYSE, 0.00, 2013-02-28\\n LYB, NYSE, -0.88, 2013-02-02\\n PNG, NYSE, 7.14, 2013-02-07\\n PLOW, NYSE, -25.00, 2013-03-12\\n SIX, NYSE, 198.00, 2013-02-21\\n NKA, NYSE, 1066.67, 2013-02-01\\n RRTS, NYSE, 0.00, 2013-02-07\\n JKS, NYSE, -332.48, 2013-04-11\\n CODE, NYSE, -13.64, 2013-01-30\\n FAF, NYSE, 44.64, 2013-02-22\\n QEP, NYSE, 3.13, 2013-02-20\\n OAS, NYSE, 6.52, 2013-02-26\\n VPG, NYSE, 15.38, 2013-02-13\\n HPP, NYSE, 9.52, 2013-03-07\\n FN, NYSE, 9.09, 2013-02-05\\n ECT, NYSE, 65.85, 2013-03-16\\n QUAD, NYSE, -6.67, 2013-03-05\\n KKR, NYSE, 54.84, 2013-02-08\\n RLD, NYSE, 20.00, 
2013-02-07\\n AMRC, NYSE, 44.44, 2013-03-19\\n GDOT, NYSE, 50.00, 2013-02-01\\n AT, NYSE, -160.00, 2013-03-01\\n ENV, NYSE, 0.00, 2013-02-15\\n IL, NYSE, 200.00, 2013-02-22\\n WSR, NYSE, -12.00, 2013-03-13\\n SFUN, NYSE, 35.71, 2013-02-09\\n COR, NYSE, 5.00, 2013-02-23\\n VC, NYSE, 20.62, 2013-03-01\\n CCSC, NYSE, -20.00, 2013-03-07\\n CCG, NYSE, 0.00, 2013-02-27\\n EFC, NYSE, -72.73, 2013-02-14\\n TOWR, NYSE, 183.33, 2013-02-16\\n CHMT, NYSE, -53.13, 2013-02-26\\n HBM, NYSE, 200.00, 2013-02-21\\n EXAM, NYSE, 55.56, 2013-02-28\\n XUE, NYSE, 7.69, 2013-02-28\\n CMRE, NYSE, 6.67, 2013-01-24\\n NOAH, NYSE, 20.00, 2013-02-26\\n IPHI, NYSE, -40.00, 2013-02-05\\n BITA, NYSE, 33.33, 2013-03-08\\n BAH, NYSE, 11.11, 2013-01-31\\n GM, NYSE, -2.04, 2013-02-15\\n TROX, NYSE, -60.00, 2013-02-21\\n DANG, NYSE, 20.00, 2013-03-08\\n YOKU, NYSE, 9.09, 2013-03-01\\n FRC, NYSE, -16.44, 2013-01-17\\n RFP, NYSE, 52.38, 2013-02-13\\n ISS, NYSE, 15.38, 2013-03-09\\n WD, NYSE, -14.29, 2013-03-07\\n FLT, NYSE, 10.00, 2013-02-08\\n GCAP, NYSE, -325.00, 2013-03-13\\n FRF, NYSE, -25.93, 2013-03-29\\n SWFT, NYSE, 46.15, 2013-01-24\\n AG, NYSE, -10.34, 2013-02-27\\n QRE, NYSE, -174.07, 2013-03-07\\n AAT, NYSE, 11.76, 2013-02-20\\n MCC, NYSE, 5.41, 2013-02-07\\n NLSN, NYSE, 3.51, 2013-02-12\\n AGRO, NYSE, -71.43, 2013-03-22\\n BKU, NYSE, 27.08, 2013-01-30\\n INXN, NYSE, -38.89, 2013-02-28\\n NPTN, NYSE, 16.67, 2013-02-22\\n INN, NYSE, 25.00, 2013-02-27\\n KMI, NYSE, -5.88, 2013-01-17\\n HCA, NYSE, 9.64, 2013-02-05\\n MX, NYSE, 135.21, 2013-01-31\\n HII, NYSE, 8.89, 2013-02-28\\n QIHU, NYSE, 175.00, 2013-03-06\\n APO, NYSE, 119.48, 2013-02-09\\n GNC, NYSE, 8.70, 2013-02-15\\n SDT, NYSE, 11.48, 2013-03-16\\n UAN, NYSE, 16.67, 2013-02-28\\n ARCO, NYSE, 5.00, 2013-03-09\\n ELLI, NYSE, 36.36, 2013-02-15\\n TMS, NYSE, -23.81, 2013-02-15\\n SQNS, NYSE, -16.00, 2013-02-08\\n STAG, NYSE, 17.24, 2013-02-21\\n AL, NYSE, 8.33, 2013-03-01\\n TLLP, NYSE, 10.42, 2013-02-12\\n RENN, NYSE, 14.29, 2013-03-12\\n NQ, NYSE, 800.00, 2013-03-07\\n THR, NYSE, -14.29, 2013-02-08\\n KOS, NYSE, 125.00, 2013-02-26\\n RLJ, NYSE, 4.35, 2013-02-28\\n NGL, NYSE, -7.41, 2013-02-16\\n FENG, NYSE, 100.00, 2013-03-07\\n LNKD, NYSE, 900.00, 2013-02-08\\n NMFC, NYSE, 5.88, 2013-03-07\\n ACTV, NYSE, 5.26, 2013-02-15\\n TAOM, NYSE, 700.00, 2013-03-15\\n RATE, NYSE, -60.00, 2013-02-13\\n VHS, NYSE, -22.22, 2013-01-31\\n MPC, NYSE, 8.13, 2013-01-31\\n MITT, NYSE, -1.16, 2013-03-06\\n OILT, NYSE, 0.00, 2013-03-07\\n SXC, NYSE, 14.71, 2013-02-06\\n AMTG, NYSE, -8.57, 2013-03-07\\n AMID, NYSE, -2500.00, 2013-04-17\\n WAIR, NYSE, -7.41, 2013-01-30\\n PER, NYSE, -7.58, 2013-03-02\\n PPP, NYSE, -44.44, 2013-02-22\\n FNV, NYSE, -8.33, 2013-03-20\\n FSM, NYSE, 16.67, 2013-03-21\\n FBHS, NYSE, 4.55, 2013-02-01\\n XLS, NYSE, 4.44, 2013-03-02\\n XYL, NYSE, 2.17, 2013-02-08\\n NDRO, NYSE, 4.76, 2013-03-19\\n RNF, NYSE, -33.33, 2013-03-20\\n VAC, NYSE, 25.53, 2013-02-22\\n CHKR, NYSE, -7.25, 2013-03-16\\n PACD, NYSE, 14.29, 2013-02-28\\n INVN, NYSE, 0.00, 2013-01-24\\n DLPH, NYSE, 3.45, 2013-02-06\\n MN, NYSE, 0.00, 2013-02-14\\n RRMS, NYSE, -25.00, 2013-03-01\\n WPX, NYSE, -400.00, 2013-03-01\\n LPI, NYSE, 0.00, 2013-03-13\\n SN, NYSE, -80.00, 2013-03-07\\n KORS, NYSE, 60.00, 2013-02-13\\n BCEI, NYSE, -7.89, 2013-03-15\\n BOXC, NYSE, 4.78, 2013-01-29\\n PVG, NYSE, -25.00, 2013-03-06\\n POST, NYSE, 30.43, 2013-02-08\\n SLCA, NYSE, 32.26, 2013-02-27\\n MTDR, NYSE, -116.67, 2013-03-14\\n GWAY, NYSE, -200.00, 2013-02-13\\n EPAM, NYSE, -10.81, 2013-02-28\\n RNDY, NYSE, 5.56, 
2013-03-01\\n CPAC, NYSE, -13.33, 2013-02-21\\n PRLB, NYSE, 7.69, 2013-02-14\\n YELP, NYSE, -50.00, 2013-02-07\\n NSM, NYSE, 7.58, 2013-03-08\\n ALSN, NYSE, 257.14, 2013-02-20\\n DWRE, NYSE, 350.00, 2013-02-15\\n VNTV, NYSE, 16.13, 2013-02-21\\n ET, NYSE, 34.78, 2013-02-22\\n VIPS, NYSE, 1100.00, 2013-02-22\\n VCRA, NYSE, -33.33, 2013-02-28\\n RM, NYSE, -1.89, 2013-02-28\\n BNNY, NYSE, 0.00, 2013-02-12\\n MM, NYSE, 200.00, 2013-02-20\\n RXN, NYSE, -15.00, 2013-02-12\\n GLOG, NYSE, -20.00, 2013-02-28\\n PBA, NYSE, 44.44, 2013-03-02\\n RPAI, NYSE, 15.79, 2013-02-20\\n OAK, NYSE, 63.33, 2013-02-15\\n FET, NYSE, -3.45, 2013-02-15\\n MRC, NYSE, 17.02, 2013-02-22\\n PSX, NYSE, 21.18, 2013-01-31\\n TUMI, NYSE, 0.00, 2013-03-21\\n ACRE, NYSE, -38.10, 2013-04-02\\n EVER, NYSE, 17.24, 2013-01-31\\n PDH, NYSE, -13.79, 2013-02-07\\n WMC, NYSE, 3.23, 2013-04-03\\n WAGE, NYSE, 0.00, 2013-02-21\\n HTA, NYSE, 0.00, 2013-02-21\\n ALEX, NYSE, 42.86, 2013-02-20\\n BKW, NYSE, 53.33, 2013-02-16\\n EQM, NYSE, 51.22, 2013-01-25\\n NOW, NYSE, 38.46, 2013-01-31\\n EGL, NYSE, 18.46, 2013-03-13\\n NGVC, NYSE, 25.00, 2013-02-01\\n NTI, NYSE, -25.00, 2013-03-14\\n AMRE, NYSE, 4.35, 2013-02-20\\n GMED, NYSE, 15.79, 2013-02-28\\n MANU, NYSE, -46.43, 2013-02-15\\n HCLP, NYSE, -28.57, 2013-02-01\\n ADT, NYSE, 4.76, 2013-01-31\\n TRLA, NYSE, -20.00, 2013-02-13\\n SRC, NYSE, 8.82, 2013-02-28\\n NBHC, NYSE, -14.29, 2013-01-29\\n BSMX, NYSE, -4.17, 2013-02-19\\n HY, NYSE, 14.53, 2013-02-20\\n SMLP, NYSE, 40.00, 2013-03-14\\n DYN, NYSE, -1714.29, 2013-03-15\\n LXFR, NYSE, 43.75, 2013-03-12\\n LOCK, NYSE, 16.67, 2013-02-21\\n JMI, NYSE, 97.78, 2013-03-22\\n BERY, NYSE, -40.00, 2013-02-01\\n FLTX, NYSE, 0.00, 2013-02-21\\n ANFI, NYSE, 30.77, 2013-02-26\\n SSTK, NYSE, -100.00, 2013-02-22\\n SDLP, NYSE, 90.91, 2013-03-01\\n MPLX, NYSE, -25.00, 2013-01-31\\n WWAV, NYSE, 5.88, 2013-02-14\\n SXE, NYSE, -4121.43, 2013-03-29\\n DKL, NYSE, -5.56, 2013-03-06\\n RKUS, NYSE, -20.00, 2013-02-13\\n WGP, NYSE, 57.14, 2013-02-28\\n PBF, NYSE, -92.31, 2013-03-01\\n SBY, NYSE, 0.00, 2013-03-01\\n RIOM, NYSE, 77.78, 2013-03-29\\n BFAM, NYSE, -1186.36, 2013-03-27\\n ZTS, NYSE, -79.41, 2013-03-29\\n DDC, NYSE, -39.13, 2013-04-04\\n ABM, NYSE, 18.18, 2013-03-05\\n ANN, NYSE, 0.00, 2013-03-09\\n BBY, NYSE, 5.81, 2013-03-02\\n BF.B, NYSE, 4.29, 2013-03-07\\n BKE, NYSE, 2.40, 2013-03-15\\n BNS, NYSE, -3.17, 2013-03-06\\n BRC, NYSE, -22.45, 2013-02-22\\n CATO, NYSE, -3.57, 2013-03-22\\n COO, NYSE, 2.50, 2013-03-08\\n CPB, NYSE, 6.06, 2013-02-16\\n CFI, NYSE, 10.34, 2013-02-28\\n DCI, NYSE, -10.53, 2013-02-26\\n DDS, NYSE, -1.03, 2013-02-26\\n DE, NYSE, 17.02, 2013-02-14\\n DY, NYSE, 50.00, 2013-02-27\\n EV, NYSE, -3.85, 2013-02-21\\n ENZ, NYSE, -133.33, 2013-03-13\\n ESL, NYSE, 13.11, 2013-03-01\\nFCE.A, NYSE, 9.09, 2013-03-28\\n M, NYSE, 3.54, 2013-02-27\\n GCO, NYSE, 1.41, 2013-03-09\\n GPS, NYSE, 2.82, 2013-03-01\\n HD, NYSE, 4.69, 2013-02-27\\n HEI, NYSE, -12.50, 2013-02-21\\n HNZ, NYSE, 10.00, 2013-02-28\\n HOV, NYSE, -66.67, 2013-03-07\\n HRB, NYSE, -633.33, 2013-03-08\\n HRL, NYSE, -2.04, 2013-02-22\\n HPQ, NYSE, 15.49, 2013-02-22\\n JCP, NYSE, -926.32, 2013-02-28\\n KR, NYSE, 25.71, 2013-03-08\\n KSS, NYSE, 1.84, 2013-03-01\\n LB, NYSE, 1.15, 2013-02-28\\n LOW, NYSE, 13.04, 2013-02-26\\n LZB, NYSE, 16.67, 2013-02-20\\n MDT, NYSE, 2.20, 2013-02-20\\n MEI, NYSE, 350.00, 2013-03-01\\n MPR, NYSE, 0.00, 2013-03-22\\n NAV, NYSE, 14.11, 2013-03-08\\n JWN, NYSE, 4.48, 2013-02-22\\n ODC, NYSE, -35.42, 2013-03-12\\n OXM, NYSE, -5.80, 2013-04-03\\n PBY, 
NYSE, -225.00, 2013-04-16\\n PLL, NYSE, 8.96, 2013-02-28\\n PNY, NYSE, 1.72, 2013-03-07\\n PVH, NYSE, 6.67, 2013-03-28\\n THO, NYSE, 0.00, 2013-03-08\\n TIF, NYSE, 2.19, 2013-03-23\\n TJX, NYSE, 1.23, 2013-02-28\\n TOL, NYSE, -81.82, 2013-02-21\\n TTC, NYSE, 23.26, 2013-02-22\\n VAL, NYSE, -9.09, 2013-02-13\\n JW.A, NYSE, 13.41, 2013-03-08\\n WMT, NYSE, 6.37, 2013-02-22\\n WSM, NYSE, 4.69, 2013-03-20\\n FL, NYSE, -11.11, 2013-03-09\\n CHS, NYSE, 0.00, 2013-03-01\\n REX, NYSE, -800.00, 2013-03-29\\n BKS, NYSE, -136.00, 2013-03-01\\n CAL, NYSE, 75.00, 2013-03-16\\n SIG, NYSE, 1.44, 2013-03-29\\n ZLC, NYSE, -1.92, 2013-02-22\\n AEO, NYSE, 0.00, 2013-03-07\\n FGP, NYSE, -10.00, 2013-03-08\\n BMO, NYSE, 1.37, 2013-02-27\\n RY, NYSE, 0.75, 2013-03-01\\n GEF, NYSE, -13.21, 2013-02-28\\n MOV, NYSE, 70.83, 2013-03-22\\n SKS, NYSE, 13.33, 2013-02-27\\n TD, NYSE, 1.55, 2013-03-01\\n ANF, NYSE, 14.51, 2013-02-23\\n CIEN, NYSE, 116.00, 2013-03-08\\n KMG, NYSE, -17.65, 2013-03-09\\n IRET, NYSE, -5.88, 2013-03-13\\n CM, NYSE, 0.00, 2013-03-01\\nHEI.A, NYSE, -18.60, 2013-02-21\\n UBA, NYSE, 13.04, 2013-03-07\\n KFY, NYSE, 6.90, 2013-03-07\\n TGT, NYSE, 12.24, 2013-02-28\\n KKD, NYSE, 0.00, 2013-03-15\\n NDZ, NYSE, 0.00, 2013-03-06\\n MVC, NYSE, -20.00, 2013-03-08\\n CBK, NYSE, 52.17, 2013-03-14\\n SJM, NYSE, 7.30, 2013-02-16\\n BIG, NYSE, 5.03, 2013-03-07\\n IDT, NYSE, -7.14, 2013-03-08\\n JOY, NYSE, 14.91, 2013-02-28\\n SSI, NYSE, -5.93, 2013-03-13\\n GME, NYSE, 3.35, 2013-03-29\\n DKS, NYSE, -3.74, 2013-03-12\\n A, NYSE, -5.97, 2013-02-15\\n MTN, NYSE, -3.51, 2013-03-07\\n GES, NYSE, 10.47, 2013-03-21\\n CRM, NYSE, 66.67, 2013-03-01\\n NWY, NYSE, 25.00, 2013-03-22\\n PAY, NYSE, 8.11, 2013-03-06\\n DSW, NYSE, -4.17, 2013-03-20\\n NX, NYSE, -183.33, 2013-03-08\\n AGX, NYSE, 15.00, 2013-04-11\\n CMD, NYSE, -5.26, 2013-03-08\\n DG, NYSE, 7.78, 2013-03-26\\n EXPR, NYSE, 1.35, 2013-03-14\\n P, NYSE, 0.00, 2013-03-07\\n GWRE, NYSE, 181.82, 2013-02-27\\n BLOX, NYSE, -20.00, 2013-02-22\\n TLYS, NYSE, 6.67, 2013-03-21\\n PANW, NYSE, -250.00, 2013-03-01\\n WDAY, NYSE, 24.00, 2013-03-08\\n RH, NYSE, 4.92, 2013-04-19\\n AIR, NYSE, 4.55, 2013-03-20\\n ATU, NYSE, -5.41, 2013-03-21\\n AZO, NYSE, 0.84, 2013-02-27\\n AZZ, NYSE, 2.04, 2013-04-09\\n CAG, NYSE, -3.51, 2013-04-04\\n CLC, NYSE, 2.17, 2013-03-21\\n CMC, NYSE, -80.00, 2013-03-29\\n KMX, NYSE, 0.00, 2013-04-11\\n FC, NYSE, -27.27, 2013-04-05\\n FDO, NYSE, -0.82, 2013-04-11\\n FDX, NYSE, -10.87, 2013-03-21\\n FUL, NYSE, -3.92, 2013-03-28\\n GIS, NYSE, 12.28, 2013-03-21\\n KBH, NYSE, 30.43, 2013-03-22\\n LEN, NYSE, 100.00, 2013-03-21\\n LNN, NYSE, 16.28, 2013-03-28\\n LUB, NYSE, -100.00, 2013-03-21\\n MKC, NYSE, 1.79, 2013-04-03\\n RT, NYSE, 0.00, 2013-04-11\\n MSM, NYSE, 0.00, 2013-04-11\\n NKE, NYSE, 8.96, 2013-03-22\\n ORCL, NYSE, -1.56, 2013-03-21\\n PIR, NYSE, 0.00, 2013-04-12\\n PKE, NYSE, -21.43, 2013-05-10\\n RPM, NYSE, 16.67, 2013-04-05\\n SVU, NYSE, -200.00, 2013-04-25\\n TXI, NYSE, 25.00, 2013-03-28\\n UNF, NYSE, 18.75, 2013-03-28\\n WGO, NYSE, 37.50, 2013-03-29\\n WOR, NYSE, 6.12, 2013-03-22\\n JBL, NYSE, -2.17, 2013-03-21\\n GBX, NYSE, 21.62, 2013-04-05\\n DRI, NYSE, 0.99, 2013-03-23\\n FDS, NYSE, -21.24, 2013-03-20\\n SCS, NYSE, 0.00, 2013-03-28\\n SJR, NYSE, 5.56, 2013-04-13\\n RHT, NYSE, 19.05, 2013-03-28\\n OMN, NYSE, -75.00, 2013-04-04\\n MON, NYSE, 7.06, 2013-04-04\\n GPN, NYSE, -1.14, 2013-04-03\\n AYI, NYSE, 0.00, 2013-04-04\\n CCL, NYSE, 100.00, 2013-03-16\\n CUK, NYSE, 33.33, 2013-03-16\\n STZ, NYSE, 4.44, 2013-04-11\\n ACN, NYSE, 3.09, 
2013-03-29\\n SNX, NYSE, 1.15, 2013-03-28\\n TAL, NYSE, 50.00, 2013-04-24\\n IHS, NYSE, 11.90, 2013-03-22\\n EDU, NYSE, 63.64, 2013-04-25\\n KED, NYSE, -99.22, 2013-05-02\\n CORR, NYSE, -9.09, 2013-05-11\\n DFS, NYSE, 18.75, 2013-04-24\\n ZEP, NYSE, 54.55, 2013-04-10\\n MG, NYSE, -58.82, 2013-04-09\\n MOS, NYSE, 5.62, 2013-03-28\\n ABT, NYSE, 0.00, 2013-04-18\\n ABX, NYSE, 6.98, 2013-04-25\\n AB, NYSE, 8.57, 2013-05-02\\n ACO, NYSE, -10.64, 2013-04-27\\n ADM, NYSE, -5.88, 2013-05-01\\n AEM, NYSE, -35.29, 2013-04-26\\n AEP, NYSE, 0.00, 2013-04-27\\n AES, NYSE, -14.29, 2013-05-10\\n AET, NYSE, 8.70, 2013-05-01\\n AFL, NYSE, 4.32, 2013-04-25\\n AGCO, NYSE, 35.23, 2013-05-01\\n HES, NYSE, 24.20, 2013-04-25\\n AIG, NYSE, 52.27, 2013-05-03\\n AIN, NYSE, 0.00, 2013-05-02\\n AJG, NYSE, 33.33, 2013-05-01\\n ALU, NYSE, -81.82, 2013-04-27\\n MATX, NYSE, 31.25, 2013-05-07\\n ALK, NYSE, 15.09, 2013-04-26\\n ALX, NYSE, -2.56, 2013-05-07\\n BEAM, NYSE, 18.52, 2013-05-03\\n AME, NYSE, 3.92, 2013-04-26\\n TWX, NYSE, 9.33, 2013-05-02\\n AVD, NYSE, 47.50, 2013-05-03\\n AMN, NYSE, 33.33, 2013-05-03\\n AN, NYSE, 7.94, 2013-04-19\\n AON, NYSE, 0.00, 2013-04-27\\n APA, NYSE, -9.01, 2013-05-10\\n APC, NYSE, 17.39, 2013-05-07\\n APD, NYSE, 0.00, 2013-04-24\\n APH, NYSE, 1.16, 2013-04-19\\n ARG, NYSE, 0.88, 2013-05-03\\n AAN, NYSE, -5.63, 2013-04-26\\n ARW, NYSE, 3.49, 2013-05-02\\n ASGN, NYSE, 94.44, 2013-04-25\\n ASH, NYSE, 14.10, 2013-04-25\\n ASR, NYSE, -13.25, 2013-04-23\\n GAS, NYSE, -2.96, 2013-05-01\\n ATO, NYSE, 1.63, 2013-05-02\\n ATW, NYSE, 2.40, 2013-05-02\\n AU, NYSE, -26.67, 2013-05-14\\n AVP, NYSE, 85.71, 2013-05-01\\n AVT, NYSE, 3.45, 2013-04-26\\n AVY, NYSE, 3.51, 2013-04-25\\n AXP, NYSE, 3.60, 2013-04-18\\n B, NYSE, -11.11, 2013-04-27\\n BA, NYSE, 17.69, 2013-04-25\\n BAC, NYSE, -13.04, 2013-04-17\\n BAX, NYSE, 0.96, 2013-04-19\\n BC, NYSE, 22.58, 2013-04-26\\n OMX, NYSE, -52.17, 2013-05-08\\n BCE, NYSE, 10.00, 2013-05-10\\n BCR, NYSE, 0.00, 2013-04-24\\n BDX, NYSE, 6.67, 2013-05-03\\n BEN, NYSE, 8.47, 2013-05-01\\n BGG, NYSE, -17.59, 2013-04-20\\n BHE, NYSE, 10.00, 2013-04-26\\n BHI, NYSE, 4.84, 2013-04-20\\n BID, NYSE, -175.00, 2013-05-10\\n BIO, NYSE, -38.18, 2013-05-08\\n BK, NYSE, 9.62, 2013-04-18\\n BKH, NYSE, 19.18, 2013-05-03\\n WRB, NYSE, 0.00, 2013-04-24\\n BLC, NYSE, 6.67, 2013-04-26\\n BLL, NYSE, -9.38, 2013-04-26\\n BLX, NYSE, -21.82, 2013-04-18\\n BMI, NYSE, -58.33, 2013-04-17\\n BMS, NYSE, -1.85, 2013-04-26\\n BMY, NYSE, 0.00, 2013-04-26\\n BOH, NYSE, -6.90, 2013-04-23\\n BXS, NYSE, 4.76, 2013-04-23\\n BPL, NYSE, 19.44, 2013-05-04\\nBRK.A, NYSE, 197.70, 2013-05-04\\n BRO, NYSE, 5.13, 2013-04-16\\n BSX, NYSE, 0.00, 2013-04-26\\n MTRN, NYSE, -2.94, 2013-04-26\\n CAI, NYSE, -1.32, 2013-04-25\\n CAT, NYSE, -2.24, 2013-04-23\\n CB, NYSE, 12.44, 2013-04-23\\n CBI, NYSE, 15.49, 2013-05-03\\n CBM, NYSE, 85.00, 2013-05-04\\n CBU, NYSE, -1.96, 2013-04-24\\n CBT, NYSE, -7.25, 2013-05-01\\n CCC, NYSE, 20.00, 2013-05-07\\n CCE, NYSE, 2.63, 2013-04-26\\n C, NYSE, 9.32, 2013-04-16\\n CCK, NYSE, 4.17, 2013-04-18\\n CDE, NYSE, -74.07, 2013-05-10\\n CDI, NYSE, -40.91, 2013-05-03\\n CAH, NYSE, 26.32, 2013-05-03\\n CFR, NYSE, -4.21, 2013-04-25\\n CHD, NYSE, 5.56, 2013-05-03\\n CPK, NYSE, 14.93, 2013-05-03\\n CI, NYSE, 20.28, 2013-05-03\\n CIA, NYSE, 0.00, 2013-05-03\\n CKH, NYSE, -156.12, 2013-04-30\\n CL, NYSE, 0.00, 2013-04-26\\n CLF, NYSE, 87.50, 2013-04-25\\n CLH, NYSE, 25.81, 2013-05-02\\n CLX, NYSE, -5.66, 2013-05-02\\n CMA, NYSE, 4.48, 2013-04-17\\n CMO, NYSE, 3.33, 2013-04-25\\n CRK, NYSE, 
-11.36, 2013-04-30\\n CMS, NYSE, 15.22, 2013-04-26\\n CNA, NYSE, 21.13, 2013-05-01\\n CNW, NYSE, -29.63, 2013-05-02\\n CHG, NYSE, 19.00, 2013-05-10\\n CNL, NYSE, -8.33, 2013-04-30\\n COG, NYSE, -20.00, 2013-04-25\\n COT, NYSE, -100.00, 2013-05-02\\n CP, NYSE, 2.54, 2013-04-25\\n CPF, NYSE, 105.00, 2013-04-27\\n CQB, NYSE, 28.57, 2013-05-08\\n CR, NYSE, -0.95, 2013-04-23\\nCRD.B, NYSE, -29.17, 2013-05-09\\n CRS, NYSE, -9.21, 2013-04-26\\n CSC, NYSE, 32.29, 2013-05-16\\n CSL, NYSE, 0.00, 2013-04-25\\n CTB, NYSE, 31.82, 2013-05-10\\n CTL, NYSE, 10.14, 2013-05-09\\n CTS, NYSE, 16.67, 2013-04-24\\n CUB, NYSE, 52.24, 2013-05-03\\n CMI, NYSE, -22.58, 2013-05-01\\n CUZ, NYSE, -8.33, 2013-05-09\\n CVC, NYSE, -185.71, 2013-05-10\\n CVH, NYSE, 26.58, 2013-05-02\\n CW, NYSE, 28.21, 2013-05-02\\n CWT, NYSE, -200.00, 2013-05-02\\n CX, NYSE, -140.00, 2013-04-27\\n CYN, NYSE, -2.17, 2013-04-19\\n D, NYSE, -7.78, 2013-04-26\\n DBD, NYSE, -125.00, 2013-05-01\\n DCO, NYSE, -18.60, 2013-05-07\\n DD, NYSE, 1.30, 2013-04-24\\n CVA, NYSE, -61.54, 2013-04-18\\n DHR, NYSE, -1.32, 2013-04-19\\n DIS, NYSE, 2.60, 2013-05-08\\n DLX, NYSE, 3.41, 2013-04-26\\n DNB, NYSE, 2.26, 2013-05-03\\n RRD, NYSE, 12.12, 2013-04-26\\n DOV, NYSE, 1.85, 2013-04-18\\n DOW, NYSE, 15.00, 2013-04-26\\n DRE, NYSE, 0.00, 2013-04-25\\n DHI, NYSE, 60.00, 2013-04-27\\n UFS, NYSE, -35.37, 2013-04-26\\n DTE, NYSE, 30.10, 2013-04-27\\n DUK, NYSE, -1.92, 2013-05-04\\n DVN, NYSE, 17.86, 2013-05-02\\n DV, NYSE, 8.43, 2013-04-24\\n EAT, NYSE, 4.35, 2013-04-24\\n ECL, NYSE, 3.45, 2013-05-01\\n ED, NYSE, 4.85, 2013-05-03\\n EDE, NYSE, 11.11, 2013-04-26\\n EFX, NYSE, 0.00, 2013-04-25\\n EGN, NYSE, -7.32, 2013-04-30\\n EGP, NYSE, -1.30, 2013-04-19\\n ELP, NYSE, 0.00, 2013-05-17\\n ELY, NYSE, 65.00, 2013-04-26\\n EMC, NYSE, 3.23, 2013-04-25\\n EMR, NYSE, -1.28, 2013-05-08\\n EOG, NYSE, 59.29, 2013-05-07\\n EQT, NYSE, 26.92, 2013-04-26\\n ESE, NYSE, -17.65, 2013-05-08\\n ESV, NYSE, 5.43, 2013-04-30\\n ETN, NYSE, 6.33, 2013-04-30\\n ETR, NYSE, 0.00, 2013-04-26\\n EXAR, NYSE, 16.67, 2013-05-01\\n F, NYSE, 7.89, 2013-04-25\\n CLGX, NYSE, 8.11, 2013-04-25\\n FNB, NYSE, -4.76, 2013-04-24\\n FCF, NYSE, 0.00, 2013-04-24\\n FBP, NYSE, -122.22, 2013-05-04\\n FICO, NYSE, -9.38, 2013-04-25\\n FLO, NYSE, 6.98, 2013-05-17\\n FMC, NYSE, 1.85, 2013-05-01\\n FOE, NYSE, 66.67, 2013-04-25\\n S, NYSE, 38.24, 2013-04-25\\n NEE, NYSE, 10.89, 2013-05-01\\n FRT, NYSE, 0.88, 2013-05-02\\n FRX, NYSE, 47.06, 2013-04-24\\n FSS, NYSE, 20.00, 2013-05-07\\n FUN, NYSE, 24.32, 2013-05-09\\n FUR, NYSE, 77.78, 2013-05-03\\n GBL, NYSE, 17.86, 2013-05-08\\n GVA, NYSE, -103.85, 2013-05-10\\n BGC, NYSE, -319.23, 2013-05-01\\n GD, NYSE, 8.00, 2013-04-25\\n GE, NYSE, 11.43, 2013-04-20\\n RHP, NYSE, 26.47, 2013-05-08\\n AXLL, NYSE, -38.02, 2013-05-08\\n GGG, NYSE, 15.07, 2013-04-25\\n GHM, NYSE, 28.13, 2013-06-01\\n GIB, NYSE, 14.58, 2013-05-01\\n GLT, NYSE, 17.65, 2013-05-01\\n GLW, NYSE, 15.38, 2013-04-25\\n GSK, NYSE, 6.49, 2013-04-26\\n GLF, NYSE, 175.00, 2013-04-30\\n GNI, NYSE, -14.58, 2013-04-26\\n GPC, NYSE, -6.06, 2013-04-20\\n GRA, NYSE, 0.00, 2013-04-25\\n GTY, NYSE, 0.00, 2013-05-03\\n GWW, NYSE, 7.69, 2013-04-17\\n HAE, NYSE, 4.35, 2013-05-02\\n HAL, NYSE, 17.54, 2013-04-23\\n HAR, NYSE, 25.40, 2013-05-03\\n HVT, NYSE, 33.33, 2013-05-02\\n HRC, NYSE, -2.00, 2013-04-25\\n HCC, NYSE, 31.71, 2013-05-01\\n HCN, NYSE, 1.11, 2013-05-08\\n HCP, NYSE, 2.78, 2013-05-01\\n HOG, NYSE, 2.06, 2013-04-26\\n HE, NYSE, -12.82, 2013-05-09\\n HL, NYSE, -66.67, 2013-05-11\\n HMA, NYSE, 0.00, 
2013-05-03\\n HMC, NYSE, -28.57, 2013-04-27\\n HMN, NYSE, 7.84, 2013-04-25\\n HFC, NYSE, -7.91, 2013-05-08\\n HOT, NYSE, 43.40, 2013-05-01\\n HP, NYSE, 5.43, 2013-04-26\\n HLS, NYSE, 14.29, 2013-04-26\\n HRS, NYSE, 0.00, 2013-05-01\\n HSC, NYSE, 50.00, 2013-05-10\\n HSY, NYSE, 4.81, 2013-04-26\\n HUBB, NYSE, -0.90, 2013-04-19\\n HUM, NYSE, 51.12, 2013-05-02\\n HXL, NYSE, 4.88, 2013-04-23\\n IBM, NYSE, -1.96, 2013-04-19\\n IDA, NYSE, 17.54, 2013-05-03\\n IEX, NYSE, 4.23, 2013-04-23\\n IFF, NYSE, 5.31, 2013-05-08\\n DIN, NYSE, 12.87, 2013-05-03\\n INT, NYSE, 14.06, 2013-05-01\\n IP, NYSE, -12.16, 2013-05-03\\n IPG, NYSE, -7.69, 2013-04-20\\n IO, NYSE, -85.71, 2013-05-01\\n IR, NYSE, 2.44, 2013-04-24\\n IRF, NYSE, 27.50, 2013-04-30\\n ITW, NYSE, 0.00, 2013-04-24\\n JEC, NYSE, -2.44, 2013-04-30\\n JNJ, NYSE, 2.13, 2013-04-17\\n JNY, NYSE, 0.00, 2013-05-02\\n K, NYSE, 0.00, 2013-05-03\\n KAMN, NYSE, -2.94, 2013-04-30\\n KDN, NYSE, 5.71, 2013-05-10\\n KEX, NYSE, 2.15, 2013-04-25\\n KEY, NYSE, 5.00, 2013-04-19\\n KIM, NYSE, 3.13, 2013-05-01\\n KMB, NYSE, 10.45, 2013-04-20\\n KEM, NYSE, -133.33, 2013-05-10\\n KMT, NYSE, -8.45, 2013-04-26\\n KO, NYSE, 2.22, 2013-04-17\\n KSU, NYSE, 2.30, 2013-04-20\\n LDR, NYSE, -9.52, 2013-05-07\\n LEG, NYSE, -13.16, 2013-04-26\\n LLY, NYSE, 8.57, 2013-04-25\\n LM, NYSE, -13.33, 2013-05-01\\n LNC, NYSE, -7.27, 2013-05-02\\n LPX, NYSE, 0.00, 2013-05-08\\n LXU, NYSE, -110.53, 2013-05-07\\n LTC, NYSE, -1.67, 2013-05-01\\n L, NYSE, 1.19, 2013-04-30\\n LUV, NYSE, 133.33, 2013-04-26\\n LUX, NYSE, 7.14, 2013-05-02\\n MKL, NYSE, 40.11, 2013-05-01\\n MAN, NYSE, 40.00, 2013-04-20\\n MTW, NYSE, -35.71, 2013-05-01\\n SM, NYSE, 46.43, 2013-05-01\\n MAS, NYSE, -7.14, 2013-04-30\\n MTZ, NYSE, 12.50, 2013-05-03\\n MCD, NYSE, -0.79, 2013-04-20\\n MDC, NYSE, 73.08, 2013-05-03\\n MDP, NYSE, 4.35, 2013-04-26\\n MDR, NYSE, -40.00, 2013-05-09\\n MDU, NYSE, 36.36, 2013-05-01\\n MED, NYSE, 26.47, 2013-05-09\\n CVS, NYSE, 5.06, 2013-05-02\\n MFC, NYSE, 18.52, 2013-05-03\\n MGA, NYSE, 13.57, 2013-05-11\\n MGM, NYSE, 130.00, 2013-05-03\\n MMC, NYSE, 4.29, 2013-05-03\\n MMM, NYSE, -2.42, 2013-04-26\\n MSA, NYSE, -20.31, 2013-04-25\\n MNR, NYSE, -7.69, 2013-05-09\\n MO, NYSE, 1.89, 2013-04-26\\n MOD, NYSE, 5.88, 2013-05-31\\nMOG.A, NYSE, -1.23, 2013-04-27\\n MHK, NYSE, 3.57, 2013-05-03\\n MSI, NYSE, -1.79, 2013-04-25\\n MCY, NYSE, 46.81, 2013-04-30\\n MRK, NYSE, 8.97, 2013-05-02\\n MRO, NYSE, -28.17, 2013-05-08\\n POWR, NYSE, 0.00, 2013-05-09\\n MTG, NYSE, -60.00, 2013-05-01\\n MTB, NYSE, 6.19, 2013-04-16\\n MTX, NYSE, 0.00, 2013-04-26\\n MUR, NYSE, 11.34, 2013-05-02\\n MYE, NYSE, -11.11, 2013-04-25\\n NBL, NYSE, 21.31, 2013-04-26\\n NBR, NYSE, 13.79, 2013-04-24\\n NE, NYSE, 3.51, 2013-04-18\\n NEM, NYSE, -8.97, 2013-04-30\\n NFG, NYSE, 7.37, 2013-05-03\\n NHI, NYSE, 4.94, 2013-05-07\\n NI, NYSE, -1.43, 2013-05-01\\n NJR, NYSE, 3.16, 2013-05-03\\n THC, NYSE, 17.86, 2013-05-01\\n NNN, NYSE, 4.35, 2013-05-03\\n NOC, NYSE, 12.14, 2013-04-25\\n NR, NYSE, 5.88, 2013-04-26\\n NSC, NYSE, 3.39, 2013-04-24\\n NUE, NYSE, 4.00, 2013-04-19\\n NVR, NYSE, -9.64, 2013-04-23\\n NWL, NYSE, 9.38, 2013-05-04\\n NWN, NYSE, -5.41, 2013-05-03\\n NYT, NYSE, -20.00, 2013-04-26\\n OCR, NYSE, 4.65, 2013-04-25\\n OGE, NYSE, -32.35, 2013-05-03\\n OHI, NYSE, 5.08, 2013-05-08\\n OI, NYSE, 7.14, 2013-04-24\\n OII, NYSE, 16.95, 2013-04-24\\n OKE, NYSE, -6.90, 2013-05-01\\n OLN, NYSE, 10.64, 2013-04-26\\n BRS, NYSE, -1.94, 2013-05-23\\n OMC, NYSE, 1.33, 2013-04-19\\n OMI, NYSE, 4.76, 2013-04-24\\n ORB, NYSE, 43.48, 
2013-04-24\\n ORI, NYSE, 600.00, 2013-04-26\\n OSK, NYSE, 12.94, 2013-05-01\\n OXY, NYSE, 7.64, 2013-04-26\\n FCFS, NYSE, 0.00, 2013-04-18\\n PBI, NYSE, 0.00, 2013-05-01\\n PCG, NYSE, -10.00, 2013-05-03\\n PCL, NYSE, 9.38, 2013-04-30\\n PCP, NYSE, 1.81, 2013-05-10\\n TPC, NYSE, 34.78, 2013-05-02\\n PDS, NYSE, 14.29, 2013-04-26\\n PEG, NYSE, 14.86, 2013-05-01\\n PEI, NYSE, 4.76, 2013-04-23\\n PEP, NYSE, 8.45, 2013-04-19\\n PFE, NYSE, -1.82, 2013-05-01\\n PG, NYSE, 3.13, 2013-04-25\\n PGR, NYSE, -4.55, 2013-04-11\\n PH, NYSE, 0.60, 2013-04-26\\n PHM, NYSE, 31.25, 2013-04-26\\n PKD, NYSE, 200.00, 2013-05-02\\n PKY, NYSE, 15.38, 2013-05-07\\n PNC, NYSE, 12.10, 2013-04-18\\n PNM, NYSE, -10.00, 2013-05-07\\n PNR, NYSE, 3.57, 2013-04-24\\n PNW, NYSE, 175.00, 2013-05-04\\n POM, NYSE, -4.00, 2013-05-04\\n POT, NYSE, 3.28, 2013-04-26\\n PPG, NYSE, 1.28, 2013-04-19\\n PPL, NYSE, 0.00, 2013-05-03\\n PRGO, NYSE, -1.39, 2013-05-08\\n PL, NYSE, -4.30, 2013-05-07\\n PSB, NYSE, 0.00, 2013-05-07\\n WTR, NYSE, 7.41, 2013-05-02\\n CSH, NYSE, 8.21, 2013-04-26\\n PWR, NYSE, 24.14, 2013-05-03\\n PX, NYSE, 0.00, 2013-04-25\\n KWR, NYSE, 14.29, 2013-04-30\\n R, NYSE, 1.28, 2013-04-24\\n RBC, NYSE, -6.09, 2013-05-01\\n RDC, NYSE, 5.77, 2013-05-02\\n HTSI, NYSE, 11.67, 2013-05-03\\n RES, NYSE, -33.33, 2013-04-25\\n RGS, NYSE, -90.77, 2013-05-08\\n RGR, NYSE, 15.38, 2013-04-30\\n RHI, NYSE, -2.44, 2013-04-24\\n RJF, NYSE, -9.33, 2013-04-25\\n RLI, NYSE, -1.89, 2013-04-18\\n ROG, NYSE, 0.00, 2013-05-01\\n ROK, NYSE, 2.31, 2013-04-25\\n ROL, NYSE, -5.88, 2013-04-25\\n ROP, NYSE, 4.10, 2013-04-30\\n RTI, NYSE, 20.00, 2013-05-01\\n RTN, NYSE, 21.88, 2013-04-26\\n RYL, NYSE, 43.33, 2013-04-25\\n BSAC, NYSE, -21.74, 2013-04-26\\n T, NYSE, 0.00, 2013-04-24\\n SCG, NYSE, 7.77, 2013-04-26\\n SCHW, NYSE, -6.25, 2013-04-16\\n SCL, NYSE, -4.08, 2013-05-01\\n SMG, NYSE, -19.60, 2013-05-07\\n SEE, NYSE, -5.56, 2013-05-02\\n SF, NYSE, 1.75, 2013-05-10\\n SFE, NYSE, -46.15, 2013-04-26\\n SHW, NYSE, 2.78, 2013-04-19\\n SJI, NYSE, -8.43, 2013-05-04\\n JOE, NYSE, -200.00, 2013-05-09\\n SJW, NYSE, -12.50, 2013-04-25\\n SLB, NYSE, 2.02, 2013-04-20\\n HSH, NYSE, 9.38, 2013-05-03\\n AOS, NYSE, 24.68, 2013-04-24\\n SMP, NYSE, 31.25, 2013-05-04\\n SNA, NYSE, 4.48, 2013-04-19\\n PII, NYSE, 5.94, 2013-04-24\\n SNV, NYSE, 0.00, 2013-04-24\\n SO, NYSE, -3.92, 2013-04-25\\n SON, NYSE, -5.66, 2013-04-19\\n SPA, NYSE, -46.15, 2013-05-08\\n TRV, NYSE, 14.93, 2013-04-24\\n SR, NYSE, -3.36, 2013-05-01\\n NVE, NYSE, 12.50, 2013-05-04\\n SCI, NYSE, 21.74, 2013-04-25\\n SSP, NYSE, 58.33, 2013-05-07\\n STT, NYSE, 3.23, 2013-04-20\\n STI, NYSE, 3.28, 2013-04-20\\n STJ, NYSE, 0.00, 2013-04-18\\n STL, NYSE, 7.14, 2013-04-23\\n STR, NYSE, -2.38, 2013-05-01\\n STE, NYSE, 6.06, 2013-05-08\\n SYK, NYSE, 1.98, 2013-04-25\\n SUN, NYSE, -7.32, 2013-05-09\\n SUP, NYSE, 5.88, 2013-05-04\\n SWK, NYSE, 7.29, 2013-04-26\\n SWN, NYSE, 7.69, 2013-05-03\\n SWX, NYSE, 0.61, 2013-05-04\\n SWY, NYSE, -2.78, 2013-04-26\\n SYY, NYSE, 16.67, 2013-05-07\\n TAC, NYSE, -33.33, 2013-04-24\\n TNC, NYSE, -17.14, 2013-04-23\\n TCB, NYSE, -15.79, 2013-04-20\\n TCO, NYSE, 7.14, 2013-04-26\\n TDS, NYSE, 350.00, 2013-05-04\\n TDW, NYSE, 55.74, 2013-05-22\\n TDY, NYSE, 10.31, 2013-04-25\\n TE, NYSE, 11.76, 2013-05-01\\n TER, NYSE, 200.00, 2013-04-25\\n TEVA, NYSE, 1.82, 2013-05-03\\n TEX, NYSE, -17.86, 2013-04-25\\n TFX, NYSE, 1.98, 2013-05-01\\n TEN, NYSE, 10.77, 2013-04-30\\n TKR, NYSE, 0.00, 2013-04-25\\n TMK, NYSE, 1.46, 2013-04-24\\n TMO, NYSE, 6.20, 2013-04-25\\n TOT, NYSE, -2.38, 
2013-04-27\\n TM, NYSE, 80.67, 2013-05-09\\n TR, NYSE, -11.76, 2013-04-25\\n TRN, NYSE, 13.75, 2013-05-01\\n TRP, NYSE, -8.93, 2013-04-27\\n TSO, NYSE, 2.82, 2013-05-02\\n TSS, NYSE, -2.94, 2013-04-24\\n TTI, NYSE, -40.00, 2013-05-09\\n TXT, NYSE, -14.89, 2013-04-18\\n TYL, NYSE, 26.09, 2013-04-25\\n TSN, NYSE, -21.74, 2013-05-07\\n UDR, NYSE, 3.03, 2013-05-01\\n UFI, NYSE, -43.75, 2013-04-25\\n UAM, NYSE, 17.65, 2013-04-30\\n UHS, NYSE, 5.17, 2013-04-25\\n UIL, NYSE, 3.06, 2013-05-03\\n UIS, NYSE, -145.61, 2013-04-24\\n UNH, NYSE, 0.00, 2013-04-19\\n KMPR, NYSE, 35.85, 2013-05-03\\n UNM, NYSE, 2.56, 2013-05-02\\n UNP, NYSE, 3.57, 2013-04-19\\n UNT, NYSE, 6.98, 2013-05-08\\n URS, NYSE, -14.29, 2013-05-08\\n USG, NYSE, -88.89, 2013-04-25\\n MUX, NYSE, -300.00, 2013-05-10\\n USM, NYSE, 214.29, 2013-05-04\\n USPH, NYSE, -3.12, 2013-05-10\\n UTL, NYSE, -9.20, 2013-04-24\\n UTX, NYSE, -1.54, 2013-04-24\\n VMI, NYSE, 15.60, 2013-04-19\\n VAR, NYSE, 2.97, 2013-04-25\\n CBS, NYSE, 7.35, 2013-05-02\\n VLO, NYSE, 16.83, 2013-05-01\\n VMC, NYSE, -24.32, 2013-05-03\\n VLY, NYSE, -11.11, 2013-04-25\\n VNO, NYSE, -38.38, 2013-05-07\\n VSH, NYSE, 63.64, 2013-05-01\\n WTS, NYSE, -14.04, 2013-05-01\\n WBS, NYSE, -2.22, 2013-04-16\\n WEC, NYSE, 7.04, 2013-05-01\\n WFC, NYSE, 5.75, 2013-04-13\\n WG, NYSE, -2400.00, 2013-05-09\\n WGL, NYSE, 19.05, 2013-05-02\\n WHR, NYSE, 1.03, 2013-04-25\\n WMB, NYSE, -8.33, 2013-05-08\\n WNC, NYSE, 0.00, 2013-05-01\\n TEG, NYSE, 10.69, 2013-05-02\\n WR, NYSE, 33.33, 2013-05-09\\n WRE, NYSE, -4.35, 2013-04-26\\n WRI, NYSE, 4.35, 2013-05-01\\n WPP, NYSE, 33.33, 2013-04-30\\n WSO, NYSE, 18.18, 2013-04-19\\n WST, NYSE, 1.16, 2013-05-03\\n WWW, NYSE, 50.00, 2013-04-17\\n WY, NYSE, 18.18, 2013-04-27\\n X, NYSE, -84.21, 2013-05-01\\n XL, NYSE, 38.81, 2013-05-03\\n XOM, NYSE, 4.43, 2013-04-26\\n XRX, NYSE, 12.50, 2013-04-24\\n Y, NYSE, 53.96, 2013-05-07\\n HRG, NYSE, 60.00, 2013-05-10\\n CRY, NYSE, 28.57, 2013-05-01\\n CHK, NYSE, 30.43, 2013-05-02\\n DDR, NYSE, 0.00, 2013-05-01\\n ELS, NYSE, 0.71, 2013-04-23\\n ALG, NYSE, 5.56, 2013-05-02\\n ETH, NYSE, -22.22, 2013-04-24\\n ATR, NYSE, -3.03, 2013-04-26\\n GGP, NYSE, 4.17, 2013-04-30\\n MSL, NYSE, 3.70, 2013-05-01\\n RCL, NYSE, 84.21, 2013-04-26\\n CWEI, NYSE, -61.22, 2013-04-25\\n HR, NYSE, 0.00, 2013-05-02\\n RGA, NYSE, 2.48, 2013-04-26\\n RIG, NYSE, -7.92, 2013-05-09\\n SKT, NYSE, 2.44, 2013-05-01\\n TWI, NYSE, -16.28, 2013-04-25\\n BDN, NYSE, 2.94, 2013-04-25\\n KGC, NYSE, 25.00, 2013-05-08\\n CPT, NYSE, 2.11, 2013-05-03\\n SGY, NYSE, 18.84, 2013-05-07\\n BFS, NYSE, -24.49, 2013-05-01\\n BWA, NYSE, 6.56, 2013-04-26\\n EQR, NYSE, -1.54, 2013-05-01\\n CLP, NYSE, 3.03, 2013-04-26\\n KOF, NYSE, -16.24, 2013-04-25\\n OKS, NYSE, -27.59, 2013-05-01\\n SQM, NYSE, -6.45, 2013-05-29\\n BYD, NYSE, 114.29, 2013-04-25\\n CBL, NYSE, 3.92, 2013-04-30\\n DECK, NYSE, 133.33, 2013-04-26\\n IT, NYSE, -2.50, 2013-05-03\\n HST, NYSE, 21.74, 2013-05-04\\n LXP, NYSE, 0.00, 2013-05-03\\n REG, NYSE, 3.23, 2013-05-08\\n TUC, NYSE, -24.00, 2013-05-03\\n AF, NYSE, 7.69, 2013-04-18\\n BFR, NYSE, -2.56, 2013-05-11\\n HHS, NYSE, 10.00, 2013-04-26\\n MHO, NYSE, 28.57, 2013-04-26\\n NFX, NYSE, -2.17, 2013-04-24\\n SPG, NYSE, 1.99, 2013-04-27\\n SU, NYSE, -1.41, 2013-04-30\\n SUI, NYSE, 2.20, 2013-04-26\\n TV, NYSE, -22.50, 2013-04-26\\n CGI, NYSE, -26.92, 2013-04-26\\n CYT, NYSE, -12.79, 2013-04-19\\n EMN, NYSE, 3.18, 2013-04-26\\n GRT, NYSE, 14.29, 2013-04-25\\n MAA, NYSE, 5.04, 2013-05-02\\n PLT, NYSE, 4.62, 2013-05-08\\n BZH, NYSE, 15.38, 2013-05-03\\n ELX, 
NYSE, 114.29, 2013-05-03\\n MLM, NYSE, -69.44, 2013-05-01\\n AKS, NYSE, 41.67, 2013-04-24\\n ALB, NYSE, -7.00, 2013-04-18\\n VRX, NYSE, 1.56, 2013-05-03\\n CBR, NYSE, 0.00, 2013-05-01\\n MAC, NYSE, 8.86, 2013-05-02\\n RKT, NYSE, 9.80, 2013-04-24\\n RYN, NYSE, 27.42, 2013-04-26\\n ADC, NYSE, -2.00, 2013-04-30\\nBRK.B, NYSE, 52.31, 2013-05-04\\n EXP, NYSE, 5.00, 2013-05-15\\n GGB, NYSE, -66.67, 2013-05-08\\n SSD, NYSE, -52.38, 2013-04-26\\n ESS, NYSE, -0.53, 2013-05-02\\n FR, NYSE, -7.69, 2013-04-26\\n HIW, NYSE, -2.90, 2013-05-01\\n IMAX, NYSE, 0.00, 2013-04-26\\n AIV, NYSE, 2.13, 2013-05-03\\n FCH, NYSE, 0.00, 2013-05-01\\n ITGR, NYSE, 2.33, 2013-04-26\\n NOK, NYSE, 33.33, 2013-04-19\\n GEO, NYSE, -3.51, 2013-05-09\\n CLI, NYSE, 0.00, 2013-04-26\\n RS, NYSE, -5.22, 2013-04-26\\n CPE, NYSE, 100.00, 2013-05-10\\n KNX, NYSE, 0.00, 2013-04-25\\n O, NYSE, 1.69, 2013-04-26\\n COF, NYSE, 17.79, 2013-04-19\\n IRS, NYSE, 10.34, 2013-05-18\\n MCK, NYSE, -0.43, 2013-05-08\\n SWC, NYSE, 200.00, 2013-04-30\\n STM, NYSE, 23.53, 2013-04-23\\n TEO, NYSE, 1.30, 2013-04-30\\n TRK, NYSE, -400.00, 2013-05-02\\n LMT, NYSE, 23.38, 2013-04-24\\n APU, NYSE, -35.48, 2013-05-16\\n AGU, NYSE, -12.15, 2013-05-10\\n LH, NYSE, -1.69, 2013-04-20\\n DDD, NYSE, -10.00, 2013-05-01\\n AFG, NYSE, 10.84, 2013-05-09\\n RMD, NYSE, 3.51, 2013-04-26\\n WAB, NYSE, 3.60, 2013-04-25\\n CIB, NYSE, 6.78, 2013-05-08\\n CAM, NYSE, -5.41, 2013-04-26\\n FCX, NYSE, 1.39, 2013-04-19\\n RNR, NYSE, 34.25, 2013-05-02\\n AVX, NYSE, 7.14, 2013-04-25\\n RWT, NYSE, 46.81, 2013-05-03\\n AXE, NYSE, -6.62, 2013-04-24\\n CLB, NYSE, 6.09, 2013-04-18\\n MD, NYSE, 0.92, 2013-05-03\\n THG, NYSE, 30.69, 2013-04-30\\n BAP, NYSE, -10.94, 2013-05-07\\n DO, NYSE, 10.43, 2013-04-26\\n RE, NYSE, 36.11, 2013-04-23\\n DST, NYSE, -6.60, 2013-04-26\\n EL, NYSE, 36.36, 2013-05-03\\n ESC, NYSE, -57.14, 2013-05-03\\n LXK, NYSE, -7.55, 2013-04-24\\n MIG, NYSE, 7.69, 2013-05-01\\n WAT, NYSE, -1.83, 2013-04-24\\n EME, NYSE, 2.27, 2013-04-26\\n HIG, NYSE, 10.84, 2013-04-30\\n ITT, NYSE, 9.30, 2013-05-03\\n SPN, NYSE, 0.00, 2013-04-26\\n SWM, NYSE, 8.60, 2013-05-09\\n SCCO, NYSE, -4.84, 2013-04-27\\n RCI, NYSE, -1.27, 2013-04-23\\n EIX, NYSE, 20.31, 2013-05-01\\n IRM, NYSE, 0.00, 2013-05-02\\n SPH, NYSE, -4.82, 2013-05-10\\n CCJ, NYSE, 0.00, 2013-05-02\\n PGI, NYSE, 0.00, 2013-04-19\\n CRR, NYSE, -14.61, 2013-04-26\\n BVN, NYSE, -40.30, 2013-04-30\\n FCN, NYSE, 13.46, 2013-05-10\\n RPT, NYSE, 6.90, 2013-04-24\\n TUP, NYSE, 4.42, 2013-04-25\\n ASB, NYSE, 8.00, 2013-04-19\\n GWR, NYSE, -10.11, 2013-05-02\\n TBI, NYSE, -50.00, 2013-04-25\\n FFG, NYSE, 12.66, 2013-05-03\\n USNA, NYSE, 14.29, 2013-04-24\\n CSV, NYSE, -3.03, 2013-05-08\\n LVB, NYSE, 10.53, 2013-05-09\\n ALR, NYSE, 6.25, 2013-05-10\\n OCN, NYSE, 0.00, 2013-05-03\\n PAA, NYSE, 37.50, 2013-05-07\\n DNR, NYSE, 13.79, 2013-05-03\\n HMY, NYSE, -119.23, 2013-05-04\\n TGI, NYSE, 5.66, 2013-05-02\\n PAG, NYSE, 1.61, 2013-04-30\\n GEL, NYSE, -17.65, 2013-05-03\\n IM, NYSE, 0.00, 2013-04-26\\n NUS, NYSE, 13.92, 2013-05-03\\n CNI, NYSE, -1.67, 2013-04-23\\n LAD, NYSE, 16.67, 2013-04-25\\n NSP, NYSE, 0.00, 2013-04-30\\n DGX, NYSE, -14.42, 2013-04-18\\n KRC, NYSE, 0.00, 2013-05-01\\n MTH, NYSE, 32.00, 2013-04-25\\n NCR, NYSE, 35.00, 2013-05-01\\n OFG, NYSE, 2.78, 2013-04-26\\n IVZ, NYSE, 10.64, 2013-05-01\\n DX, NYSE, 9.68, 2013-05-02\\n FBC, NYSE, -65.98, 2013-04-24\\n ALV, NYSE, 1.57, 2013-04-27\\n ARE, NYSE, 0.00, 2013-04-30\\n BBT, NYSE, 2.99, 2013-04-19\\n CGG, NYSE, 6.25, 2013-05-04\\n BXP, NYSE, -0.83, 2013-05-01\\n 
CBD, NYSE, -23.73, 2013-05-01\\n MS, NYSE, 7.02, 2013-04-19\\n SRT, NYSE, -314.29, 2013-05-10\\n HLX, NYSE, 38.89, 2013-04-22\\n FLS, NYSE, 3.61, 2013-04-25\\n MT, NYSE, -400.00, 2013-05-11\\n PXD, NYSE, 5.15, 2013-05-02\\n SLG, NYSE, 0.83, 2013-04-24\\n NAT, NYSE, -16.22, 2013-05-14\\n CSU, NYSE, -36.36, 2013-05-07\\n DRQ, NYSE, 22.50, 2013-05-04\\n FDP, NYSE, -24.47, 2013-05-01\\n NLY, NYSE, 30.56, 2013-05-02\\n TLM, NYSE, -250.00, 2013-05-02\\n TSM, NYSE, 13.04, 2013-04-19\\n YUM, NYSE, 12.90, 2013-04-24\\n AMG, NYSE, 12.38, 2013-05-01\\n EPR, NYSE, -1.05, 2013-05-01\\n FE, NYSE, 10.14, 2013-05-08\\n LFL, NYSE, 80.00, 2013-05-15\\n MTD, NYSE, 2.79, 2013-05-03\\n SID, NYSE, -66.67, 2013-05-16\\n IN, NYSE, -271.43, 2013-05-04\\n CBZ, NYSE, 25.64, 2013-05-03\\n URI, NYSE, 11.54, 2013-04-17\\n INGR, NYSE, 6.82, 2013-05-03\\n RAS, NYSE, 181.82, 2013-05-03\\n UNS, NYSE, 35.00, 2013-04-30\\n ASI, NYSE, 18.92, 2013-05-09\\n ANH, NYSE, 15.38, 2013-04-30\\n OFC, NYSE, 17.07, 2013-04-27\\n GPX, NYSE, 0.00, 2013-05-03\\n WAC, NYSE, 1427.27, 2013-05-10\\n RBA, NYSE, -13.33, 2013-05-01\\n WDR, NYSE, 1.61, 2013-04-24\\n LHO, NYSE, 8.00, 2013-04-18\\n LNT, NYSE, 18.03, 2013-05-04\\n LVLT, NYSE, 7.14, 2013-04-26\\n MFA, NYSE, -4.76, 2013-05-02\\n OME, NYSE, 50.00, 2013-05-08\\n EQY, NYSE, 6.90, 2013-05-02\\n FII, NYSE, -2.38, 2013-04-26\\n FMX, NYSE, -37.89, 2013-04-25\\n LLL, NYSE, 3.63, 2013-04-26\\n VTR, NYSE, 4.04, 2013-04-27\\n WCN, NYSE, 20.00, 2013-05-02\\n AVB, NYSE, 0.74, 2013-05-01\\n GIL, NYSE, 5.36, 2013-05-03\\n HZO, NYSE, -92.86, 2013-04-26\\n AWR, NYSE, 38.00, 2013-05-11\\n CLS, NYSE, 10.00, 2013-04-24\\n EPD, NYSE, 16.67, 2013-05-01\\n RSG, NYSE, 15.00, 2013-04-26\\n WM, NYSE, -2.44, 2013-04-25\\n AKR, NYSE, 3.33, 2013-04-24\\n CVG, NYSE, 17.39, 2013-05-01\\n RRC, NYSE, -38.89, 2013-04-26\\n SAP, NYSE, 41.51, 2013-04-20\\n CCI, NYSE, 0.00, 2013-04-25\\n PQ, NYSE, 100.00, 2013-05-08\\n WFT, NYSE, 0.00, 2013-05-03\\n CAA, NYSE, 0.00, 2013-05-03\\n ENB, NYSE, 13.21, 2013-05-09\\n GMK, NYSE, 60.00, 2013-04-25\\n MMR, NYSE, 0.00, 2013-05-07\\n PB, NYSE, 2.38, 2013-04-25\\n VIV, NYSE, -20.00, 2013-05-08\\n AXL, NYSE, 53.33, 2013-05-04\\n BP, NYSE, 33.33, 2013-05-01\\n ETM, NYSE, 0.00, 2013-05-09\\n HT, NYSE, 0.00, 2013-05-01\\n BYI, NYSE, 10.71, 2013-04-25\\n CEB, NYSE, 1.64, 2013-05-02\\n INFY, NYSE, 5.41, 2013-04-13\\n JLL, NYSE, 56.52, 2013-05-01\\n AZN, NYSE, 5.22, 2013-04-26\\n SFG, NYSE, 33.75, 2013-04-24\\n TREX, NYSE, 14.68, 2013-05-04\\n GS, NYSE, 11.43, 2013-04-17\\n SYX, NYSE, -157.14, 2013-05-01\\n WCC, NYSE, -4.27, 2013-04-19\\n JNPR, NYSE, 33.33, 2013-04-24\\n RDN, NYSE, 28.57, 2013-05-02\\n RAI, NYSE, 4.35, 2013-04-24\\n SKX, NYSE, -27.78, 2013-05-16\\n WTM, NYSE, 178.02, 2013-04-30\\n NCI, NYSE, 12.50, 2013-04-26\\n BLT, NYSE, -17.39, 2013-05-08\\n QTM, NYSE, -33.33, 2013-05-09\\n BLK, NYSE, 1.67, 2013-04-17\\n CIR, NYSE, 4.00, 2013-05-03\\n MSO, NYSE, 12.50, 2013-05-01\\n PKG, NYSE, 10.71, 2013-04-23\\n PKI, NYSE, -25.00, 2013-04-26\\n WWE, NYSE, -37.50, 2013-05-03\\n SNN, NYSE, -2.11, 2013-05-03\\n UPS, NYSE, 2.97, 2013-04-26\\n XOXO, NYSE, 16.67, 2013-05-10\\n SLF, NYSE, 7.25, 2013-05-09\\n CDR, NYSE, 9.09, 2013-05-10\\n EW, NYSE, -5.26, 2013-04-24\\n MET, NYSE, 13.85, 2013-05-01\\n FBR, NYSE, -89.47, 2013-04-24\\n VVC, NYSE, -7.58, 2013-05-02\\n BAM, NYSE, 70.00, 2013-05-10\\n NVS, NYSE, 4.00, 2013-04-25\\n BHLB, NYSE, -1.82, 2013-04-30\\n CRL, NYSE, -2.82, 2013-05-02\\n CYH, NYSE, 3.57, 2013-04-30\\n MBT, NYSE, -13.04, 2013-06-08\\n MTOR, NYSE, 500.00, 2013-05-01\\n CNQ, 
NYSE, -44.19, 2013-05-03\\n ERJ, NYSE, -62.79, 2013-04-30\\n VZ, NYSE, 3.03, 2013-04-19\\n EVC, NYSE, 0.00, 2013-05-03\\n PBR, NYSE, 0.00, 2013-04-27\\n XEL, NYSE, 11.63, 2013-05-03\\n ALE, NYSE, 10.67, 2013-05-09\\n HW, NYSE, -30.00, 2013-05-01\\n POL, NYSE, 14.81, 2013-05-02\\n COH, NYSE, 3.70, 2013-04-24\\n CXW, NYSE, 6.38, 2013-05-09\\n DVA, NYSE, 3.37, 2013-05-08\\n EXC, NYSE, 4.41, 2013-05-02\\n MCO, NYSE, 11.49, 2013-05-04\\n BRFS, NYSE, 23.53, 2013-04-30\\n TU, NYSE, 3.77, 2013-05-10\\n WIT, NYSE, 0.00, 2013-04-20\\n ERF, NYSE, 100.00, 2013-05-11\\n GG, NYSE, -35.00, 2013-05-03\\n HNT, NYSE, 34.15, 2013-04-30\\n NYCB, NYSE, 3.85, 2013-04-25\\n SXT, NYSE, 3.33, 2013-04-19\\n CPG, NYSE, -20.00, 2013-05-10\\n AMX, NYSE, 16.67, 2013-04-20\\n MPX, NYSE, 0.00, 2013-04-25\\n OIS, NYSE, -2.70, 2013-04-25\\n MMP, NYSE, 4.08, 2013-05-03\\n PES, NYSE, 33.33, 2013-05-01\\n ABB, NYSE, -12.12, 2013-04-25\\n KMR, NYSE, -3.28, 2013-05-02\\n GEN, NYSE, -41.18, 2013-05-07\\n ADS, NYSE, -2.88, 2013-04-19\\n CVI, NYSE, 25.00, 2013-05-03\\n FTI, NYSE, -6.52, 2013-04-24\\n PRA, NYSE, 27.63, 2013-05-07\\n STO, NYSE, -16.46, 2013-05-03\\n BEL, NYSE, 41.67, 2013-05-02\\n FIS, NYSE, 1.64, 2013-05-01\\n COL, NYSE, 0.86, 2013-04-20\\n KAI, NYSE, 20.51, 2013-04-30\\n ABC, NYSE, -2.25, 2013-04-26\\n BG, NYSE, 18.56, 2013-04-26\\n FRO, NYSE, 27.08, 2013-05-31\\n ECA, NYSE, 150.00, 2013-04-24\\n CIG, NYSE, 108.33, 2013-05-17\\n EEP, NYSE, 16.67, 2013-05-01\\n CVX, NYSE, 3.25, 2013-04-27\\n GXP, NYSE, 41.67, 2013-05-10\\n JHX, NYSE, -2.78, 2013-05-24\\n PFG, NYSE, 5.33, 2013-04-26\\n PVR, NYSE, 14.29, 2013-04-26\\n AAP, NYSE, 2.48, 2013-05-24\\n KND, NYSE, 36.11, 2013-05-02\\n WTW, NYSE, 38.10, 2013-05-03\\n CNC, NYSE, 5.00, 2013-04-24\\n BCH, NYSE, 3.70, 2013-05-09\\n NS, NYSE, -86.67, 2013-04-25\\n ITUB, NYSE, -4.88, 2013-04-26\\n SXL, NYSE, 26.74, 2013-05-09\\n VALE, NYSE, 50.00, 2013-04-25\\n TNP, NYSE, 150.00, 2013-05-25\\n LCI, NYSE, 40.00, 2013-05-09\\n GTI, NYSE, 50.00, 2013-04-26\\n HNR, NYSE, -26.67, 2013-06-06\\n MWE, NYSE, -90.00, 2013-05-09\\n NLS, NYSE, 50.00, 2013-05-07\\n RGC, NYSE, -7.14, 2013-05-01\\n JAH, NYSE, 30.43, 2013-04-25\\n NPO, NYSE, -23.29, 2013-05-03\\n TRI, NYSE, 22.58, 2013-05-01\\n CAE, NYSE, 10.53, 2013-05-17\\n LF, NYSE, 28.57, 2013-05-02\\n SNY, NYSE, -10.11, 2013-05-03\\n BANC, NYSE, 400.00, 2013-05-09\\n COP, NYSE, 0.00, 2013-04-26\\n CNP, NYSE, -8.11, 2013-05-03\\n EEQ, NYSE, -321.43, 2013-05-02\\n MRH, NYSE, 32.58, 2013-04-25\\n NGS, NYSE, 23.08, 2013-05-10\\n NRP, NYSE, 4.88, 2013-05-07\\n PXP, NYSE, 17.98, 2013-05-03\\n XEC, NYSE, -0.93, 2013-05-08\\n IAG, NYSE, 7.14, 2013-05-08\\n EGO, NYSE, 0.00, 2013-05-03\\n JNS, NYSE, -6.25, 2013-04-24\\n PFS, NYSE, 14.81, 2013-04-27\\n ENH, NYSE, 74.79, 2013-05-02\\n CNX, NYSE, -5.00, 2013-04-26\\n AMT, NYSE, -10.42, 2013-05-02\\n ABG, NYSE, 13.43, 2013-04-25\\n LII, NYSE, 22.22, 2013-04-23\\n SRE, NYSE, -4.90, 2013-05-03\\n AEE, NYSE, -21.43, 2013-05-03\\n PLD, NYSE, 0.00, 2013-04-25\\n SAH, NYSE, -2.38, 2013-04-24\\n GPI, NYSE, 11.54, 2013-05-03\\n FIX, NYSE, 800.00, 2013-05-02\\n MMS, NYSE, 1.41, 2013-05-10\\n SRI, NYSE, 50.00, 2013-05-10\\n RTEC, NYSE, 50.00, 2013-05-03\\n NOV, NYSE, -5.84, 2013-04-27\\n DF, NYSE, 11.54, 2013-05-10\\n SAM, NYSE, -17.74, 2013-05-02\\n RL, NYSE, 8.46, 2013-05-24\\n FLR, NYSE, 6.25, 2013-05-03\\n ALL, NYSE, 2.27, 2013-05-02\\n ATI, NYSE, 0.00, 2013-04-25\\n EE, NYSE, 72.73, 2013-05-02\\n AIT, NYSE, 0.00, 2013-05-03\\n CHH, NYSE, -3.70, 2013-04-30\\n FMS, NYSE, -17.78, 2013-05-01\\n BCO, NYSE, 16.67, 
2013-04-26\\n CBB, NYSE, 133.33, 2013-05-10\\n MWW, NYSE, 14.29, 2013-05-03\\n PSA, NYSE, -3.09, 2013-05-10\\n E, NYSE, 0.00, 2013-04-25\\n JPM, NYSE, 15.22, 2013-04-13\\n USB, NYSE, 0.00, 2013-04-17\\n HON, NYSE, 6.14, 2013-04-20\\n ITG, NYSE, 50.00, 2013-05-03\\n ARB, NYSE, -15.49, 2013-05-08\\n APL, NYSE, -28.95, 2013-04-30\\n AVA, NYSE, 0.00, 2013-05-02\\n AXS, NYSE, 85.71, 2013-04-26\\n MOH, NYSE, 146.15, 2013-04-26\\n CVD, NYSE, 4.17, 2013-05-02\\n AHT, NYSE, 2.94, 2013-05-09\\n GPK, NYSE, 25.00, 2013-04-26\\n CNO, NYSE, 0.00, 2013-04-25\\n AUQ, NYSE, -60.00, 2013-05-10\\n NFP, NYSE, -5.45, 2013-05-04\\n CRI, NYSE, 12.86, 2013-05-10\\n FMD, NYSE, 27.27, 2013-04-30\\n FPO, NYSE, 3.45, 2013-04-26\\n TRQ, NYSE, -25.00, 2013-05-14\\n WLL, NYSE, 2.17, 2013-04-25\\n AEL, NYSE, 11.36, 2013-05-02\\n AHL, NYSE, 0.95, 2013-04-25\\n AUY, NYSE, -23.81, 2013-05-01\\n CMP, NYSE, 24.32, 2013-04-30\\n KRO, NYSE, -800.00, 2013-05-09\\n TPX, NYSE, 3.33, 2013-05-03\\n UTI, NYSE, -300.00, 2013-05-01\\n PJC, NYSE, 9.09, 2013-04-18\\n TRW, NYSE, 3.42, 2013-05-01\\n AIZ, NYSE, -14.56, 2013-04-25\\n HTH, NYSE, 11.43, 2013-05-07\\n ETP, NYSE, 33.33, 2013-05-09\\n LSE, NYSE, 0.00, 2013-05-09\\n BBD, NYSE, 0.00, 2013-04-23\\n NRG, NYSE, -37.04, 2013-05-08\\n HOS, NYSE, 96.67, 2013-05-02\\n ABR, NYSE, 84.62, 2013-05-04\\n FHN, NYSE, 0.00, 2013-04-20\\n AGO, NYSE, 86.11, 2013-05-10\\n HSP, NYSE, 18.18, 2013-05-02\\n HNI, NYSE, 250.00, 2013-04-18\\n GHL, NYSE, -34.78, 2013-04-18\\n XPO, NYSE, -16.44, 2013-05-08\\n CVO, NYSE, -200.00, 2013-05-09\\n CHE, NYSE, 9.92, 2013-04-19\\n GNW, NYSE, 11.11, 2013-05-01\\n CBG, NYSE, -5.88, 2013-04-26\\n SFL, NYSE, -43.33, 2013-05-31\\n NEU, NYSE, 3.28, 2013-04-25\\n GOL, NYSE, -1200.00, 2013-05-14\\n CAB, NYSE, 18.64, 2013-04-26\\n LTM, NYSE, 3.08, 2013-04-26\\n VVI, NYSE, 68.00, 2013-04-27\\n WCG, NYSE, -8.70, 2013-05-04\\n HEP, NYSE, -36.36, 2013-05-01\\n DPZ, NYSE, 5.36, 2013-05-01\\n BDC, NYSE, 6.33, 2013-05-03\\n ENS, NYSE, 2.56, 2013-05-29\\n BMR, NYSE, 7.89, 2013-05-02\\n ACC, NYSE, -1.54, 2013-04-24\\n KRG, NYSE, 27.27, 2013-05-03\\n WLK, NYSE, 42.64, 2013-05-07\\n EXR, NYSE, 4.55, 2013-04-30\\n CNS, NYSE, 7.32, 2013-04-18\\n IOC, NYSE, 161.54, 2013-05-14\\n STON, NYSE, -150.00, 2013-05-08\\n TTM, NYSE, 60.56, 2013-05-30\\n CPL, NYSE, 7.69, 2013-05-11\\n TPGI, NYSE, -460.00, 2013-05-07\\n SHO, NYSE, 0.00, 2013-05-07\\n CUBE, NYSE, 0.00, 2013-05-03\\n NRF, NYSE, -51.35, 2013-05-04\\n DLR, NYSE, -1.69, 2013-04-27\\n MTL, NYSE, 100.00, 2013-06-19\\n NWE, NYSE, 8.60, 2013-04-26\\n ORA, NYSE, 550.00, 2013-05-08\\n NP, NYSE, 7.25, 2013-05-09\\n SMA, NYSE, -73.33, 2013-05-03\\n BBG, NYSE, -2600.00, 2013-05-03\\n BXC, NYSE, 35.29, 2013-05-02\\n KNL, NYSE, 8.33, 2013-04-19\\n LVS, NYSE, 7.58, 2013-05-02\\n HLF, NYSE, 18.69, 2013-04-30\\n MIC, NYSE, -89.09, 2013-04-30\\n PHH, NYSE, -81.13, 2013-05-02\\n CE, NYSE, 44.30, 2013-04-19\\n EDR, NYSE, 0.00, 2013-04-30\\n WTI, NYSE, 34.62, 2013-05-08\\n ARC, NYSE, 0.00, 2013-05-08\\n PBH, NYSE, 5.88, 2013-05-17\\n HUN, NYSE, 18.75, 2013-05-01\\n WEX, NYSE, 3.16, 2013-05-02\\n DLB, NYSE, 14.29, 2013-04-26\\n DSX, NYSE, 66.67, 2013-05-23\\n LAZ, NYSE, -17.65, 2013-04-27\\n TGP, NYSE, 14.29, 2013-05-10\\n TLP, NYSE, 7.69, 2013-05-08\\n DRH, NYSE, 55.56, 2013-05-11\\n HTGC, NYSE, 8.00, 2013-05-03\\n KFN, NYSE, 27.78, 2013-05-02\\n THS, NYSE, 5.71, 2013-05-10\\n NSR, NYSE, -8.86, 2013-05-03\\n WAL, NYSE, 14.29, 2013-04-19\\n SLW, NYSE, -9.76, 2013-05-11\\n MPW, NYSE, -3.85, 2013-04-27\\n GNK, NYSE, -2.75, 2013-05-02\\n MFB, NYSE, 28.57, 
2013-05-09\\nRDS.A, NYSE, 21.74, 2013-05-03\\n ITC, NYSE, -3.45, 2013-04-24\\n FTK, NYSE, -11.76, 2013-05-10\\n PIKE, NYSE, -20.00, 2013-05-07\\n ALJ, NYSE, 63.27, 2013-05-09\\n DRC, NYSE, 2.38, 2013-04-26\\n STN, NYSE, 0.00, 2013-05-10\\n SSW, NYSE, -8.70, 2013-04-30\\n CF, NYSE, 0.50, 2013-05-09\\n HPY, NYSE, 12.50, 2013-05-01\\n ROC, NYSE, 1.49, 2013-05-01\\n WPZ, NYSE, -57.58, 2013-05-01\\n LCC, NYSE, 29.17, 2013-04-24\\n GLP, NYSE, -7.27, 2013-05-10\\n AMP, NYSE, 1.27, 2013-04-23\\n DHT, NYSE, 58.33, 2013-04-30\\n FNF, NYSE, 5.00, 2013-05-02\\n NM, NYSE, 52.38, 2013-05-22\\n CCO, NYSE, -57.14, 2013-05-03\\n BWP, NYSE, 5.00, 2013-04-30\\n ICE, NYSE, 2.53, 2013-05-02\\n BKD, NYSE, 50.00, 2013-05-02\\n BAS, NYSE, 12.00, 2013-04-25\\n CPA, NYSE, 21.21, 2013-05-14\\n LYV, NYSE, 8.33, 2013-05-08\\n WNR, NYSE, -6.93, 2013-05-03\\n CMG, NYSE, 9.81, 2013-04-19\\n RGP, NYSE, -50.00, 2013-05-09\\n KOP, NYSE, -16.92, 2013-05-04\\n TX, NYSE, 40.43, 2013-05-01\\n UAL, NYSE, 10.09, 2013-04-26\\n ETE, NYSE, -27.03, 2013-05-09\\n RSO, NYSE, -45.00, 2013-05-08\\n XCO, NYSE, 62.50, 2013-05-01\\n PAC, NYSE, 30.00, 2013-04-26\\n NYX, NYSE, 1.79, 2013-05-01\\n TDG, NYSE, 0.61, 2013-05-08\\n BMA, NYSE, 11.68, 2013-05-09\\n THI, NYSE, 1.67, 2013-05-09\\n BTE, NYSE, -112.00, 2013-05-10\\n CNH, NYSE, 41.49, 2013-05-01\\n GLA, NYSE, -82.35, 2013-05-02\\n POR, NYSE, 0.00, 2013-05-02\\n HIL, NYSE, 50.00, 2013-05-03\\n HVB, NYSE, 12.50, 2013-04-24\\n KS, NYSE, -9.30, 2013-05-08\\n HK, NYSE, -28.57, 2013-05-03\\n DCP, NYSE, 3.28, 2013-05-07\\n DK, NYSE, 7.56, 2013-05-09\\n CODI, NYSE, 0.00, 2013-05-08\\n MA, NYSE, 0.65, 2013-05-02\\n MWA, NYSE, 150.00, 2013-05-01\\n KOG, NYSE, -21.43, 2013-05-03\\n PWE, NYSE, -150.00, 2013-05-03\\n PGTI, NYSE, 100.00, 2013-05-02\\n AWH, NYSE, 8.45, 2013-04-25\\n NSH, NYSE, -29.73, 2013-04-25\\n WYN, NYSE, 7.58, 2013-04-25\\n WNS, NYSE, 15.38, 2013-04-18\\n PGH, NYSE, 0.00, 2013-05-02\\n AYR, NYSE, 34.48, 2013-05-03\\n EVR, NYSE, -24.49, 2013-04-25\\n HBI, NYSE, 2.00, 2013-04-24\\n WU, NYSE, 12.12, 2013-05-01\\n OC, NYSE, 45.00, 2013-04-25\\n DAC, NYSE, 44.44, 2013-04-30\\n AWI, NYSE, -43.59, 2013-04-30\\n SUSS, NYSE, 0.00, 2013-05-09\\n DEI, NYSE, 5.71, 2013-05-08\\n OB, NYSE, 79.31, 2013-04-30\\n SBH, NYSE, -7.69, 2013-05-03\\n EBS, NYSE, -144.44, 2013-05-03\\n KBR, NYSE, 25.53, 2013-04-26\\n AER, NYSE, 23.40, 2013-05-08\\n NOA, NYSE, -442.86, 2013-06-11\\n SPR, NYSE, 29.79, 2013-05-03\\n ANW, NYSE, -7.14, 2013-05-16\\n DCT, NYSE, 10.00, 2013-05-03\\n SE, NYSE, 6.25, 2013-05-04\\n TOO, NYSE, -17.86, 2013-05-10\\n TSL, NYSE, -27.78, 2013-05-30\\n TWC, NYSE, 2.92, 2013-04-26\\n MVO, NYSE, -13.92, 2013-05-09\\n CO, NYSE, 150.00, 2013-06-19\\n EXK, NYSE, -18.75, 2013-05-07\\n EIG, NYSE, 22.22, 2013-05-09\\n HF, NYSE, -50.00, 2013-05-02\\n FIG, NYSE, 33.33, 2013-05-03\\n NGLS, NYSE, -20.00, 2013-05-04\\n TCAP, NYSE, -1.75, 2013-05-09\\n GFA, NYSE, -211.11, 2013-05-14\\n BR, NYSE, 18.18, 2013-05-08\\n SCR, NYSE, 12.50, 2013-05-10\\n CNK, NYSE, 12.00, 2013-05-08\\n DAL, NYSE, 42.86, 2013-04-24\\n ORN, NYSE, 42.86, 2013-05-03\\n ACM, NYSE, 3.92, 2013-05-08\\n SLH, NYSE, 5.00, 2013-05-08\\n CLR, NYSE, 2.63, 2013-05-09\\n BGS, NYSE, -5.13, 2013-04-19\\n STAR, NYSE, 26.42, 2013-05-01\\n YGE, NYSE, -40.00, 2013-05-31\\n DFS, NYSE, 18.75, 2013-04-24\\n TEL, NYSE, 7.04, 2013-04-25\\n BX, NYSE, 1.85, 2013-04-19\\n SEP, NYSE, 4.65, 2013-05-04\\n BZ, NYSE, -77.78, 2013-05-03\\n PPO, NYSE, -41.18, 2013-05-09\\n PRO, NYSE, 100.00, 2013-05-03\\n WBC, NYSE, 7.34, 2013-04-26\\n DHX, NYSE, 0.00, 
2013-04-24\\n PMC, NYSE, 23.53, 2013-05-02\\n HGG, NYSE, 3.33, 2013-05-21\\n OWW, NYSE, -33.33, 2013-05-10\\n VR, NYSE, 35.97, 2013-04-26\\n CXO, NYSE, -27.50, 2013-05-02\\n G, NYSE, 5.00, 2013-05-02\\n EJ, NYSE, 89.47, 2013-05-16\\n WX, NYSE, 11.11, 2013-05-14\\n CMLP, NYSE, -92.86, 2013-05-08\\n VMW, NYSE, 10.87, 2013-04-24\\n CZZ, NYSE, -40.00, 2013-06-06\\n CGA, NYSE, 6.67, 2013-05-14\\n TDC, NYSE, -26.92, 2013-05-03\\n FLY, NYSE, 61.73, 2013-05-03\\n MAIN, NYSE, 2.04, 2013-05-10\\n REN, NYSE, 100.00, 2013-05-07\\n TGH, NYSE, -12.90, 2013-05-08\\n DFT, NYSE, -5.00, 2013-05-08\\n RF, NYSE, 15.00, 2013-04-24\\n PZN, NYSE, 0.00, 2013-04-25\\n LL, NYSE, 29.55, 2013-04-25\\n NMM, NYSE, 0.00, 2013-04-26\\n OZM, NYSE, 81.25, 2013-05-03\\n ES, NYSE, 12.31, 2013-05-02\\n MSCI, NYSE, 5.56, 2013-05-02\\n ARR, NYSE, -21.74, 2013-05-03\\n KW, NYSE, 62.50, 2013-05-08\\n GTS, NYSE, 52.78, 2013-05-02\\n FOR, NYSE, 450.00, 2013-05-09\\n LRN, NYSE, 34.78, 2013-05-04\\n TNK, NYSE, -100.00, 2013-05-10\\n N, NYSE, -21.43, 2013-04-26\\n DAN, NYSE, -33.33, 2013-04-26\\n BIP, NYSE, 0.00, 2013-05-03\\n CPN, NYSE, -6.67, 2013-05-03\\n SOL, NYSE, -15.38, 2013-05-17\\n PM, NYSE, -4.44, 2013-04-19\\n V, NYSE, 6.08, 2013-05-02\\n IPI, NYSE, 5.26, 2013-05-02\\n AWK, NYSE, -5.88, 2013-05-08\\n HTS, NYSE, -7.46, 2013-04-23\\n DPS, NYSE, 12.77, 2013-04-25\\n CFX, NYSE, 8.33, 2013-04-26\\n WES, NYSE, -22.50, 2013-05-02\\n SB, NYSE, 0.00, 2013-05-16\\n LO, NYSE, 4.76, 2013-04-25\\n LPS, NYSE, 0.00, 2013-04-25\\n FF, NYSE, -6.90, 2013-05-08\\n NNA, NYSE, 200.00, 2013-05-03\\n EPB, NYSE, 7.41, 2013-04-18\\n JBT, NYSE, -17.65, 2013-05-08\\n DL, NYSE, -33.33, 2013-05-22\\n RAX, NYSE, -5.00, 2013-05-09\\n GSL, NYSE, -50.00, 2013-05-10\\n HCI, NYSE, 66.06, 2013-05-03\\n EC, NYSE, -18.58, 2013-05-04\\n CLW, NYSE, -98.08, 2013-04-25\\n MJN, NYSE, -1.16, 2013-04-26\\n EPC, NYSE, 39.53, 2013-05-02\\n BPI, NYSE, 0.00, 2013-05-07\\n RST, NYSE, 25.00, 2013-05-09\\n DGI, NYSE, 22.22, 2013-05-08\\n SWI, NYSE, 6.25, 2013-05-01\\n CYS, NYSE, -45.16, 2013-04-18\\n IVR, NYSE, 1.59, 2013-05-02\\n BUD, NYSE, 50.65, 2013-05-01\\n SLD, NYSE, -66.67, 2013-05-15\\n PMT, NYSE, 11.11, 2013-04-24\\n STWD, NYSE, -20.93, 2013-05-09\\n CFN, NYSE, 11.32, 2013-05-10\\n SPB, NYSE, 7.32, 2013-05-01\\n ARI, NYSE, 33.33, 2013-05-02\\n CLNY, NYSE, -26.47, 2013-05-07\\n ART, NYSE, -800.00, 2013-05-07\\n SEM, NYSE, -11.11, 2013-05-03\\n BSBR, NYSE, -71.43, 2013-04-26\\n DOLE, NYSE, -50.00, 2013-05-03\\n VSI, NYSE, 2.86, 2013-05-08\\n TWO, NYSE, -9.38, 2013-05-08\\n CVE, NYSE, -6.38, 2013-04-25\\n H, NYSE, 12.50, 2013-05-02\\n LEA, NYSE, 19.27, 2013-04-26\\n SVN, NYSE, -81.82, 2013-05-14\\n CLD, NYSE, -59.26, 2013-05-01\\n AOL, NYSE, 6.25, 2013-05-09\\n CHSP, NYSE, 25.00, 2013-05-08\\n PEB, NYSE, 5.88, 2013-04-26\\n CIT, NYSE, -8.99, 2013-04-24\\n KAR, NYSE, -3.03, 2013-05-02\\n CIE, NYSE, -15.38, 2013-05-01\\n TMH, NYSE, 0.00, 2013-05-01\\n KRA, NYSE, -75.00, 2013-05-02\\n SYA, NYSE, 8.82, 2013-04-25\\n TRNO, NYSE, -11.11, 2013-05-09\\n PDM, NYSE, 0.00, 2013-05-03\\n GNRC, NYSE, 23.47, 2013-05-03\\n ACW, NYSE, -9.68, 2013-04-24\\n BALT, NYSE, -9.52, 2013-05-02\\n ST, NYSE, 4.35, 2013-04-24\\n SEMG, NYSE, -15.00, 2013-05-09\\n CALX, NYSE, 50.00, 2013-04-26\\n MXL, NYSE, 33.33, 2013-05-01\\n STNG, NYSE, 60.00, 2013-04-30\\n PRI, NYSE, -4.35, 2013-05-08\\n SDRL, NYSE, 16.95, 2013-05-29\\n CLDT, NYSE, 7.50, 2013-05-08\\n EXL, NYSE, 5.00, 2013-05-02\\n LYB, NYSE, 9.09, 2013-04-27\\n PNG, NYSE, 4.35, 2013-05-07\\n PLOW, NYSE, 13.33, 2013-05-07\\n SIX, NYSE, 19.61, 
2013-04-23\\n NKA, NYSE, -140.00, 2013-05-10\\n RRTS, NYSE, 3.57, 2013-05-02\\n JKS, NYSE, 66.27, 2013-06-08\\n CODE, NYSE, 7.69, 2013-05-01\\n FAF, NYSE, -31.71, 2013-04-26\\n QEP, NYSE, -6.67, 2013-05-01\\n OAS, NYSE, 31.37, 2013-05-08\\n HPP, NYSE, 18.18, 2013-05-07\\n FN, NYSE, 3.70, 2013-04-30\\n ECT, NYSE, 7.32, 2013-05-11\\n QUAD, NYSE, -88.10, 2013-05-08\\n KKR, NYSE, 4.76, 2013-04-26\\n RLD, NYSE, 70.00, 2013-06-07\\n AMRC, NYSE, -200.00, 2013-05-10\\n GDOT, NYSE, 9.37, 2013-05-01\\n AT, NYSE, 40.00, 2013-05-09\\n ENV, NYSE, 0.00, 2013-05-17\\n COR, NYSE, 0.00, 2013-04-25\\n VC, NYSE, 75.65, 2013-05-10\\n CCG, NYSE, 5.88, 2013-05-01\\n EFC, NYSE, -32.00, 2013-05-07\\n TOWR, NYSE, 255.56, 2013-05-03\\n CHMT, NYSE, -21.05, 2013-05-03\\n HBM, NYSE, 200.00, 2013-05-02\\n EXAM, NYSE, 0.00, 2013-05-09\\n XUE, NYSE, -25.00, 2013-05-17\\n CMRE, NYSE, 26.09, 2013-04-25\\n NOAH, NYSE, 112.50, 2013-05-07\\n IPHI, NYSE, 18.18, 2013-05-02\\n BITA, NYSE, 0.00, 2013-05-10\\n BAH, NYSE, 11.43, 2013-05-23\\n GM, NYSE, 19.64, 2013-05-03\\n XNY, NYSE, 28.57, 2013-05-20\\n TROX, NYSE, -181.25, 2013-05-09\\n TRGP, NYSE, 52.38, 2013-05-04\\n DANG, NYSE, 21.05, 2013-05-17\\n YOKU, NYSE, 0.00, 2013-05-16\\n FRC, NYSE, 0.00, 2013-04-16\\n RFP, NYSE, 64.29, 2013-05-01\\n ISS, NYSE, 50.00, 2013-05-18\\n WD, NYSE, -45.65, 2013-05-09\\n FLT, NYSE, 10.39, 2013-05-03\\n GCAP, NYSE, -15.38, 2013-05-08\\n FRF, NYSE, -27.27, 2013-05-14\\n SWFT, NYSE, 23.53, 2013-04-23\\n AG, NYSE, -8.00, 2013-05-16\\n QRE, NYSE, 0.00, 2013-05-09\\n AAT, NYSE, 8.57, 2013-05-01\\n MCC, NYSE, -2.70, 2013-05-03\\n NLSN, NYSE, 9.09, 2013-04-26\\n AGRO, NYSE, -100.00, 2013-05-17\\n BKU, NYSE, 4.44, 2013-04-25\\n INXN, NYSE, -7.14, 2013-05-09\\n NPTN, NYSE, 10.00, 2013-05-10\\n INN, NYSE, 5.88, 2013-05-07\\n KMI, NYSE, -12.50, 2013-04-18\\n HCA, NYSE, -4.82, 2013-05-03\\n MX, NYSE, 13.04, 2013-05-01\\n HII, NYSE, 0.00, 2013-05-09\\n QIHU, NYSE, 100.00, 2013-05-20\\n APO, NYSE, 56.20, 2013-05-07\\n GNC, NYSE, 1.39, 2013-04-27\\n SDT, NYSE, 16.07, 2013-05-11\\n UAN, NYSE, 4.26, 2013-05-02\\n ARCO, NYSE, -142.86, 2013-05-01\\n ELLI, NYSE, -16.67, 2013-05-01\\n TMS, NYSE, -12.00, 2013-04-26\\n SQNS, NYSE, 0.00, 2013-04-26\\n STAG, NYSE, 3.13, 2013-05-07\\n AL, NYSE, 5.13, 2013-05-10\\n TLLP, NYSE, -14.89, 2013-05-07\\n RENN, NYSE, 85.71, 2013-05-14\\n NQ, NYSE, -16.67, 2013-05-16\\n KOS, NYSE, -37.50, 2013-05-10\\n RLJ, NYSE, 10.81, 2013-05-09\\n NGL, NYSE, -62.86, 2013-06-15\\n FENG, NYSE, 60.00, 2013-05-15\\n LNKD, NYSE, 340.00, 2013-05-03\\n NMFC, NYSE, -2.86, 2013-05-07\\n ACTV, NYSE, 32.14, 2013-05-03\\n FIO, NYSE, 20.00, 2013-04-25\\n TAOM, NYSE, -25.00, 2013-05-24\\n RATE, NYSE, 10.00, 2013-05-01\\n VHS, NYSE, 8.33, 2013-05-01\\n MPC, NYSE, 0.00, 2013-05-01\\n MITT, NYSE, -9.64, 2013-05-07\\n OILT, NYSE, 17.07, 2013-05-09\\n SXC, NYSE, -40.00, 2013-04-26\\n AMTG, NYSE, 14.06, 2013-05-07\\n AMID, NYSE, -200.00, 2013-05-14\\n WAIR, NYSE, 22.22, 2013-04-30\\n PER, NYSE, -7.58, 2013-05-11\\n PPP, NYSE, 260.00, 2013-05-09\\n FSM, NYSE, -28.57, 2013-05-08\\n FBHS, NYSE, 41.18, 2013-05-03\\n XLS, NYSE, 73.91, 2013-05-04\\n XYL, NYSE, -3.57, 2013-05-01\\n GNE, NYSE, -550.00, 2013-05-08\\n NDRO, NYSE, -8.11, 2013-05-04\\n RNF, NYSE, -29.63, 2013-05-10\\n VAC, NYSE, 10.20, 2013-04-26\\n CHKR, NYSE, -2.90, 2013-05-10\\n PACD, NYSE, 250.00, 2013-05-07\\n INVN, NYSE, -13.33, 2013-05-03\\n DLPH, NYSE, 11.46, 2013-05-02\\n MN, NYSE, 0.00, 2013-05-02\\n RRMS, NYSE, 51.28, 2013-05-10\\n WPX, NYSE, -4.17, 2013-05-03\\n LPI, NYSE, -15.38, 2013-05-10\\n 
SN, NYSE, -82.61, 2013-05-08\\n KORS, NYSE, 35.14, 2013-05-30\\n BCEI, NYSE, -20.93, 2013-05-10\\n BOXC, NYSE, 2.56, 2013-04-23\\n PVG, NYSE, -25.00, 2013-05-11\\n POST, NYSE, -29.63, 2013-05-14\\n SLCA, NYSE, -2.78, 2013-05-01\\n MTDR, NYSE, 0.00, 2013-05-09\\n GWAY, NYSE, -120.00, 2013-05-07\\n EPAM, NYSE, -14.71, 2013-05-09\\n RNDY, NYSE, -9.52, 2013-05-10\\n PRLB, NYSE, 0.00, 2013-04-26\\n YELP, NYSE, -40.00, 2013-05-02\\n NSM, NYSE, 23.19, 2013-05-08\\n ALSN, NYSE, 95.24, 2013-04-30\\n DWRE, NYSE, -22.73, 2013-05-08\\n VNTV, NYSE, 3.70, 2013-05-07\\n ET, NYSE, 0.00, 2013-05-10\\n VCRA, NYSE, -160.00, 2013-05-03\\n RM, NYSE, -1.82, 2013-05-03\\n BNNY, NYSE, 3.57, 2013-06-11\\n MM, NYSE, 25.00, 2013-05-09\\n RXN, NYSE, 0.00, 2013-05-22\\n GLOG, NYSE, -16.67, 2013-05-16\\n RPAI, NYSE, 9.52, 2013-05-07\\n OAK, NYSE, 39.86, 2013-05-08\\n FET, NYSE, 3.03, 2013-04-26\\n MRC, NYSE, 4.65, 2013-05-03\\n PSX, NYSE, 17.74, 2013-05-02\\n TUMI, NYSE, 6.67, 2013-05-09\\n ACRE, NYSE, -5.88, 2013-05-16\\n EVER, NYSE, 13.79, 2013-04-25\\n PDH, NYSE, -13.24, 2013-04-25\\n ROYT, NYSE, 10.00, 2013-05-11\\n WMC, NYSE, -2.15, 2013-05-16\\n WAGE, NYSE, 35.71, 2013-05-10\\n HTA, NYSE, 6.67, 2013-05-08\\n ALEX, NYSE, -28.57, 2013-05-10\\n BKW, NYSE, 0.00, 2013-04-27\\n CNCO, NYSE, -88.24, 2013-05-31\\n EQM, NYSE, 41.30, 2013-04-26\\n NOW, NYSE, 0.00, 2013-04-25\\n EGL, NYSE, -11.24, 2013-05-14\\n NGVC, NYSE, 7.69, 2013-05-10\\n NTI, NYSE, 3.51, 2013-05-14\\n AMRE, NYSE, 4.00, 2013-05-08\\n GMED, NYSE, 5.00, 2013-05-03\\n MANU, NYSE, -25.00, 2013-05-03\\n HCLP, NYSE, -23.08, 2013-05-15\\n ADT, NYSE, -4.65, 2013-05-02\\n TRLA, NYSE, -75.00, 2013-05-01\\n SRC, NYSE, 19.44, 2013-05-09\\n NBHC, NYSE, -50.00, 2013-04-30\\n BSMX, NYSE, 30.43, 2013-04-27\\n HY, NYSE, 67.05, 2013-05-02\\n SMLP, NYSE, -10.71, 2013-05-14\\n DYN, NYSE, -254.55, 2013-05-03\\n LXFR, NYSE, 0.00, 2013-05-08\\n LOCK, NYSE, 25.00, 2013-05-02\\n JMI, NYSE, 224.44, 2013-05-08\\n BERY, NYSE, 16.67, 2013-05-03\\n FLTX, NYSE, 8.33, 2013-05-09\\n ANFI, NYSE, 0.00, 2013-06-11\\n SSTK, NYSE, 23.08, 2013-05-09\\n RLGY, NYSE, -13.33, 2013-05-02\\n SDLP, NYSE, 88.64, 2013-05-29\\n MPLX, NYSE, -7.14, 2013-05-01\\n WWAV, NYSE, 6.67, 2013-05-10\\n SXE, NYSE, -44.44, 2013-05-09\\n DKL, NYSE, 31.58, 2013-05-08\\n SCM, NYSE, -8.82, 2013-05-10\\n RKUS, NYSE, -100.00, 2013-05-07\\n ALDW, NYSE, -1.32, 2013-05-08\\n WGP, NYSE, 0.00, 2013-05-02\\n ABBV, NYSE, 3.03, 2013-04-27\\n PBF, NYSE, -54.72, 2013-05-03\\n SBY, NYSE, -433.33, 2013-05-14\\n RIOM, NYSE, 0.00, 2013-05-15\\n USAC, NYSE, -30.00, 2013-05-10\\n CVRR, NYSE, -2.56, 2013-05-03\\n SXCP, NYSE, -9.76, 2013-04-26\\n BFAM, NYSE, 81.82, 2013-05-10\\n TPH, NYSE, 200.00, 2013-05-15\\n ZTS, NYSE, 5.88, 2013-05-01\\n BCC, NYSE, 146.15, 2013-04-23\\n AGI, NYSE, 0.00, 2013-04-26\\n APAM, NYSE, -11.32, 2013-05-02\\n SSNI, NYSE, -1211.77, 2013-05-02\\n MODN, NYSE, 0.00, 2013-05-08\\n AVIV, NYSE, 150.00, 2013-05-08\\n OAKS, NYSE, 509.09, 2013-05-04\\n MRIN, NYSE, -7.50, 2013-05-09\\n PF, NYSE, 17.24, 2013-05-16\\n TMHC, NYSE, -66.67, 2013-05-16\\n ARPI, NYSE, -600.00, 2013-06-25\\n CSTM, NYSE, -105.08, 2013-06-18\\n DDC, NYSE, -80.00, 2013-06-06\\n ABM, NYSE, 9.09, 2013-06-04\\n ANN, NYSE, 4.76, 2013-06-07\\n BBY, NYSE, 28.00, 2013-05-22\\n BF.B, NYSE, -2.17, 2013-06-06\\n BKE, NYSE, -4.88, 2013-05-24\\n NCS, NYSE, -21.74, 2013-06-05\\n BNS, NYSE, -0.83, 2013-05-29\\n BRC, NYSE, -6.78, 2013-05-17\\n CATO, NYSE, 1.94, 2013-05-24\\n COO, NYSE, 9.49, 2013-06-07\\n CPB, NYSE, 10.71, 2013-05-21\\n CFI, NYSE, 10.81, 
2013-06-13\\n DCI, NYSE, -4.17, 2013-05-18\\n DDS, NYSE, 15.38, 2013-05-15\\n DE, NYSE, 0.73, 2013-05-16\\n DY, NYSE, 0.00, 2013-05-22\\n EV, NYSE, 0.00, 2013-05-23\\n ESL, NYSE, -11.81, 2013-05-31\\n M, NYSE, 3.77, 2013-05-16\\n GCO, NYSE, 11.90, 2013-06-01\\n GPS, NYSE, 2.90, 2013-05-24\\n HD, NYSE, 7.79, 2013-05-22\\n HEI, NYSE, 10.00, 2013-05-23\\n HOV, NYSE, 120.00, 2013-06-06\\n HRB, NYSE, -1.93, 2013-06-13\\n HRL, NYSE, 0.00, 2013-05-24\\n HPQ, NYSE, 7.41, 2013-05-23\\n JCP, NYSE, -12.93, 2013-05-17\\n KR, NYSE, 4.55, 2013-06-21\\n KSS, NYSE, 15.79, 2013-05-17\\n LB, NYSE, 4.35, 2013-05-23\\n LOW, NYSE, -3.92, 2013-05-23\\n LZB, NYSE, 7.14, 2013-06-19\\n MDT, NYSE, 6.80, 2013-05-22\\n MEI, NYSE, 60.00, 2013-06-21\\n MPR, NYSE, -33.33, 2013-06-07\\n NAV, NYSE, -302.75, 2013-06-11\\n JWN, NYSE, -3.95, 2013-05-17\\n OXM, NYSE, 5.13, 2013-06-12\\n PBY, NYSE, -85.71, 2013-06-11\\n PLL, NYSE, 1.37, 2013-05-31\\n PNY, NYSE, 0.00, 2013-06-08\\n PVH, NYSE, 39.42, 2013-06-13\\n THO, NYSE, -7.87, 2013-06-07\\n TIF, NYSE, 32.08, 2013-05-29\\n TJX, NYSE, 0.00, 2013-05-22\\n TOL, NYSE, 0.00, 2013-05-23\\n TTC, NYSE, 10.92, 2013-05-24\\n VAL, NYSE, 2.25, 2013-05-15\\n JW.A, NYSE, -16.47, 2013-06-19\\n TGT, NYSE, 23.53, 2013-05-23\\n WMT, NYSE, -0.87, 2013-05-17\\n WSM, NYSE, 11.11, 2013-05-24\\n FL, NYSE, 3.41, 2013-05-25\\n CHS, NYSE, -11.11, 2013-05-30\\n BKS, NYSE, 52.22, 2013-06-26\\n CAL, NYSE, 45.45, 2013-05-30\\n SIG, NYSE, 0.89, 2013-05-24\\n ZLC, NYSE, 1200.00, 2013-05-23\\n AEO, NYSE, 5.88, 2013-05-23\\n FGP, NYSE, 15.69, 2013-06-07\\n BMO, NYSE, -4.73, 2013-05-30\\n RY, NYSE, -2.34, 2013-05-31\\n GEF, NYSE, 1.45, 2013-06-06\\n SKS, NYSE, 0.00, 2013-05-22\\n TD, NYSE, 1.09, 2013-05-24\\n ANF, NYSE, -80.00, 2013-05-25\\n CIEN, NYSE, 20.00, 2013-06-07\\n KMG, NYSE, 8.70, 2013-06-11\\n IRET, NYSE, 11.76, 2013-07-02\\n CM, NYSE, 0.00, 2013-05-31\\n UBA, NYSE, 12.00, 2013-06-08\\n KFY, NYSE, 3.23, 2013-06-18\\n KKD, NYSE, 25.00, 2013-05-31\\n MVC, NYSE, -37.50, 2013-06-11\\n CBK, NYSE, 150.00, 2013-06-08\\n SJM, NYSE, 12.17, 2013-06-07\\n BIG, NYSE, 0.00, 2013-05-31\\n JOY, NYSE, 11.61, 2013-05-31\\n SSI, NYSE, -122.22, 2013-05-18\\n GME, NYSE, 15.00, 2013-05-24\\n DKS, NYSE, 0.00, 2013-05-22\\n A, NYSE, 14.93, 2013-05-15\\n MTN, NYSE, -3.62, 2013-06-07\\n GES, NYSE, 75.00, 2013-05-31\\n CRM, NYSE, -600.00, 2013-05-24\\n NWY, NYSE, 128.57, 2013-05-24\\n PAY, NYSE, -7.69, 2013-06-06\\n DSW, NYSE, 11.11, 2013-05-30\\n NX, NYSE, -300.00, 2013-06-08\\n DG, NYSE, -1.39, 2013-06-05\\n EXPR, NYSE, 5.56, 2013-05-31\\n P, NYSE, 0.00, 2013-05-23\\n GWRE, NYSE, 44.44, 2013-05-29\\n BLOX, NYSE, 100.00, 2013-05-24\\n TLYS, NYSE, 14.29, 2013-05-30\\n PANW, NYSE, -900.00, 2013-05-31\\n WDAY, NYSE, 13.04, 2013-05-23\\n RH, NYSE, 50.00, 2013-06-14\\n RALY, NYSE, 14.78, 2013-06-07\\n AIR, NYSE, 13.64, 2013-07-26\\n ATU, NYSE, -1.59, 2013-06-20\\n AZO, NYSE, 0.69, 2013-05-22\\n AZZ, NYSE, -8.20, 2013-06-29\\n CAG, NYSE, 1.69, 2013-06-28\\n CLC, NYSE, -1.49, 2013-06-20\\n CMC, NYSE, -15.79, 2013-06-28\\n FC, NYSE, 18.18, 2013-07-10\\n FDO, NYSE, 1.94, 2013-07-11\\n FDX, NYSE, 8.67, 2013-06-20\\n FUL, NYSE, -5.63, 2013-06-27\\n GIS, NYSE, -1.85, 2013-06-27\\n KBH, NYSE, 20.00, 2013-06-28\\n LEN, NYSE, 30.30, 2013-06-26\\n LNN, NYSE, 12.92, 2013-06-27\\n MKC, NYSE, 0.00, 2013-06-28\\n RT, NYSE, -36.84, 2013-07-25\\n MCS, NYSE, -6.25, 2013-07-26\\n MSM, NYSE, 9.37, 2013-07-11\\n NKE, NYSE, 2.70, 2013-06-28\\n ORCL, NYSE, 0.00, 2013-06-21\\n PIR, NYSE, 0.00, 2013-06-21\\n PKE, NYSE, -13.79, 2013-06-27\\n RAD, NYSE, 
0.00, 2013-06-21\\n RPM, NYSE, 7.46, 2013-07-23\\n SVU, NYSE, 250.00, 2013-07-19\\n TISI, NYSE, 0.00, 2013-08-07\\n TXI, NYSE, 116.00, 2013-07-11\\n UNF, NYSE, 2.88, 2013-06-27\\n WGO, NYSE, 0.00, 2013-06-28\\n WOR, NYSE, -7.46, 2013-06-28\\n JBL, NYSE, 4.35, 2013-06-20\\n GBX, NYSE, -5.66, 2013-07-03\\n DRI, NYSE, -1.94, 2013-06-22\\n FDS, NYSE, -1.71, 2013-06-19\\n KMX, NYSE, 12.28, 2013-06-22\\n SCS, NYSE, 0.00, 2013-06-20\\n SJR, NYSE, 16.28, 2013-06-29\\n RHT, NYSE, 9.09, 2013-06-20\\n OMN, NYSE, 14.29, 2013-06-28\\n MON, NYSE, 3.75, 2013-06-27\\n GPN, NYSE, -3.92, 2013-07-26\\n AYI, NYSE, 7.78, 2013-07-03\\n CCL, NYSE, 50.00, 2013-06-26\\n CUK, NYSE, 50.00, 2013-06-26\\n STZ, NYSE, -7.32, 2013-07-03\\n ACN, NYSE, 0.00, 2013-06-28\\n SNX, NYSE, 0.00, 2013-06-26\\n TAL, NYSE, 66.67, 2013-07-23\\n IHS, NYSE, 1.45, 2013-06-21\\n EDU, NYSE, 20.00, 2013-07-24\\n ZEP, NYSE, -31.71, 2013-07-03\\n MG, NYSE, -5.88, 2013-08-08\\n MOS, NYSE, -0.88, 2013-07-16\\n ABT, NYSE, 4.55, 2013-07-18\\n ABX, NYSE, 17.86, 2013-08-02\\n AB, NYSE, 7.89, 2013-08-01\\n TAP, NYSE, 8.63, 2013-08-07\\n ACO, NYSE, 1.79, 2013-07-27\\n ADM, NYSE, 9.52, 2013-08-07\\n AEM, NYSE, -85.71, 2013-07-25\\n AEP, NYSE, -5.19, 2013-07-26\\n AES, NYSE, 23.08, 2013-08-09\\n AET, NYSE, 9.35, 2013-07-31\\n AFL, NYSE, 6.58, 2013-07-31\\n AGCO, NYSE, 18.78, 2013-08-01\\n AGN, NYSE, 1.01, 2013-07-26\\n HES, NYSE, 7.09, 2013-08-01\\n AIG, NYSE, 31.76, 2013-08-02\\n AIN, NYSE, -23.08, 2013-08-01\\n AJG, NYSE, 5.80, 2013-07-31\\n ALU, NYSE, 33.33, 2013-07-31\\n MATX, NYSE, 6.82, 2013-08-08\\n ALK, NYSE, -0.68, 2013-07-26\\n BEAM, NYSE, 6.67, 2013-08-09\\n AME, NYSE, 0.00, 2013-08-08\\n TWX, NYSE, 10.67, 2013-08-08\\n AVD, NYSE, -17.14, 2013-08-06\\n AMN, NYSE, 20.00, 2013-08-02\\n AN, NYSE, -1.35, 2013-07-19\\n AON, NYSE, 0.91, 2013-07-27\\n APA, NYSE, -0.50, 2013-08-02\\n APC, NYSE, 16.67, 2013-07-30\\n APD, NYSE, 0.00, 2013-07-24\\n APH, NYSE, 1.06, 2013-07-19\\n ARG, NYSE, -0.87, 2013-07-26\\n AAN, NYSE, 0.00, 2013-07-25\\n ARW, NYSE, 8.74, 2013-07-25\\n ASGN, NYSE, 14.29, 2013-07-25\\n ASH, NYSE, -8.29, 2013-07-26\\n ASR, NYSE, 21.90, 2013-07-23\\n GAS, NYSE, 51.85, 2013-08-01\\n ATO, NYSE, 13.51, 2013-08-07\\n ATW, NYSE, 0.74, 2013-08-01\\n AVP, NYSE, 11.54, 2013-08-02\\n AVT, NYSE, 3.16, 2013-08-08\\n AVY, NYSE, 2.90, 2013-07-24\\n AXP, NYSE, 4.96, 2013-07-18\\n B, NYSE, 0.00, 2013-07-27\\n BA, NYSE, 5.70, 2013-07-25\\n BAC, NYSE, 28.00, 2013-07-18\\n BAX, NYSE, 2.65, 2013-07-19\\n BC, NYSE, 13.89, 2013-07-26\\n OMX, NYSE, -33.33, 2013-08-07\\n BCE, NYSE, -2.67, 2013-08-09\\n BCR, NYSE, 2.90, 2013-07-24\\n BDX, NYSE, 7.48, 2013-08-02\\n BEN, NYSE, 1.18, 2013-07-30\\n BGG, NYSE, 15.79, 2013-08-16\\n BHE, NYSE, 10.71, 2013-07-26\\n BHI, NYSE, -6.15, 2013-07-20\\n BID, NYSE, -9.56, 2013-08-07\\n BIO, NYSE, 7.14, 2013-08-07\\n BK, NYSE, 6.90, 2013-07-18\\n BKH, NYSE, -2.38, 2013-08-06\\n WRB, NYSE, -2.99, 2013-07-23\\n BLC, NYSE, 9.09, 2013-07-31\\n BLL, NYSE, 1.19, 2013-07-26\\n BLX, NYSE, 5.56, 2013-07-19\\n BMI, NYSE, -20.00, 2013-07-19\\n BMS, NYSE, 1.67, 2013-07-26\\n BMY, NYSE, 0.00, 2013-07-26\\n BOH, NYSE, 2.41, 2013-07-23\\n BXS, NYSE, 10.00, 2013-07-23\\n BPL, NYSE, -8.86, 2013-08-03\\nBRK.A, NYSE, 176.30, 2013-08-03\\n BRO, NYSE, 2.86, 2013-07-16\\n BSX, NYSE, 12.50, 2013-07-26\\n BT, NYSE, 6.17, 2013-07-26\\n MTRN, NYSE, 7.50, 2013-07-27\\n CAI, NYSE, -8.54, 2013-07-31\\n CAT, NYSE, -15.20, 2013-07-25\\n CB, NYSE, 19.27, 2013-07-24\\n CBI, NYSE, 0.00, 2013-07-31\\n CBM, NYSE, -64.29, 2013-08-02\\n CBU, NYSE, 4.00, 
2013-07-24\\n CBT, NYSE, -4.35, 2013-08-01\\n CCC, NYSE, 14.29, 2013-08-07\\n CCE, NYSE, 2.67, 2013-07-26\\n C, NYSE, 5.93, 2013-07-16\\n CCK, NYSE, 3.23, 2013-07-18\\n CCU, NYSE, 25.00, 2013-08-08\\n CDE, NYSE, -1100.00, 2013-08-09\\n CDI, NYSE, 6.25, 2013-08-02\\n CAH, NYSE, 2.60, 2013-08-02\\n CFR, NYSE, 0.00, 2013-07-25\\n CHD, NYSE, 1.67, 2013-08-03\\n CKP, NYSE, -15.38, 2013-08-07\\n CPK, NYSE, -7.02, 2013-08-10\\n CI, NYSE, 11.95, 2013-08-02\\n CKH, NYSE, 51.67, 2013-07-31\\n CL, NYSE, 0.00, 2013-07-26\\n CLF, NYSE, 85.25, 2013-07-26\\n CLH, NYSE, -25.00, 2013-08-08\\n CLX, NYSE, 2.99, 2013-08-02\\n CMA, NYSE, 8.57, 2013-07-17\\n CMO, NYSE, -15.63, 2013-07-25\\n CRK, NYSE, -6.67, 2013-07-30\\n CMS, NYSE, -14.71, 2013-07-26\\n CNA, NYSE, 17.19, 2013-07-31\\n CNW, NYSE, 13.56, 2013-08-01\\n CNL, NYSE, -6.06, 2013-08-01\\n COG, NYSE, 35.48, 2013-07-25\\n COT, NYSE, -4.76, 2013-08-02\\n CP, NYSE, -4.14, 2013-07-25\\n CPF, NYSE, 25.93, 2013-07-26\\n CQB, NYSE, 43.48, 2013-08-09\\n CR, NYSE, 0.00, 2013-07-23\\nCRD.B, NYSE, 42.86, 2013-08-06\\n CRS, NYSE, 11.59, 2013-07-31\\n CSC, NYSE, 42.19, 2013-08-07\\n CSL, NYSE, -14.93, 2013-07-24\\n CTB, NYSE, -38.20, 2013-08-09\\n CTL, NYSE, 2.99, 2013-08-08\\n CTS, NYSE, 33.33, 2013-07-23\\n CUB, NYSE, 9.52, 2013-08-02\\n CMI, NYSE, 11.11, 2013-07-31\\n CUZ, NYSE, 9.09, 2013-07-30\\n CVC, NYSE, 80.00, 2013-08-03\\n CW, NYSE, 6.06, 2013-08-01\\n CWT, NYSE, 0.00, 2013-08-01\\n CX, NYSE, 0.00, 2013-07-26\\n CYN, NYSE, 8.33, 2013-07-19\\n D, NYSE, -4.62, 2013-08-07\\n DBD, NYSE, 0.00, 2013-08-15\\n DCO, NYSE, 30.77, 2013-08-06\\n DD, NYSE, 0.79, 2013-07-24\\n CVA, NYSE, 150.00, 2013-07-18\\n DHR, NYSE, 2.35, 2013-07-19\\n DIS, NYSE, 0.00, 2013-08-07\\n DLX, NYSE, 10.34, 2013-07-26\\n DNB, NYSE, 2.00, 2013-08-08\\n RRD, NYSE, 4.65, 2013-07-30\\n DOV, NYSE, 5.43, 2013-07-19\\n DOW, NYSE, 1.59, 2013-07-26\\n DRE, NYSE, 0.00, 2013-08-01\\n DHI, NYSE, 23.53, 2013-07-26\\n UFS, NYSE, -25.00, 2013-07-26\\n DTE, NYSE, -21.52, 2013-07-27\\n DUK, NYSE, -6.45, 2013-08-08\\n DVN, NYSE, 28.72, 2013-08-08\\n DV, NYSE, 31.71, 2013-08-09\\n EAT, NYSE, 4.05, 2013-08-03\\n ECL, NYSE, 2.38, 2013-07-31\\n ED, NYSE, -5.26, 2013-08-02\\n EDE, NYSE, 8.00, 2013-07-26\\n EFX, NYSE, 2.22, 2013-07-25\\n EGN, NYSE, 8.20, 2013-08-01\\n EGP, NYSE, 2.56, 2013-07-19\\n ELP, NYSE, 17.65, 2013-08-16\\n ELY, NYSE, 20.00, 2013-07-26\\n EMC, NYSE, 2.94, 2013-07-25\\n EMR, NYSE, -2.02, 2013-08-07\\n EOG, NYSE, 19.32, 2013-08-07\\n EQT, NYSE, 3.64, 2013-07-26\\n ESE, NYSE, -41.07, 2013-08-09\\n ESV, NYSE, 3.33, 2013-07-30\\n ETN, NYSE, -1.80, 2013-08-03\\n ETR, NYSE, 3.06, 2013-07-31\\n EXAR, NYSE, 14.29, 2013-07-25\\n F, NYSE, 21.62, 2013-07-25\\n CLGX, NYSE, 13.64, 2013-07-25\\n FNB, NYSE, 0.00, 2013-07-24\\n FCF, NYSE, -50.00, 2013-07-25\\n FBP, NYSE, -11.11, 2013-07-25\\n FICO, NYSE, 6.35, 2013-07-31\\n FLO, NYSE, 4.35, 2013-08-14\\n FMC, NYSE, 0.00, 2013-07-30\\n FOE, NYSE, 27.27, 2013-08-01\\n S, NYSE, 6.06, 2013-07-31\\n NEE, NYSE, 13.18, 2013-07-31\\n FRT, NYSE, 0.88, 2013-08-01\\n FRX, NYSE, 300.00, 2013-07-24\\n FSS, NYSE, 64.29, 2013-08-10\\n FUN, NYSE, 2.41, 2013-08-09\\n FUR, NYSE, -48.15, 2013-08-02\\n GBL, NYSE, 17.20, 2013-08-07\\n GVA, NYSE, -78.13, 2013-08-02\\n BGC, NYSE, 23.21, 2013-08-01\\n GD, NYSE, 11.73, 2013-07-25\\n GE, NYSE, 0.00, 2013-07-20\\n RHP, NYSE, -26.85, 2013-08-07\\n AXLL, NYSE, 2.59, 2013-08-01\\n GGG, NYSE, 9.52, 2013-07-25\\n GHM, NYSE, 52.00, 2013-07-26\\n GIB, NYSE, 10.71, 2013-08-01\\n GLT, NYSE, 20.00, 2013-07-31\\n GLW, NYSE, 3.23, 2013-07-31\\n 
GSK, NYSE, -5.88, 2013-07-25\\n GLF, NYSE, 25.71, 2013-07-23\\n GPC, NYSE, 14.88, 2013-07-19\\n GRA, NYSE, 2.75, 2013-07-26\\n GTY, NYSE, 36.00, 2013-08-08\\n GWW, NYSE, 2.71, 2013-07-18\\n HAE, NYSE, 0.00, 2013-07-30\\n HAL, NYSE, 1.39, 2013-07-23\\n HAR, NYSE, 4.60, 2013-08-07\\n HVT, NYSE, 31.25, 2013-08-01\\n HRC, NYSE, 0.00, 2013-07-25\\n HCC, NYSE, 21.69, 2013-07-31\\n HCN, NYSE, 1.09, 2013-08-07\\n HCP, NYSE, -2.70, 2013-07-31\\n HOG, NYSE, 3.42, 2013-07-26\\n HE, NYSE, 7.89, 2013-08-09\\n HMA, NYSE, -46.15, 2013-08-10\\n HMN, NYSE, 30.00, 2013-07-25\\n HFC, NYSE, 0.00, 2013-08-08\\n HOT, NYSE, 8.22, 2013-07-26\\n HP, NYSE, 6.67, 2013-07-27\\n HLS, NYSE, 18.60, 2013-07-26\\n HRS, NYSE, 23.68, 2013-07-31\\n HSC, NYSE, -11.76, 2013-08-09\\n HSY, NYSE, 1.41, 2013-07-26\\n HUBB, NYSE, 5.38, 2013-07-19\\n HUM, NYSE, 6.91, 2013-08-01\\n HXL, NYSE, 2.13, 2013-07-23\\n IBM, NYSE, 3.44, 2013-07-18\\n IDA, NYSE, 33.82, 2013-08-02\\n IEX, NYSE, 2.70, 2013-07-23\\n IFF, NYSE, -3.39, 2013-08-07\\n DIN, NYSE, 12.09, 2013-07-31\\n INT, NYSE, 11.76, 2013-08-01\\n IP, NYSE, -5.45, 2013-07-26\\n IPG, NYSE, -14.29, 2013-07-20\\n IO, NYSE, -100.00, 2013-08-08\\n IR, NYSE, 5.56, 2013-07-20\\n IRF, NYSE, 81.82, 2013-08-20\\n ITW, NYSE, -0.92, 2013-07-24\\n JEC, NYSE, -1.19, 2013-07-30\\n JNJ, NYSE, 5.71, 2013-07-17\\n JNY, NYSE, 116.67, 2013-08-01\\n K, NYSE, 3.09, 2013-08-02\\n KAMN, NYSE, 13.56, 2013-07-30\\n KDN, NYSE, 10.53, 2013-07-26\\n KEX, NYSE, 0.94, 2013-07-25\\n KEY, NYSE, 5.00, 2013-07-19\\n KIM, NYSE, 6.06, 2013-07-30\\n KMB, NYSE, 1.44, 2013-07-23\\n KEM, NYSE, -95.00, 2013-07-26\\n KMT, NYSE, 4.11, 2013-07-26\\n KO, NYSE, 0.00, 2013-07-17\\n KSU, NYSE, 1.05, 2013-07-20\\n LDR, NYSE, -19.64, 2013-08-06\\n LEG, NYSE, 0.00, 2013-07-26\\n LLY, NYSE, 13.73, 2013-07-25\\n LM, NYSE, -1.45, 2013-07-26\\n LNC, NYSE, 10.43, 2013-08-01\\n LPX, NYSE, 32.26, 2013-08-07\\n LXU, NYSE, 29.17, 2013-08-09\\n LTC, NYSE, -3.39, 2013-08-09\\n L, NYSE, -5.48, 2013-07-30\\n LUV, NYSE, -2.56, 2013-07-26\\n LUX, NYSE, -1.67, 2013-07-26\\n MKL, NYSE, 7.46, 2013-08-08\\n MAN, NYSE, 17.98, 2013-07-20\\n MTW, NYSE, 25.00, 2013-07-30\\n SM, NYSE, 0.00, 2013-07-31\\n MAS, NYSE, 21.05, 2013-07-30\\n MTZ, NYSE, 2.33, 2013-08-02\\n MCD, NYSE, -1.43, 2013-07-23\\n MDC, NYSE, 38.18, 2013-07-31\\n MDP, NYSE, 5.63, 2013-07-26\\n MDR, NYSE, -1966.67, 2013-08-06\\n MDU, NYSE, -3.85, 2013-08-01\\n MED, NYSE, 2.00, 2013-08-07\\n CVS, NYSE, 1.04, 2013-08-07\\n MFC, NYSE, -3.12, 2013-08-09\\n MGA, NYSE, 11.25, 2013-08-10\\n MGM, NYSE, 300.00, 2013-08-07\\n MMC, NYSE, 2.94, 2013-08-08\\n MMM, NYSE, 0.59, 2013-07-26\\n MSA, NYSE, 0.00, 2013-07-25\\n MNR, NYSE, -27.78, 2013-08-07\\n MO, NYSE, -1.59, 2013-07-24\\n MOD, NYSE, 145.45, 2013-08-02\\nMOG.A, NYSE, 8.43, 2013-07-27\\n MHK, NYSE, 10.84, 2013-08-02\\n MSI, NYSE, 11.96, 2013-07-25\\n MCY, NYSE, 3.28, 2013-07-30\\n MRK, NYSE, 2.44, 2013-07-31\\n MRO, NYSE, -5.63, 2013-08-07\\n POWR, NYSE, 20.00, 2013-08-08\\n MTG, NYSE, 118.75, 2013-07-24\\n MTB, NYSE, 26.19, 2013-07-18\\n MTX, NYSE, 8.62, 2013-07-26\\n MUR, NYSE, 12.90, 2013-08-01\\n MYE, NYSE, 19.05, 2013-07-19\\n NBL, NYSE, -5.48, 2013-07-26\\n NBR, NYSE, -11.11, 2013-07-24\\n NE, NYSE, 12.50, 2013-07-18\\n NEM, NYSE, -124.39, 2013-07-27\\n NFG, NYSE, 6.15, 2013-08-09\\n NHI, NYSE, -1.14, 2013-08-07\\n NI, NYSE, -4.17, 2013-08-01\\n NJR, NYSE, 15.00, 2013-08-08\\n THC, NYSE, -4.35, 2013-08-07\\n NNN, NYSE, 0.00, 2013-08-02\\n NOC, NYSE, 20.59, 2013-07-25\\n NR, NYSE, -5.26, 2013-07-26\\n NSC, NYSE, -2.67, 2013-07-24\\n NUE, 
NYSE, -10.00, 2013-07-19\\n NVR, NYSE, -18.34, 2013-07-23\\n NWL, NYSE, 2.04, 2013-07-27\\n NWN, NYSE, -11.11, 2013-08-08\\n NYT, NYSE, 16.67, 2013-08-02\\n OCR, NYSE, 4.65, 2013-07-25\\n OGE, NYSE, -2.13, 2013-08-09\\n OHI, NYSE, 1.64, 2013-08-01\\n OI, NYSE, 2.53, 2013-07-25\\n OII, NYSE, 8.33, 2013-07-25\\n OKE, NYSE, -225.93, 2013-07-31\\n OLN, NYSE, 3.85, 2013-07-26\\n BRS, NYSE, 1.01, 2013-08-06\\n OMC, NYSE, 0.00, 2013-07-19\\n OMI, NYSE, 0.00, 2013-07-30\\n ORB, NYSE, 17.39, 2013-07-19\\n ORI, NYSE, 1750.00, 2013-07-26\\n OSK, NYSE, 53.21, 2013-07-31\\n OXY, NYSE, -1.86, 2013-07-31\\n FCFS, NYSE, 1.79, 2013-07-18\\n PBI, NYSE, 15.56, 2013-07-31\\n PCG, NYSE, 9.72, 2013-08-01\\n PCL, NYSE, 21.74, 2013-07-30\\n PCP, NYSE, -0.69, 2013-07-26\\n TPC, NYSE, -11.11, 2013-08-10\\n PEG, NYSE, 4.35, 2013-07-31\\n PEI, NYSE, 7.69, 2013-07-24\\n PEP, NYSE, 10.08, 2013-07-25\\n PFE, NYSE, 3.70, 2013-07-31\\n PG, NYSE, 2.60, 2013-08-02\\n PGR, NYSE, -2.44, 2013-07-12\\n PH, NYSE, -8.72, 2013-08-07\\n PHM, NYSE, -10.34, 2013-07-26\\n PKD, NYSE, 0.00, 2013-08-07\\n PKY, NYSE, 0.00, 2013-08-06\\n PNC, NYSE, 21.34, 2013-07-18\\n PNM, NYSE, 15.15, 2013-08-03\\n PNR, NYSE, 2.22, 2013-07-24\\n PNW, NYSE, 3.51, 2013-08-03\\n POM, NYSE, -8.33, 2013-08-08\\n POT, NYSE, -10.98, 2013-07-26\\n PPG, NYSE, 4.70, 2013-07-19\\n PPL, NYSE, 0.00, 2013-08-02'\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.set_printoptions",
"pandas.compat.StringIO"
]
] |
SJCosgrove/quantoipian | [
"8beba055aa4211dc2debc5c3083077cbd19d0bbc"
] | [
"zipline/data/history_loader.py"
] | [
"# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import (\n ABCMeta,\n abstractmethod,\n abstractproperty,\n)\n\nfrom numpy import concatenate\nfrom lru import LRU\nfrom pandas import isnull\nfrom pandas.tslib import normalize_date\nfrom toolz import sliding_window\n\nfrom six import with_metaclass\n\nfrom zipline.assets import Equity, Future\nfrom zipline.assets.continuous_futures import ContinuousFuture\nfrom zipline.lib._int64window import AdjustedArrayWindow as Int64Window\nfrom zipline.lib._float64window import AdjustedArrayWindow as Float64Window\nfrom zipline.lib.adjustment import Float64Multiply, Float64Add\nfrom zipline.utils.cache import ExpiringCache\nfrom zipline.utils.math_utils import number_of_decimal_places\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.numpy_utils import float64_dtype\nfrom zipline.utils.pandas_utils import find_in_sorted_index\n\n# Default number of decimal places used for rounding asset prices.\nDEFAULT_ASSET_PRICE_DECIMALS = 3\n\n\nclass HistoryCompatibleUSEquityAdjustmentReader(object):\n\n def __init__(self, adjustment_reader):\n self._adjustments_reader = adjustment_reader\n\n def load_adjustments(self, columns, dts, assets):\n \"\"\"\n Returns\n -------\n adjustments : list[dict[int -> Adjustment]]\n A list, where each element corresponds to the `columns`, of\n mappings from index to adjustment objects to apply at that index.\n \"\"\"\n out = [None] * len(columns)\n for i, column in enumerate(columns):\n adjs = {}\n for asset in assets:\n adjs.update(self._get_adjustments_in_range(\n asset, dts, column))\n out[i] = adjs\n return out\n\n def _get_adjustments_in_range(self, asset, dts, field):\n \"\"\"\n Get the Float64Multiply objects to pass to an AdjustedArrayWindow.\n\n For the use of AdjustedArrayWindow in the loader, which looks back\n from current simulation time back to a window of data the dictionary is\n structured with:\n - the key into the dictionary for adjustments is the location of the\n day from which the window is being viewed.\n - the start of all multiply objects is always 0 (in each window all\n adjustments are overlapping)\n - the end of the multiply object is the location before the calendar\n location of the adjustment action, making all days before the event\n adjusted.\n\n Parameters\n ----------\n asset : Asset\n The assets for which to get adjustments.\n dts : iterable of datetime64-like\n The dts for which adjustment data is needed.\n field : str\n OHLCV field for which to get the adjustments.\n\n Returns\n -------\n out : dict[loc -> Float64Multiply]\n The adjustments as a dict of loc -> Float64Multiply\n \"\"\"\n sid = int(asset)\n start = normalize_date(dts[0])\n end = normalize_date(dts[-1])\n adjs = {}\n if field != 'volume':\n mergers = self._adjustments_reader.get_adjustments_for_sid(\n 'mergers', sid)\n for m in mergers:\n dt = m[0]\n if start < dt <= end:\n end_loc = dts.searchsorted(dt)\n adj_loc = end_loc\n mult = Float64Multiply(0,\n end_loc - 1,\n 
0,\n 0,\n m[1])\n try:\n adjs[adj_loc].append(mult)\n except KeyError:\n adjs[adj_loc] = [mult]\n divs = self._adjustments_reader.get_adjustments_for_sid(\n 'dividends', sid)\n for d in divs:\n dt = d[0]\n if start < dt <= end:\n end_loc = dts.searchsorted(dt)\n adj_loc = end_loc\n mult = Float64Multiply(0,\n end_loc - 1,\n 0,\n 0,\n d[1])\n try:\n adjs[adj_loc].append(mult)\n except KeyError:\n adjs[adj_loc] = [mult]\n splits = self._adjustments_reader.get_adjustments_for_sid(\n 'splits', sid)\n for s in splits:\n dt = s[0]\n if start < dt <= end:\n if field == 'volume':\n ratio = 1.0 / s[1]\n else:\n ratio = s[1]\n end_loc = dts.searchsorted(dt)\n adj_loc = end_loc\n mult = Float64Multiply(0,\n end_loc - 1,\n 0,\n 0,\n ratio)\n try:\n adjs[adj_loc].append(mult)\n except KeyError:\n adjs[adj_loc] = [mult]\n return adjs\n\n\nclass ContinuousFutureAdjustmentReader(object):\n \"\"\"\n Calculates adjustments for continuous futures, based on the\n close and open of the contracts on the either side of each roll.\n \"\"\"\n\n def __init__(self,\n trading_calendar,\n asset_finder,\n bar_reader,\n roll_finders,\n frequency):\n self._trading_calendar = trading_calendar\n self._asset_finder = asset_finder\n self._bar_reader = bar_reader\n self._roll_finders = roll_finders\n self._frequency = frequency\n\n def load_adjustments(self, columns, dts, assets):\n \"\"\"\n Returns\n -------\n adjustments : list[dict[int -> Adjustment]]\n A list, where each element corresponds to the `columns`, of\n mappings from index to adjustment objects to apply at that index.\n \"\"\"\n out = [None] * len(columns)\n for i, column in enumerate(columns):\n adjs = {}\n for asset in assets:\n adjs.update(self._get_adjustments_in_range(\n asset, dts, column))\n out[i] = adjs\n return out\n\n def _make_adjustment(self,\n adjustment_type,\n front_close,\n back_close,\n end_loc):\n adj_base = back_close - front_close\n if adjustment_type == 'mul':\n adj_value = 1.0 + adj_base / front_close\n adj_class = Float64Multiply\n elif adjustment_type == 'add':\n adj_value = adj_base\n adj_class = Float64Add\n return adj_class(0,\n end_loc,\n 0,\n 0,\n adj_value)\n\n def _get_adjustments_in_range(self, cf, dts, field):\n if field == 'volume' or field == 'sid':\n return {}\n if cf.adjustment is None:\n return {}\n rf = self._roll_finders[cf.roll_style]\n partitions = []\n\n rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1],\n cf.offset)\n\n tc = self._trading_calendar\n\n adjs = {}\n\n for front, back in sliding_window(2, rolls):\n front_sid, roll_dt = front\n back_sid = back[0]\n dt = tc.previous_session_label(roll_dt)\n if self._frequency == 'minute':\n dt = tc.open_and_close_for_session(dt)[1]\n roll_dt = tc.open_and_close_for_session(roll_dt)[0]\n partitions.append((front_sid,\n back_sid,\n dt,\n roll_dt))\n for partition in partitions:\n front_sid, back_sid, dt, roll_dt = partition\n last_front_dt = self._bar_reader.get_last_traded_dt(\n self._asset_finder.retrieve_asset(front_sid), dt)\n last_back_dt = self._bar_reader.get_last_traded_dt(\n self._asset_finder.retrieve_asset(back_sid), dt)\n if isnull(last_front_dt) or isnull(last_back_dt):\n continue\n front_close = self._bar_reader.get_value(\n front_sid, last_front_dt, 'close')\n back_close = self._bar_reader.get_value(\n back_sid, last_back_dt, 'close')\n adj_loc = dts.searchsorted(roll_dt)\n end_loc = adj_loc - 1\n adj = self._make_adjustment(cf.adjustment,\n front_close,\n back_close,\n end_loc)\n try:\n adjs[adj_loc].append(adj)\n except KeyError:\n adjs[adj_loc] = [adj]\n 
return adjs\n\n\nclass SlidingWindow(object):\n \"\"\"\n Wrapper around an AdjustedArrayWindow which supports monotonically\n increasing (by datetime) requests for a sized window of data.\n\n Parameters\n ----------\n window : AdjustedArrayWindow\n Window of pricing data with prefetched values beyond the current\n simulation dt.\n cal_start : int\n Index in the overall calendar at which the window starts.\n \"\"\"\n\n def __init__(self, window, size, cal_start, offset):\n self.window = window\n self.cal_start = cal_start\n self.current = next(window)\n self.offset = offset\n self.most_recent_ix = self.cal_start + size\n\n def get(self, end_ix):\n \"\"\"\n Returns\n -------\n out : A np.ndarray of the equity pricing up to end_ix after adjustments\n and rounding have been applied.\n \"\"\"\n if self.most_recent_ix == end_ix:\n return self.current\n\n target = end_ix - self.cal_start - self.offset + 1\n self.current = self.window.seek(target)\n\n self.most_recent_ix = end_ix\n return self.current\n\n\nclass HistoryLoader(with_metaclass(ABCMeta)):\n \"\"\"\n Loader for sliding history windows, with support for adjustments.\n\n Parameters\n ----------\n trading_calendar: TradingCalendar\n Contains the grouping logic needed to assign minutes to periods.\n reader : DailyBarReader, MinuteBarReader\n Reader for pricing bars.\n adjustment_reader : SQLiteAdjustmentReader\n Reader for adjustment data.\n \"\"\"\n FIELDS = ('open', 'high', 'low', 'close', 'volume', 'sid')\n\n def __init__(self, trading_calendar, reader, equity_adjustment_reader,\n asset_finder,\n roll_finders=None,\n sid_cache_size=1000,\n prefetch_length=0):\n self.trading_calendar = trading_calendar\n self._asset_finder = asset_finder\n self._reader = reader\n self._adjustment_readers = {}\n if equity_adjustment_reader is not None:\n self._adjustment_readers[Equity] = \\\n HistoryCompatibleUSEquityAdjustmentReader(\n equity_adjustment_reader)\n if roll_finders:\n self._adjustment_readers[ContinuousFuture] =\\\n ContinuousFutureAdjustmentReader(trading_calendar,\n asset_finder,\n reader,\n roll_finders,\n self._frequency)\n self._window_blocks = {\n field: ExpiringCache(LRU(sid_cache_size))\n for field in self.FIELDS\n }\n self._prefetch_length = prefetch_length\n\n @abstractproperty\n def _frequency(self):\n pass\n\n @abstractproperty\n def _calendar(self):\n pass\n\n @abstractmethod\n def _array(self, start, end, assets, field):\n pass\n\n def _decimal_places_for_asset(self, asset, reference_date):\n if isinstance(asset, Future) and asset.tick_size:\n return number_of_decimal_places(asset.tick_size)\n elif isinstance(asset, ContinuousFuture):\n # Tick size should be the same for all contracts of a continuous\n # future, so arbitrarily get the contract with next upcoming auto\n # close date.\n oc = self._asset_finder.get_ordered_contracts(asset.root_symbol)\n contract_sid = oc.contract_before_auto_close(reference_date.value)\n if contract_sid is not None:\n contract = self._asset_finder.retrieve_asset(contract_sid)\n if contract.tick_size:\n return number_of_decimal_places(contract.tick_size)\n return DEFAULT_ASSET_PRICE_DECIMALS\n\n def _ensure_sliding_windows(self, assets, dts, field,\n is_perspective_after):\n \"\"\"\n Ensure that there is a Float64Multiply window for each asset that can\n provide data for the given parameters.\n If the corresponding window for the (assets, len(dts), field) does not\n exist, then create a new one.\n If a corresponding window does exist for (assets, len(dts), field), but\n can not provide data for 
the current dts range, then create a new\n one and replace the expired window.\n\n Parameters\n ----------\n assets : iterable of Assets\n The assets in the window\n dts : iterable of datetime64-like\n The datetimes for which to fetch data.\n Makes an assumption that all dts are present and contiguous,\n in the calendar.\n field : str\n The OHLCV field for which to retrieve data.\n is_perspective_after : bool\n see: `PricingHistoryLoader.history`\n\n Returns\n -------\n out : list of Float64Window with sufficient data so that each asset's\n window can provide `get` for the index corresponding with the last\n value in `dts`\n \"\"\"\n end = dts[-1]\n size = len(dts)\n asset_windows = {}\n needed_assets = []\n cal = self._calendar\n\n assets = self._asset_finder.retrieve_all(assets)\n end_ix = find_in_sorted_index(cal, end)\n\n for asset in assets:\n try:\n window = self._window_blocks[field].get(\n (asset, size, is_perspective_after), end)\n except KeyError:\n needed_assets.append(asset)\n else:\n if end_ix < window.most_recent_ix:\n # Window needs reset. Requested end index occurs before the\n # end index from the previous history call for this window.\n # Grab new window instead of rewinding adjustments.\n needed_assets.append(asset)\n else:\n asset_windows[asset] = window\n\n if needed_assets:\n offset = 0\n start_ix = find_in_sorted_index(cal, dts[0])\n\n prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)\n prefetch_end = cal[prefetch_end_ix]\n prefetch_dts = cal[start_ix:prefetch_end_ix + 1]\n if is_perspective_after:\n adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1)\n adj_dts = cal[start_ix:adj_end_ix + 1]\n else:\n adj_dts = prefetch_dts\n prefetch_len = len(prefetch_dts)\n array = self._array(prefetch_dts, needed_assets, field)\n\n if field == 'sid':\n window_type = Int64Window\n else:\n window_type = Float64Window\n\n view_kwargs = {}\n if field == 'volume':\n array = array.astype(float64_dtype)\n\n for i, asset in enumerate(needed_assets):\n adj_reader = None\n try:\n adj_reader = self._adjustment_readers[type(asset)]\n except KeyError:\n adj_reader = None\n if adj_reader is not None:\n adjs = adj_reader.load_adjustments(\n [field], adj_dts, [asset])[0]\n else:\n adjs = {}\n window = window_type(\n array[:, i].reshape(prefetch_len, 1),\n view_kwargs,\n adjs,\n offset,\n size,\n int(is_perspective_after),\n self._decimal_places_for_asset(asset, dts[-1]),\n )\n sliding_window = SlidingWindow(window, size, start_ix, offset)\n asset_windows[asset] = sliding_window\n self._window_blocks[field].set(\n (asset, size, is_perspective_after),\n sliding_window,\n prefetch_end)\n\n return [asset_windows[asset] for asset in assets]\n\n def history(self, assets, dts, field, is_perspective_after):\n \"\"\"\n A window of pricing data with adjustments applied assuming that the\n end of the window is the day before the current simulation time.\n\n Parameters\n ----------\n assets : iterable of Assets\n The assets in the window.\n dts : iterable of datetime64-like\n The datetimes for which to fetch data.\n Makes an assumption that all dts are present and contiguous,\n in the calendar.\n field : str\n The OHLCV field for which to retrieve data.\n is_perspective_after : bool\n True, if the window is being viewed immediately after the last dt\n in the sliding window.\n False, if the window is viewed on the last dt.\n\n This flag is used for handling the case where the last dt in the\n requested window immediately precedes a corporate action, e.g.:\n\n - is_perspective_after is 
True\n\n When the viewpoint is after the last dt in the window, as when a\n daily history window is accessed from a simulation that uses a\n minute data frequency, the history call to this loader will not\n include the current simulation dt. At that point in time, the raw\n data for the last day in the window will require adjustment, so the\n most recent adjustment with respect to the simulation time is\n applied to the last dt in the requested window.\n\n An example equity which has a 0.5 split ratio dated for 05-27,\n with the dts for a history call of 5 bars with a '1d' frequency at\n 05-27 9:31. Simulation frequency is 'minute'.\n\n (In this case this function is called with 4 daily dts, and the\n calling function is responsible for stitching back on the\n 'current' dt)\n\n | | | | | last dt | <-- viewer is here |\n | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |\n | raw | 10.10 | 10.20 | 10.30 | 10.40 | |\n | adj | 5.05 | 5.10 | 5.15 | 5.25 | |\n\n The adjustment is applied to the last dt, 05-26, and all previous\n dts.\n\n - is_perspective_after is False, daily\n\n When the viewpoint is the same point in time as the last dt in the\n window, as when a daily history window is accessed from a\n simulation that uses a daily data frequency, the history call will\n include the current dt. At that point in time, the raw data for the\n last day in the window will be post-adjustment, so no adjustment\n is applied to the last dt.\n\n An example equity which has a 0.5 split ratio dated for 05-27,\n with the dts for a history call of 5 bars with a '1d' frequency at\n 05-27 0:00. Simulation frequency is 'daily'.\n\n | | | | | | <-- viewer is here |\n | | | | | | last dt |\n | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |\n | raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |\n | adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |\n\n Adjustments are applied 05-23 through 05-26 but not to the last dt,\n 05-27\n\n Returns\n -------\n out : np.ndarray with shape(len(days between start, end), len(assets))\n \"\"\"\n block = self._ensure_sliding_windows(assets,\n dts,\n field,\n is_perspective_after)\n end_ix = self._calendar.searchsorted(dts[-1])\n\n return concatenate(\n [window.get(end_ix) for window in block],\n axis=1,\n )\n\n\nclass DailyHistoryLoader(HistoryLoader):\n\n @property\n def _frequency(self):\n return 'daily'\n\n @property\n def _calendar(self):\n return self._reader.sessions\n\n def _array(self, dts, assets, field):\n return self._reader.load_raw_arrays(\n [field],\n dts[0],\n dts[-1],\n assets,\n )[0]\n\n\nclass MinuteHistoryLoader(HistoryLoader):\n\n @property\n def _frequency(self):\n return 'minute'\n\n @lazyval\n def _calendar(self):\n mm = self.trading_calendar.all_minutes\n start = mm.searchsorted(self._reader.first_trading_day)\n end = mm.searchsorted(self._reader.last_available_dt, side='right')\n return mm[start:end]\n\n def _array(self, dts, assets, field):\n return self._reader.load_raw_arrays(\n [field],\n dts[0],\n dts[-1],\n assets,\n )[0]\n"
] | [
[
"pandas.tslib.normalize_date",
"pandas.isnull"
]
] |
taroxd/mindspore | [
"9bb620ff2caaac7f1c53c4b104935f22352cb88f",
"9bb620ff2caaac7f1c53c4b104935f22352cb88f"
] | [
"model_zoo/official/cv/ssd/src/dataset.py",
"model_zoo/official/nlp/lstm/eval.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"SSD dataset\"\"\"\n\nfrom __future__ import division\n\nimport os\nimport json\nimport xml.etree.ElementTree as et\nimport numpy as np\nimport cv2\n\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as C\nfrom mindspore.mindrecord import FileWriter\nfrom .config import config\nfrom .box_utils import jaccard_numpy, ssd_bboxes_encode\n\n\ndef _rand(a=0., b=1.):\n \"\"\"Generate random.\"\"\"\n return np.random.rand() * (b - a) + a\n\n\ndef get_imageId_from_fileName(filename, id_iter):\n \"\"\"Get imageID from fileName if fileName is int, else return id_iter.\"\"\"\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter\n\n\ndef random_sample_crop(image, boxes):\n \"\"\"Random Crop the image and boxes\"\"\"\n height, width, _ = image.shape\n min_iou = np.random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])\n\n if min_iou is None:\n return image, boxes\n\n # max trails (50)\n for _ in range(50):\n image_t = image\n\n w = _rand(0.3, 1.0) * width\n h = _rand(0.3, 1.0) * height\n\n # aspect ratio constraint b/t .5 & 2\n if h / w < 0.5 or h / w > 2:\n continue\n\n left = _rand() * (width - w)\n top = _rand() * (height - h)\n\n rect = np.array([int(top), int(left), int(top + h), int(left + w)])\n overlap = jaccard_numpy(boxes, rect)\n\n # dropout some boxes\n drop_mask = overlap > 0\n if not drop_mask.any():\n continue\n\n if overlap[drop_mask].min() < min_iou and overlap[drop_mask].max() > (min_iou + 0.2):\n continue\n\n image_t = image_t[rect[0]:rect[2], rect[1]:rect[3], :]\n\n centers = (boxes[:, :2] + boxes[:, 2:4]) / 2.0\n\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n\n # mask in that both m1 and m2 are true\n mask = m1 * m2 * drop_mask\n\n # have any valid boxes? 
try again if not\n if not mask.any():\n continue\n\n # take only matching gt boxes\n boxes_t = boxes[mask, :].copy()\n\n boxes_t[:, :2] = np.maximum(boxes_t[:, :2], rect[:2])\n boxes_t[:, :2] -= rect[:2]\n boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], rect[2:4])\n boxes_t[:, 2:4] -= rect[:2]\n\n return image_t, boxes_t\n return image, boxes\n\n\ndef preprocess_fn(img_id, image, box, is_training):\n \"\"\"Preprocess function for dataset.\"\"\"\n cv2.setNumThreads(2)\n\n def _infer_data(image, input_shape):\n img_h, img_w, _ = image.shape\n input_h, input_w = input_shape\n\n image = cv2.resize(image, (input_w, input_h))\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n return img_id, image, np.array((img_h, img_w), np.float32)\n\n def _data_aug(image, box, is_training, image_size=(300, 300)):\n \"\"\"Data augmentation function.\"\"\"\n ih, iw, _ = image.shape\n w, h = image_size\n\n if not is_training:\n return _infer_data(image, image_size)\n\n # Random crop\n box = box.astype(np.float32)\n image, box = random_sample_crop(image, box)\n ih, iw, _ = image.shape\n\n # Resize image\n image = cv2.resize(image, (w, h))\n\n # Flip image or not\n flip = _rand() < .5\n if flip:\n image = cv2.flip(image, 1, dst=None)\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n box[:, [0, 2]] = box[:, [0, 2]] / ih\n box[:, [1, 3]] = box[:, [1, 3]] / iw\n\n if flip:\n box[:, [1, 3]] = 1 - box[:, [3, 1]]\n\n box, label, num_match = ssd_bboxes_encode(box)\n return image, box, label, num_match\n\n return _data_aug(image, box, is_training, image_size=config.img_shape)\n\n\ndef create_voc_label(is_training):\n \"\"\"Get image path and annotation from VOC.\"\"\"\n voc_root = config.voc_root\n cls_map = {name: i for i, name in enumerate(config.classes)}\n sub_dir = 'train' if is_training else 'eval'\n voc_dir = os.path.join(voc_root, sub_dir)\n if not os.path.isdir(voc_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = voc_dir\n if os.path.isdir(os.path.join(voc_dir, 'Images')):\n image_dir = os.path.join(voc_dir, 'Images')\n if os.path.isdir(os.path.join(voc_dir, 'Annotations')):\n anno_dir = os.path.join(voc_dir, 'Annotations')\n\n if not is_training:\n json_file = os.path.join(config.voc_root, config.voc_json)\n file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n id_iter = 0\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n img_id = get_imageId_from_fileName(file_name, id_iter)\n id_iter += 1\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(bnd_box.find('xmin').text) - 1\n 
y_min = int(bnd_box.find('ymin').text) - 1\n x_max = int(bnd_box.find('xmax').text) - 1\n y_max = int(bnd_box.find('ymax').text) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': \\\n img_id, 'bbox': [x_min, y_min, o_width, o_height], \\\n 'category_id': cls_map[cls_name], 'id': bnd_id, \\\n 'ignore': 0, \\\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict\n\n\ndef create_coco_label(is_training):\n \"\"\"Get image path and annotation from COCO.\"\"\"\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict\n\n\ndef anno_parser(annos_str):\n \"\"\"Parse annotation from string to list.\"\"\"\n annos = []\n for anno_str in annos_str:\n anno = list(map(int, anno_str.strip().split(',')))\n annos.append(anno)\n return annos\n\n\ndef filter_valid_data(image_dir, anno_path):\n \"\"\"Filter valid image file, which both in image_dir and anno_path.\"\"\"\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n if not os.path.isdir(image_dir):\n raise RuntimeError(\"Path given is not valid.\")\n if not os.path.isfile(anno_path):\n raise RuntimeError(\"Annotation file is not valid.\")\n\n with open(anno_path, \"rb\") as f:\n lines = f.readlines()\n for img_id, line in 
enumerate(lines):\n line_str = line.decode(\"utf-8\").strip()\n line_split = str(line_str).split(' ')\n file_name = line_split[0]\n image_path = os.path.join(image_dir, file_name)\n if os.path.isfile(image_path):\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = anno_parser(line_split[1:])\n\n return images, image_path_dict, image_anno_dict\n\n\ndef voc_data_to_mindrecord(mindrecord_dir, is_training, prefix=\"ssd.mindrecord\", file_num=8):\n \"\"\"Create MindRecord file by image_dir and anno_path.\"\"\"\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\n writer = FileWriter(mindrecord_path, file_num)\n images, image_path_dict, image_anno_dict = create_voc_label(is_training)\n\n ssd_json = {\n \"img_id\": {\"type\": \"int32\", \"shape\": [1]},\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"int32\", \"shape\": [-1, 5]},\n }\n writer.add_schema(ssd_json, \"ssd_json\")\n\n for img_id in images:\n image_path = image_path_dict[img_id]\n with open(image_path, 'rb') as f:\n img = f.read()\n annos = np.array(image_anno_dict[img_id], dtype=np.int32)\n img_id = np.array([img_id], dtype=np.int32)\n row = {\"img_id\": img_id, \"image\": img, \"annotation\": annos}\n writer.write_raw_data([row])\n writer.commit()\n\n\ndef data_to_mindrecord_byte_image(dataset=\"coco\", is_training=True, prefix=\"ssd.mindrecord\", file_num=8):\n \"\"\"Create MindRecord file.\"\"\"\n mindrecord_dir = config.mindrecord_dir\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\n writer = FileWriter(mindrecord_path, file_num)\n if dataset == \"coco\":\n images, image_path_dict, image_anno_dict = create_coco_label(is_training)\n else:\n images, image_path_dict, image_anno_dict = filter_valid_data(config.image_dir, config.anno_path)\n\n ssd_json = {\n \"img_id\": {\"type\": \"int32\", \"shape\": [1]},\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"int32\", \"shape\": [-1, 5]},\n }\n writer.add_schema(ssd_json, \"ssd_json\")\n\n for img_id in images:\n image_path = image_path_dict[img_id]\n with open(image_path, 'rb') as f:\n img = f.read()\n annos = np.array(image_anno_dict[img_id], dtype=np.int32)\n img_id = np.array([img_id], dtype=np.int32)\n row = {\"img_id\": img_id, \"image\": img, \"annotation\": annos}\n writer.write_raw_data([row])\n writer.commit()\n\n\ndef create_ssd_dataset(mindrecord_file, batch_size=32, repeat_num=10, device_num=1, rank=0,\n is_training=True, num_parallel_workers=4, use_multiprocessing=True):\n \"\"\"Create SSD dataset with MindDataset.\"\"\"\n ds = de.MindDataset(mindrecord_file, columns_list=[\"img_id\", \"image\", \"annotation\"], num_shards=device_num,\n shard_id=rank, num_parallel_workers=num_parallel_workers, shuffle=is_training)\n decode = C.Decode()\n ds = ds.map(operations=decode, input_columns=[\"image\"])\n change_swap_op = C.HWC2CHW()\n normalize_op = C.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],\n std=[0.229 * 255, 0.224 * 255, 0.225 * 255])\n color_adjust_op = C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)\n compose_map_func = (lambda img_id, image, annotation: preprocess_fn(img_id, image, annotation, is_training))\n if is_training:\n output_columns = [\"image\", \"box\", \"label\", \"num_match\"]\n trans = [color_adjust_op, normalize_op, change_swap_op]\n else:\n output_columns = [\"img_id\", \"image\", \"image_shape\"]\n trans = [normalize_op, change_swap_op]\n ds = ds.map(operations=compose_map_func, input_columns=[\"img_id\", \"image\", \"annotation\"],\n 
output_columns=output_columns, column_order=output_columns,\n python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=trans, input_columns=[\"image\"], python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.batch(batch_size, drop_remainder=True)\n ds = ds.repeat(repeat_num)\n return ds\n\n\ndef create_mindrecord(dataset=\"coco\", prefix=\"ssd.mindrecord\", is_training=True):\n print(\"Start create dataset!\")\n\n # It will generate mindrecord file in config.mindrecord_dir,\n # and the file name is ssd.mindrecord0, 1, ... file_num.\n\n mindrecord_dir = config.mindrecord_dir\n mindrecord_file = os.path.join(mindrecord_dir, prefix + \"0\")\n if not os.path.exists(mindrecord_file):\n if not os.path.isdir(mindrecord_dir):\n os.makedirs(mindrecord_dir)\n if dataset == \"coco\":\n if os.path.isdir(config.coco_root):\n print(\"Create Mindrecord.\")\n data_to_mindrecord_byte_image(\"coco\", is_training, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"coco_root not exits.\")\n elif dataset == \"voc\":\n if os.path.isdir(config.voc_root):\n print(\"Create Mindrecord.\")\n voc_data_to_mindrecord(mindrecord_dir, is_training, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"voc_root not exits.\")\n else:\n if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):\n print(\"Create Mindrecord.\")\n data_to_mindrecord_byte_image(\"other\", is_training, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"image_dir or anno_path not exits.\")\n return mindrecord_file\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n#################train lstm example on aclImdb########################\n\"\"\"\nimport argparse\nimport os\n\nimport numpy as np\n\nfrom src.config import lstm_cfg as cfg, lstm_cfg_ascend\nfrom src.dataset import lstm_create_dataset, convert_to_mindrecord\nfrom src.lr_schedule import get_lr\nfrom src.lstm import SentimentNet\nfrom mindspore import Tensor, nn, Model, context\nfrom mindspore.nn import Accuracy\nfrom mindspore.train.callback import LossMonitor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='MindSpore LSTM Example')\n parser.add_argument('--preprocess', type=str, default='false', choices=['true', 'false'],\n help='whether to preprocess data.')\n parser.add_argument('--aclimdb_path', type=str, default=\"./aclImdb\",\n help='path where the dataset is stored.')\n parser.add_argument('--glove_path', type=str, default=\"./glove\",\n help='path where the GloVe is stored.')\n parser.add_argument('--preprocess_path', type=str, default=\"./preprocess\",\n help='path where the pre-process data is stored.')\n parser.add_argument('--ckpt_path', type=str, default=None,\n help='the checkpoint file path used to evaluate model.')\n parser.add_argument('--device_target', type=str, default=\"Ascend\", choices=['GPU', 'CPU', 'Ascend'],\n help='the target device to run, support \"GPU\", \"CPU\". 
Default: \"Ascend\".')\n args = parser.parse_args()\n\n context.set_context(\n mode=context.GRAPH_MODE,\n save_graphs=False,\n device_target=args.device_target)\n\n if args.device_target == 'Ascend':\n cfg = lstm_cfg_ascend\n else:\n cfg = lstm_cfg\n\n if args.preprocess == \"true\":\n print(\"============== Starting Data Pre-processing ==============\")\n convert_to_mindrecord(cfg.embed_size, args.aclimdb_path, args.preprocess_path, args.glove_path)\n\n embedding_table = np.loadtxt(os.path.join(args.preprocess_path, \"weight.txt\")).astype(np.float32)\n # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size\n # and hiddle_size is multiples of 16, this problem will be solved later.\n if args.device_target == 'Ascend':\n pad_num = int(np.ceil(cfg.embed_size / 16) * 16 - cfg.embed_size)\n if pad_num > 0:\n embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')\n cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)\n\n network = SentimentNet(vocab_size=embedding_table.shape[0],\n embed_size=cfg.embed_size,\n num_hiddens=cfg.num_hiddens,\n num_layers=cfg.num_layers,\n bidirectional=cfg.bidirectional,\n num_classes=cfg.num_classes,\n weight=Tensor(embedding_table),\n batch_size=cfg.batch_size)\n\n loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n ds_eval = lstm_create_dataset(args.preprocess_path, cfg.batch_size, training=False)\n if cfg.dynamic_lr:\n lr = Tensor(get_lr(global_step=cfg.global_step,\n lr_init=cfg.lr_init, lr_end=cfg.lr_end, lr_max=cfg.lr_max,\n warmup_epochs=cfg.warmup_epochs,\n total_epochs=cfg.num_epochs,\n steps_per_epoch=ds_eval.get_dataset_size(),\n lr_adjust_epoch=cfg.lr_adjust_epoch))\n else:\n lr = cfg.learning_rate\n\n opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)\n loss_cb = LossMonitor()\n\n model = Model(network, loss, opt, {'acc': Accuracy()})\n\n print(\"============== Starting Testing ==============\")\n param_dict = load_checkpoint(args.ckpt_path)\n load_param_into_net(network, param_dict)\n if args.device_target == \"CPU\":\n acc = model.eval(ds_eval, dataset_sink_mode=False)\n else:\n acc = model.eval(ds_eval)\n print(\"============== {} ==============\".format(acc))\n"
] | [
[
"numpy.random.choice",
"numpy.expand_dims",
"numpy.random.rand",
"numpy.maximum",
"numpy.concatenate",
"numpy.array",
"numpy.minimum"
],
[
"numpy.pad",
"numpy.ceil"
]
] |
doronbehar/lab4 | [
"90af5a8fd562ba6a35b6ba90611122573e7de485"
] | [
"x2.ESR/ESRB.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pint\n# Use the same registry\nfrom main import ureg\nureg.setup_matplotlib(True)\nfrom uncertainties import ufloat, umath, unumpy\nimport pandas as pd\nfrom scipy.signal import find_peaks\nfrom scipy.integrate import simpson\nfrom scipy.optimize import curve_fit\nplt.rcParams['text.usetex'] = True\n\namp = 700*ureg.mV\nR=ufloat(0.82, 0.82*0.1)*ureg.ohm\n\ndf = pd.read_csv(\"./ESRB.csv\")\n# The I0_modulation signal is horrible, the system was too noisy, so instead:\n#\n# I0_modulation = (unumpy.uarray(\n # df['V_modulation_raw'].values,\n # df['V_modulation_err'].values\n# )*ureg.mV/R).to('ampere')\n#\n# we regnerate it, assuming it should be linear, just as V_DC is.\nI0_modulation = (unumpy.uarray(np.linspace(\n df['V_modulation_raw'].min(),\n df['V_modulation_raw'].max(),\n len(df)\n), df['V_modulation_err'].mean())*ureg.mV/R).to('ampere')\n\nptp_Y = unumpy.uarray(\n df['ptp_Y_raw'].values*df['phase_sign'].values,\n df['ptp_Y_err'].values\n)*ureg.mV\nptp_X_modulation = ufloat(3.09, 0.01)*ureg.mV\n\nfig, ax = plt.subplots()\nI0_modulation_err = np.array([val.m.s for val in I0_modulation])\nI0_modulation_raw = np.array([val.m.n for val in I0_modulation])\nptp_ratio = ptp_Y/ptp_X_modulation\nabsorption_deriviative = ptp_ratio/max(ptp_ratio)\nabsorption_deriviative_raw = np.array([val.m.n for val in absorption_deriviative])\nabsorption_deriviative_err = np.array([val.m.s for val in absorption_deriviative])\nax.errorbar(\n I0_modulation_raw*ureg.ampere,\n absorption_deriviative_raw, # Dimensionless\n fmt='.',\n yerr=absorption_deriviative_err,\n # TODO: Mention in report that error is too big to be drafted\n #xerr=I_modulation_err,\n # TODO: Is this the correct label?\n label='Absorption Deriviative'\n)\n\ndef lorentzian_dif_fit(I, I0, gamma, amplitude):\n return amplitude*(-2*(gamma**2)*(I - I0))/ \\\n (gamma**2 + (I - I0)**2)**2\ndef lorentzian_fit(I, I0, gamma, amplitude):\n return amplitude*gamma**2/\\\n (gamma**2 + (I - I0)**2)**2\n##### By MATLAB:\n# Goodness of fit:\n# SSE: 0.197\n# R-square: 0.9845\n# Adjusted R-square: 0.9838\n# RMSE: 0.06769\n# I0 gamma amplitude\nmatlab_p0 = [0.5479, 0.03847, 0.05554]\nmatlab_bounds=((0.547, 0.03672, 0.05304),\n (0.5488, 0.04021, 0.05805))\nI_rf = ufloat(matlab_p0[0], abs(matlab_bounds[0][0] - matlab_p0[0]))*ureg.ampere\nI_hwhm = ufloat(matlab_p0[1], abs(matlab_bounds[0][1] - matlab_p0[1]))*ureg.ampere\n\nfrom main import g_times_bohr\n# TODO: Take this value from Itamar & Tomer\nH_RF = ufloat(34.914, 0.009)*ureg.gauss\nk = H_RF/I_rf\n# Converts current I To frequency f using all of the constants\ndef I2f(I):\n return (I*k*g_times_bohr/ureg.planck_constant).to('megahertz')\n\nf0_modulation = I2f(I0_modulation)\nf_rf = I2f(I_rf)\nf_hwhm = I2f(I_hwhm)\nT2 = (1/f_hwhm).to('nanosecond')\n\n##### A failing Python fit attempt - I consider it as a failure because it hits\n##### the bounds :/\n# popt, pcov = curve_fit(\n # lorentzian_dif_fit, absorption_deriviative_raw, I0_modulation_raw,\n # p0=matlab_p0, bounds=matlab_bounds\n# )\n# lorentzian_dif_fit_points = lorentzian_dif_fit(I0_modulation_raw, *popt)\n# ax.plot(\n # I0_modulation_raw*ureg.ampere,\n # lorentzian_dif_fit_points,\n # label=\"Python fit\"\n# )\n\nI0_modulation_seq = np.linspace(\n I0_modulation.min().m.n,\n I0_modulation.max().m.n,\n len(I0_modulation)*100\n)\nax.plot(\n I0_modulation_seq*ureg.ampere,\n lorentzian_dif_fit(I0_modulation_seq, I_rf.m.n, I_hwhm.m.n, matlab_p0[2]),\n label=\"Matlab fit\"\n)\nax.set_yticks([])\naxt 
= ax.twiny()\naxt.grid(linestyle='--')\naxt.set_yticks([])\nf0_modulation_seq = np.linspace(\n f0_modulation.min().m.n,\n f0_modulation.max().m.n,\n len(f0_modulation)*100\n)\ndef lorentzian_wrapper(f0):\n # From some reason this need to be amplified by a factor of 800 so it will\n # look good.\n return lorentzian_fit(f0, f_rf.m.n, f_hwhm.m.n, matlab_p0[2]*800)\naxt.plot(\n f0_modulation_seq*ureg.megahertz,\n lorentzian_wrapper(f0_modulation_seq),\n label = \"Lorenzian fit\", color='green'\n)\naxt.set_xticks(\n [(f_rf - f_hwhm).m.n, f_rf.m.n, (f_rf + f_hwhm).m.n],\n ['', '$f_{rf}$', '']\n)\naxt.set_xlabel('')\naxt.arrow(\n length_includes_head = True,\n x = (f_rf - f_hwhm).m.n*ureg.megahertz,\n y = lorentzian_wrapper((f_rf - f_hwhm).m.n),\n dx = 2*f_hwhm.m.n*ureg.megahertz,\n dy = 0,\n head_length = f_hwhm.m.n/10,\n head_width = matlab_p0[2],\n label=\"Full Width Half Max\",\n)\naxt.arrow(\n length_includes_head = True,\n x = (f_rf + f_hwhm).m.n*ureg.megahertz,\n y = lorentzian_wrapper((f_rf + f_hwhm).m.n),\n dx = -2*f_hwhm.m.n*ureg.megahertz,\n head_length = f_hwhm.m.n/10,\n head_width = matlab_p0[2],\n dy = 0,\n)\naxt.text(\n 0.5, 0.63,\n # (f_hwhm.m.n/10),\n # lorentzian_wrapper((f0 - f_hwhm).m.n)*2,\n \"FWHM\",\n transform=ax.transAxes,\n # fontsize=00\n)\nax.legend(loc='upper right')\n# axt.legend(loc='upper left')\nplt.show()\nfig.savefig(\"ESRB.pgf\")\nfig.savefig(\"ESRB.png\")\n\n# TODO: Integrate numerically / or fit to a laurenzian's differentiation\n\n# TODO: Scale the x axis to frequency and find the width of the laurenzian in\n# frequency scale\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.subplots"
]
] |
ayushkarnawat/profit | [
"f3c4d601078b52513af6832c3faf75ddafc59ac5"
] | [
"examples/gb1/train_oracle.py"
] | [
"\"\"\"Train (basic) densely-connected oracle.\"\"\"\n\nimport os\nimport time\nimport multiprocessing as mp\n\nimport pandas as pd\n\nimport torch\nfrom torch import optim\nfrom torch.utils.data import DataLoader, Subset, TensorDataset, WeightedRandomSampler\n\nfrom profit.dataset.splitters import split_method_dict\nfrom profit.models.torch import SequenceOracle\nfrom profit.utils.data_utils.tokenizers import AminoAcidTokenizer\nfrom profit.utils.training_utils.torch import losses as L\nfrom profit.utils.training_utils.torch.callbacks import ModelCheckpoint\nfrom profit.utils.training_utils.torch.callbacks import EarlyStopping\n\nfrom examples.gb1.data import load_dataset\n\n\ntimestep = time.strftime(\"%Y-%b-%d-%H:%M:%S\", time.gmtime())\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ntensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor\nsplits = [\"train\", \"valid\"]\n\n# Preprocess + load the dataset\ndataset = load_dataset(\"lstm\", \"primary\", labels=\"Fitness\", num_data=-1,\n filetype=\"mdb\", as_numpy=False, vocab=\"aa20\")\n# Stratify train/val/test sets s.t. the target labels are equally represented in\n# each subset. Each subset will have the same ratio of low/mid/high variants in\n# each batch as the full dataset. See: https://discuss.pytorch.org/t/29907/2\n_dataset = dataset[:][\"arr_0\"]\n_labels = dataset[:][\"arr_1\"].view(-1)\n# # Remove samples below a certain threshold\n# high_idx = torch.where(_labels > _labels.mean())\n# dataset = Subset(dataset, sorted(high_idx))\n# _dataset = _dataset[high_idx]\n# _labels = _labels[high_idx]\n\n# Compute sample weights (each sample should get its own weight)\ndef sampler(labels: torch.Tensor,\n nbins: int = 10,\n stratify: bool = False) -> WeightedRandomSampler:\n discretize = pd.qcut if stratify else pd.cut\n bin_labels = torch.LongTensor(discretize(labels.tolist(), nbins,\n labels=False, duplicates=\"drop\"))\n class_sample_count = torch.LongTensor(\n [(bin_labels == t).sum() for t in torch.arange(nbins)])\n weight = 1. 
/ class_sample_count.float()\n sample_weights = torch.zeros_like(labels)\n for t in torch.unique(bin_labels):\n sample_weights[bin_labels == t] = weight[t]\n return WeightedRandomSampler(sample_weights, len(sample_weights))\n\n# Compute sample weights and add to original dataset\nweights = sampler(_labels, nbins=10, stratify=False).weights.type(torch.float)\ndataset = TensorDataset(*dataset[:].values(), weights)\n\n# Create subset indicies\nsubset_idx = split_method_dict[\"stratified\"]().train_valid_test_split(\n dataset=_dataset, labels=_labels.tolist(), frac_train=0.9,\n frac_valid=0.1, frac_test=0.0, return_idxs=True, n_bins=10)\nstratified = {split: Subset(dataset, sorted(idx))\n for split, idx in zip(splits, subset_idx)}\n\n# Create stratified sampler (only needed for training)\ntrain_sampler = sampler(stratified[\"train\"][:][1].view(-1), stratify=True)\n\n# Initialize model\ntokenizer = AminoAcidTokenizer(\"aa20\")\nvocab_size = tokenizer.vocab_size\nseqlen = stratified[\"train\"][0][0].size(0)\nmodel = SequenceOracle(seqlen, vocab_size, hidden_size=50, out_size=2)\n\n# Initialize callbacks\n# NOTE: Must set model (within save_clbk) to ensure weights get saved\nstop_clbk = EarlyStopping(patience=5, verbose=1)\nsave_clbk = ModelCheckpoint(os.path.join(\"bin/3gb1/oracle\", timestep),\n monitor=\"val_loss\",\n verbose=1,\n save_weights_only=True)\nsave_clbk.set_model(model)\n\n# Initialize callbacks\noptimizer = optim.AdamW(model.parameters(), lr=1e-3)\n\nepochs = 50\nfor epoch in range(1, epochs+1):\n for split in splits:\n summed_loss = 0\n data_loader = DataLoader(\n dataset=stratified[split],\n batch_size=32,\n sampler=train_sampler if split == \"train\" else None,\n num_workers=mp.cpu_count(),\n pin_memory=torch.cuda.is_available()\n )\n\n # Enable/disable dropout\n model.train() if split == \"train\" else model.eval()\n\n for it, batch in enumerate(data_loader):\n data = batch[0].long().to(device)\n target = batch[1].to(device)\n sample_weight = batch[2].to(device)\n # One-hot encode (see: https://discuss.pytorch.org/t/507/34)\n batch_size, seqlen = data.size()\n onehot = torch.zeros(batch_size, seqlen, vocab_size)\n onehot.scatter_(2, torch.unsqueeze(data, 2), 1)\n\n # Forward pass\n pred = model(onehot)\n # Loss calculation\n nll_loss = L.gaussian_nll_loss(pred, target, reduction=\"none\")\n # Reweight nll_loss w/ sample weights\n nll_loss = (nll_loss * sample_weight).sum()\n summed_loss += nll_loss.item()\n loss = nll_loss / batch_size\n # Compute gradients and update params/weights\n if split == \"train\":\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Bookkeeping (batch)\n if it % 5 == 0 or it+1 == len(data_loader):\n print(\"{} Batch {:04d}/{:d} ({:.2f}%)\\tLoss: {:.4f}\".format(\n split.upper(), it+1, len(data_loader),\n 100. * ((it+1)/len(data_loader)), loss.item()))\n\n # Bookkeeping (epoch)\n avg_loss = summed_loss / len(data_loader.dataset)\n print(\"{} Epoch {}/{}, Average NLL loss: {:.4f}\".format(\n split.upper(), epoch, epochs, avg_loss))\n\n # Stop training (based off val loss) and save (top k) ckpts\n if split == \"valid\":\n save_clbk.on_epoch_end(epoch, logs={\"val_loss\": avg_loss})\n should_stop = stop_clbk.on_epoch_end(epoch, logs={\"val_loss\": avg_loss})\n if should_stop:\n break\n else:\n continue\n break\n"
] | [
[
"torch.unsqueeze",
"torch.zeros_like",
"torch.cuda.is_available",
"torch.arange",
"torch.unique",
"torch.zeros"
]
] |
Air-Factories-2-0/af2-hyperledger | [
"7aeeb831cf03fdf7fe64f9500da17c02688a0886"
] | [
"scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/morphology/tests/test_max_tree.py"
] | [
"import numpy as np\nfrom skimage.morphology import max_tree, area_closing, area_opening\nfrom skimage.morphology import max_tree_local_maxima, diameter_opening\nfrom skimage.morphology import diameter_closing\nfrom skimage.util import invert\n\nfrom skimage._shared.testing import assert_array_equal, TestCase\n\neps = 1e-12\n\n\ndef _full_type_test(img, param, expected, func, param_scale=False,\n **keywords):\n\n # images as they are\n out = func(img, param, **keywords)\n assert_array_equal(out, expected)\n\n # unsigned int\n for dt in [np.uint32, np.uint64]:\n img_cast = img.astype(dt)\n out = func(img_cast, param, **keywords)\n exp_cast = expected.astype(dt)\n assert_array_equal(out, exp_cast)\n\n # float\n data_float = img.astype(np.float64)\n data_float = data_float / 255.0\n expected_float = expected.astype(np.float64)\n expected_float = expected_float / 255.0\n if param_scale:\n param_cast = param / 255.0\n else:\n param_cast = param\n for dt in [np.float32, np.float64]:\n data_cast = data_float.astype(dt)\n out = func(data_cast, param_cast, **keywords)\n exp_cast = expected_float.astype(dt)\n error_img = 255.0 * exp_cast - 255.0 * out\n error = (error_img >= 1.0).sum()\n assert error < eps\n\n # signed images\n img_signed = img.astype(np.int16)\n img_signed = img_signed - 128\n exp_signed = expected.astype(np.int16)\n exp_signed = exp_signed - 128\n for dt in [np.int8, np.int16, np.int32, np.int64]:\n img_s = img_signed.astype(dt)\n out = func(img_s, param, **keywords)\n exp_s = exp_signed.astype(dt)\n assert_array_equal(out, exp_s)\n\n\nclass TestMaxtree(TestCase):\n\n def test_max_tree(self):\n \"Test for max tree\"\n img_type = np.uint8\n img = np.array([[10, 8, 8, 9],\n [7, 7, 9, 9],\n [8, 7, 10, 10],\n [9, 9, 10, 10]], dtype=img_type)\n\n P_exp = np.array([[1, 4, 1, 1],\n [4, 4, 3, 3],\n [1, 4, 3, 10],\n [3, 3, 10, 10]], dtype=np.int64)\n\n S_exp = np.array([4, 5, 9, 1, 2, 8, 3, 6, 7,\n 12, 13, 0, 10, 11, 14, 15],\n dtype=np.int64)\n\n for img_type in [np.uint8, np.uint16, np.uint32, np.uint64]:\n img = img.astype(img_type)\n P, S = max_tree(img, connectivity=2)\n assert_array_equal(P, P_exp)\n assert_array_equal(S, S_exp)\n\n for img_type in [np.int8, np.int16, np.int32, np.int64]:\n img = img.astype(img_type)\n img_shifted = img - 9\n P, S = max_tree(img_shifted, connectivity=2)\n assert_array_equal(P, P_exp)\n assert_array_equal(S, S_exp)\n\n img_float = img.astype(float)\n img_float = (img_float - 8) / 2.0\n for img_type in [np.float32, np.float64]:\n img_float = img_float.astype(img_type)\n P, S = max_tree(img_float, connectivity=2)\n assert_array_equal(P, P_exp)\n assert_array_equal(S, S_exp)\n\n return\n\n def test_area_closing(self):\n \"Test for Area Closing (2 thresholds, all types)\"\n\n # original image\n img = np.array(\n [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 200, 240, 200, 200, 240, 240, 200, 240],\n [240, 200, 40, 240, 240, 240, 240, 240, 240, 240, 40, 240],\n [240, 240, 240, 240, 100, 240, 100, 100, 240, 240, 200, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 40],\n [200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 40, 200, 240, 240, 100, 255, 255],\n [200, 40, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 
255]],\n dtype=np.uint8)\n\n # expected area closing with area 2\n expected_2 = np.array(\n [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 240, 240, 200, 200, 240, 240, 200, 240],\n [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 200, 240],\n [240, 240, 240, 240, 240, 240, 100, 100, 240, 240, 200, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 40, 200, 240, 240, 200, 255, 255],\n [200, 200, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],\n dtype=np.uint8)\n\n # expected diameter closing with diameter 4\n expected_4 = np.array(\n [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [200, 200, 200, 200, 200, 200, 200, 240, 240, 240, 255, 255],\n [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 240, 240, 200, 255, 255],\n [200, 200, 255, 255, 255, 200, 200, 255, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],\n dtype=np.uint8)\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, expected_2, area_closing, connectivity=2)\n _full_type_test(img, 4, expected_4, area_closing, connectivity=2)\n\n P, S = max_tree(invert(img), connectivity=2)\n _full_type_test(img, 4, expected_4, area_closing,\n parent=P, tree_traverser=S)\n\n def test_area_opening(self):\n \"Test for Area Opening (2 thresholds, all types)\"\n\n # original image\n img = np.array([[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],\n [15, 55, 55, 15, 55, 15, 55, 55, 15, 15, 55, 15],\n [15, 55, 215, 15, 15, 15, 15, 15, 15, 15, 215, 15],\n [15, 15, 15, 15, 155, 15, 155, 155, 15, 15, 55, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],\n [55, 55, 55, 55, 55, 55, 55, 15, 55, 55, 0, 0],\n [55, 0, 55, 55, 55, 0, 55, 15, 0, 0, 0, 215],\n [55, 55, 55, 155, 55, 55, 55, 15, 0, 0, 0, 0],\n [55, 55, 55, 155, 55, 55, 55, 15, 55, 55, 0, 0],\n [55, 55, 55, 55, 55, 215, 55, 15, 15, 155, 0, 0],\n [55, 215, 0, 0, 0, 215, 55, 0, 55, 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 0, 0, 0, 0, 0]],\n dtype=np.uint8)\n\n # expected area closing with area 2\n expected_2 = np.array([[15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 55, 55, 15, 15, 15, 55, 55, 15,\n 15, 55, 15],\n [15, 55, 55, 15, 15, 15, 15, 15, 15,\n 15, 55, 15],\n [15, 15, 15, 15, 15, 15, 155, 155, 15,\n 15, 55, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [55, 55, 55, 55, 55, 55, 55, 15, 55,\n 55, 0, 0],\n [55, 0, 55, 55, 55, 0, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 155, 55, 55, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 155, 55, 55, 55, 15, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 215, 55, 15, 15,\n 55, 0, 0],\n [55, 55, 0, 0, 0, 215, 55, 0, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 0, 0,\n 0, 0, 0]],\n dtype=np.uint8)\n\n # expected diameter 
closing with diameter 4\n expected_4 = np.array([[15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 55, 55, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 55, 55, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [55, 55, 55, 55, 55, 55, 55, 15, 15,\n 15, 0, 0],\n [55, 0, 55, 55, 55, 0, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 15, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 15, 15,\n 55, 0, 0],\n [55, 55, 0, 0, 0, 55, 55, 0, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 0, 0,\n 0, 0, 0]],\n dtype=np.uint8)\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, expected_2, area_opening, connectivity=2)\n _full_type_test(img, 4, expected_4, area_opening, connectivity=2)\n\n P, S = max_tree(img, connectivity=2)\n _full_type_test(img, 4, expected_4, area_opening,\n parent=P, tree_traverser=S)\n\n def test_diameter_closing(self):\n \"Test for Diameter Opening (2 thresholds, all types)\"\n img = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],\n [93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],\n [92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [90, 88, 86, 84, 83, 83, 82, 83, 83, 84, 86, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],\n [93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],\n dtype=np.uint8)\n\n ex2 = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],\n [93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],\n [92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],\n [93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],\n dtype=np.uint8)\n\n ex4 = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],\n [93, 63, 63, 63, 63, 86, 86, 86, 87, 84, 84, 91],\n [92, 89, 88, 86, 85, 85, 84, 85, 85, 84, 84, 89],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [92, 89, 84, 84, 85, 85, 84, 85, 85, 84, 84, 89],\n [93, 91, 84, 84, 87, 86, 86, 86, 87, 88, 84, 91],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],\n dtype=np.uint8)\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, ex2, diameter_closing, connectivity=2)\n _full_type_test(img, 4, ex4, diameter_closing, connectivity=2)\n\n P, S = max_tree(invert(img), connectivity=2)\n _full_type_test(img, 4, ex4, diameter_opening,\n parent=P, tree_traverser=S)\n\n def test_diameter_opening(self):\n \"Test for Diameter Opening (2 thresholds, all types)\"\n img = np.array([[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],\n [7, 
10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],\n [9, 40, 40, 40, 40, 16, 16, 16, 16, 60, 60, 11],\n [11, 13, 15, 16, 17, 18, 18, 18, 17, 60, 60, 13],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 15, 16, 18, 19, 19, 20, 19, 19, 18, 16, 15],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [11, 13, 80, 80, 17, 18, 18, 18, 17, 100, 100, 13],\n [9, 11, 80, 80, 16, 16, 16, 16, 16, 15, 100, 11],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10]])\n\n ex2 = np.array([[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],\n [9, 40, 40, 40, 40, 16, 16, 16, 16, 60, 60, 11],\n [11, 13, 15, 16, 17, 18, 18, 18, 17, 60, 60, 13],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 15, 16, 18, 19, 19, 19, 19, 19, 18, 16, 15],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [11, 13, 80, 80, 17, 18, 18, 18, 17, 100, 100, 13],\n [9, 11, 80, 80, 16, 16, 16, 16, 16, 15, 100, 11],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10]])\n\n ex4 = np.array([[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],\n [9, 40, 40, 40, 40, 16, 16, 16, 16, 18, 18, 11],\n [11, 13, 15, 16, 17, 18, 18, 18, 17, 18, 18, 13],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 15, 16, 18, 19, 19, 19, 19, 19, 18, 16, 15],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [11, 13, 18, 18, 17, 18, 18, 18, 17, 18, 18, 13],\n [9, 11, 18, 18, 16, 16, 16, 16, 16, 15, 18, 11],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10]])\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, ex2, diameter_opening, connectivity=2)\n _full_type_test(img, 4, ex4, diameter_opening, connectivity=2)\n\n P, S = max_tree(img, connectivity=2)\n _full_type_test(img, 4, ex4, diameter_opening,\n parent=P, tree_traverser=S)\n\n def test_local_maxima(self):\n \"local maxima for various data types\"\n data = np.array([[10, 11, 13, 14, 14, 15, 14, 14, 13, 11],\n [11, 13, 15, 16, 16, 16, 16, 16, 15, 13],\n [13, 15, 40, 40, 18, 18, 18, 60, 60, 15],\n [14, 16, 40, 40, 19, 19, 19, 60, 60, 16],\n [14, 16, 18, 19, 19, 19, 19, 19, 18, 16],\n [15, 16, 18, 19, 19, 20, 19, 19, 18, 16],\n [14, 16, 18, 19, 19, 19, 19, 19, 18, 16],\n [14, 16, 80, 80, 19, 19, 19, 100, 100, 16],\n [13, 15, 80, 80, 18, 18, 18, 100, 100, 15],\n [11, 13, 15, 16, 16, 16, 16, 16, 15, 13]],\n dtype=np.uint8)\n expected_result = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.uint64)\n for dtype in [np.uint8, np.uint64, np.int8, np.int64]:\n\n test_data = data.astype(dtype)\n out = max_tree_local_maxima(test_data, connectivity=1)\n out_bin = out > 0\n assert_array_equal(expected_result, out_bin)\n assert out.dtype == expected_result.dtype\n assert np.max(out) == 5\n\n P, S = max_tree(test_data)\n out = max_tree_local_maxima(test_data,\n parent=P,\n tree_traverser=S)\n\n assert_array_equal(expected_result, out_bin)\n\n assert out.dtype == 
expected_result.dtype\n assert np.max(out) == 5\n\n def test_extrema_float(self):\n \"specific tests for float type\"\n data = np.array([[0.10, 0.11, 0.13, 0.14, 0.14, 0.15, 0.14,\n 0.14, 0.13, 0.11],\n [0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16,\n 0.16, 0.15, 0.13],\n [0.13, 0.15, 0.40, 0.40, 0.18, 0.18, 0.18,\n 0.60, 0.60, 0.15],\n [0.14, 0.16, 0.40, 0.40, 0.19, 0.19, 0.19,\n 0.60, 0.60, 0.16],\n [0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19,\n 0.19, 0.18, 0.16],\n [0.15, 0.182, 0.18, 0.19, 0.204, 0.20, 0.19,\n 0.19, 0.18, 0.16],\n [0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19,\n 0.19, 0.18, 0.16],\n [0.14, 0.16, 0.80, 0.80, 0.19, 0.19, 0.19,\n 4.0, 1.0, 0.16],\n [0.13, 0.15, 0.80, 0.80, 0.18, 0.18, 0.18,\n 1.0, 1.0, 0.15],\n [0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16,\n 0.16, 0.15, 0.13]],\n dtype=np.float32)\n\n expected_result = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.uint8)\n\n # test for local maxima\n out = max_tree_local_maxima(data, connectivity=1)\n out_bin = out > 0\n assert_array_equal(expected_result, out_bin)\n assert np.max(out) == 6\n\n def test_3d(self):\n \"\"\"tests the detection of maxima in 3D.\"\"\"\n img = np.zeros((8, 8, 8), dtype=np.uint8)\n local_maxima = np.zeros((8, 8, 8), dtype=np.uint64)\n\n # first maximum: only one pixel\n img[1, 1:3, 1:3] = 100\n img[2, 2, 2] = 200\n img[3, 1:3, 1:3] = 100\n local_maxima[2, 2, 2] = 1\n\n # second maximum: three pixels in z-direction\n img[5:8, 1, 1] = 200\n local_maxima[5:8, 1, 1] = 1\n\n # third: two maxima in 0 and 3.\n img[0, 5:8, 5:8] = 200\n img[1, 6, 6] = 100\n img[2, 5:7, 5:7] = 200\n img[0:3, 5:8, 5:8] += 50\n local_maxima[0, 5:8, 5:8] = 1\n local_maxima[2, 5:7, 5:7] = 1\n\n # four : one maximum in the corner of the square\n img[6:8, 6:8, 6:8] = 200\n img[7, 7, 7] = 255\n local_maxima[7, 7, 7] = 1\n\n out = max_tree_local_maxima(img)\n out_bin = out > 0\n assert_array_equal(local_maxima, out_bin)\n assert np.max(out) == 5\n"
] | [
[
"numpy.array",
"numpy.max",
"numpy.zeros"
]
] |
hw07216/imaginaire | [
"d82c87aced50afd44fd162491ba5b59056b74034"
] | [
"imaginaire/losses/feature_matching.py"
] | [
"# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, check out LICENSE.md\nimport torch.nn as nn\n\n\nclass FeatureMatchingLoss(nn.Module):\n r\"\"\"Compute feature matching loss\"\"\"\n def __init__(self, criterion='l1'):\n super(FeatureMatchingLoss, self).__init__()\n if criterion == 'l1':\n self.criterion = nn.L1Loss()\n elif criterion == 'l2' or criterion == 'mse':\n self.criterion = nn.MSELoss()\n else:\n raise ValueError('Criterion %s is not recognized' % criterion)\n\n def forward(self, fake_features, real_features):\n r\"\"\"Return the target vector for the binary cross entropy loss\n computation.\n\n Args:\n fake_features (list of lists): Discriminator features of fake images.\n real_features (list of lists): Discriminator features of real images.\n\n Returns:\n (tensor): Loss value.\n \"\"\"\n num_d = len(fake_features)\n dis_weight = 1.0 / num_d\n loss = fake_features[0][0].new_tensor(0)\n for i in range(num_d):\n for j in range(len(fake_features[i])):\n tmp_loss = self.criterion(fake_features[i][j],\n real_features[i][j].detach())\n loss += dis_weight * tmp_loss\n return loss\n"
] | [
[
"torch.nn.L1Loss",
"torch.nn.MSELoss"
]
] |
GZHoffie/analytics-zoo | [
"d0258aa113ffd1a5c4927376fb32b09fb0baf73c",
"d0258aa113ffd1a5c4927376fb32b09fb0baf73c"
] | [
"pyzoo/zoo/zouwu/model/Seq2Seq.py",
"pyzoo/zoo/zouwu/model/MTNet_keras.py"
] | [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, LSTM, Dense\nimport tensorflow.keras as keras\n\nfrom zoo.automl.model.abstract import BaseModel\nfrom zoo.automl.common.util import *\nfrom zoo.automl.common.metrics import Evaluator\n\n\nclass LSTMSeq2Seq(BaseModel):\n\n def __init__(self, check_optional_config=True, future_seq_len=2):\n \"\"\"\n Constructor of LSTM Seq2Seq model\n \"\"\"\n self.model = None\n self.past_seq_len = None\n self.future_seq_len = future_seq_len\n self.feature_num = None\n self.target_col_num = None\n self.metric = None\n self.latent_dim = None\n self.batch_size = None\n self.check_optional_config = check_optional_config\n\n def _build_train(self, mc=False, **config):\n \"\"\"\n build LSTM Seq2Seq model\n :param config:\n :return:\n \"\"\"\n super()._check_config(**config)\n self.metric = config.get('metric', 'mean_squared_error')\n self.latent_dim = config.get('latent_dim', 128)\n self.dropout = config.get('dropout', 0.2)\n self.lr = config.get('lr', 0.001)\n # for restore in continuous training\n self.batch_size = config.get('batch_size', 64)\n training = True if mc else None\n\n # Define an input sequence and process it.\n self.encoder_inputs = Input(shape=(None, self.feature_num), name=\"encoder_inputs\")\n encoder = LSTM(units=self.latent_dim,\n dropout=self.dropout,\n return_state=True,\n name=\"encoder_lstm\")\n encoder_outputs, state_h, state_c = encoder(self.encoder_inputs, training=training)\n # We discard `encoder_outputs` and only keep the states.\n self.encoder_states = [state_h, state_c]\n\n # Set up the decoder, using `encoder_states` as initial state.\n self.decoder_inputs = Input(shape=(None, self.target_col_num), name=\"decoder_inputs\")\n # We set up our decoder to return full output sequences,\n # and to return internal states as well. 
We don't use the\n # return states in the training model, but we will use them in inference.\n self.decoder_lstm = LSTM(self.latent_dim,\n dropout=self.dropout,\n return_sequences=True,\n return_state=True,\n name=\"decoder_lstm\")\n decoder_outputs, _, _ = self.decoder_lstm(self.decoder_inputs,\n training=training,\n initial_state=self.encoder_states)\n\n self.decoder_dense = Dense(self.target_col_num, name=\"decoder_dense\")\n decoder_outputs = self.decoder_dense(decoder_outputs)\n\n # Define the model that will turn\n # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\n self.model = Model([self.encoder_inputs, self.decoder_inputs], decoder_outputs)\n self.model.compile(loss='mse',\n metrics=[self.metric],\n optimizer=keras.optimizers.RMSprop(lr=self.lr))\n return self.model\n\n def _restore_model(self):\n self.encoder_inputs = self.model.input[0] # input_1\n encoder_outputs, state_h_enc, state_c_enc = self.model.layers[2].output # lstm_1\n self.encoder_states = [state_h_enc, state_c_enc]\n\n self.decoder_inputs = self.model.input[1] # input_2\n self.decoder_lstm = self.model.layers[3]\n\n self.decoder_dense = self.model.layers[4]\n\n def _build_inference(self, mc=False):\n training = True if mc else None\n # from our previous model - mapping encoder sequence to state vectors\n encoder_model = Model(self.encoder_inputs, self.encoder_states)\n\n # A modified version of the decoding stage that takes in predicted target inputs\n # and encoded state vectors, returning predicted target outputs and decoder state vectors.\n # We need to hang onto these state vectors to run the next step of the inference loop.\n decoder_state_input_h = Input(shape=(self.latent_dim,))\n decoder_state_input_c = Input(shape=(self.latent_dim,))\n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\n decoder_outputs, state_h, state_c = self.decoder_lstm(self.decoder_inputs,\n training=training,\n initial_state=decoder_states_inputs)\n decoder_states = [state_h, state_c]\n\n decoder_outputs = self.decoder_dense(decoder_outputs)\n decoder_model = Model([self.decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n return encoder_model, decoder_model\n\n def _decode_sequence(self, input_seq, mc=False):\n encoder_model, decoder_model = self._build_inference(mc=mc)\n # Encode the input as state vectors.\n states_value = encoder_model.predict(input_seq)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((len(input_seq), 1, self.target_col_num))\n\n # Populate the first target sequence with end of encoding series value\n target_seq[:, 0] = input_seq[:, -1, :self.target_col_num]\n\n # Sampling loop for a batch of sequences - we will fill decoded_seq with predictions\n # (to simplify, here we assume a batch of size 1).\n\n decoded_seq = np.zeros((len(input_seq), self.future_seq_len, self.target_col_num))\n\n for i in range(self.future_seq_len):\n output, h, c = decoder_model.predict([target_seq] + states_value)\n\n decoded_seq[:, i] = output[:, 0]\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((len(input_seq), 1, self.target_col_num))\n target_seq[:, 0] = output[:, 0]\n\n # Update states\n states_value = [h, c]\n\n return decoded_seq\n\n def _get_decoder_inputs(self, x, y):\n \"\"\"\n lagged target series for teacher forcing\n decoder_input data is one timestamp ahead of y\n :param x: 3-d array in format of (sample_num, past_sequence_len, feature_num)\n :param y: 3-d array in format of (sample_num, 
future_sequence_len, target_col_num)\n Need to expand dimension if y is a 2-d array with one target col\n :return: 3-d array of decoder inputs\n \"\"\"\n decoder_input_data = np.zeros(y.shape)\n decoder_input_data[1:, ] = y[:-1, ]\n decoder_input_data[0, 0] = x[-1, -1, :self.target_col_num]\n decoder_input_data[0, 1:] = y[0, :-1]\n\n return decoder_input_data\n\n def _get_len(self, x, y):\n self.past_seq_len = x.shape[1]\n self.feature_num = x.shape[2]\n # self.future_seq_len = y.shape[1]\n self.target_col_num = y.shape[2]\n\n def _expand_y(self, y):\n \"\"\"\n expand dims for y.\n :param y:\n :return:\n \"\"\"\n while len(y.shape) < 3:\n y = np.expand_dims(y, axis=2)\n return y\n\n def _pre_processing(self, x, y, validation_data):\n \"\"\"\n pre_process input data.\n 1. expand dims for y and val_y\n 2. get decoder inputs for train data\n 3. get decoder inputs for validation data\n :param x: train_x\n :param y: train_y\n :param validation_data:\n :return: network input\n \"\"\"\n y = self._expand_y(y)\n self._get_len(x, y)\n decoder_input_data = self._get_decoder_inputs(x, y)\n if validation_data is not None:\n val_x, val_y = validation_data\n val_y = self._expand_y(val_y)\n val_decoder_input = self._get_decoder_inputs(val_x, val_y)\n validation_data = ([val_x, val_decoder_input], val_y)\n return x, y, decoder_input_data, validation_data\n\n def fit_eval(self, data, validation_data=None, mc=False, verbose=0, **config):\n \"\"\"\n fit for one iteration\n :param data: could be a tuple with numpy ndarray with form (x, y)\n x: 3-d array in format (no. of samples, past sequence length, 2+feature length),\n in the last dimension, the 1st col is the time index (data type needs to be numpy datetime\n type, e.g. \"datetime64\"),\n the 2nd col is the target value (data type should be numeric)\n y: 2-d numpy array in format (no. of samples, future sequence length)\n if future sequence length > 1,\n or 1-d numpy array in format (no. 
of samples, ) if future sequence length = 1\n :param validation_data: tuple in format (x_test,y_test), data used for validation.\n If this is specified, validation result will be the optimization target for automl.\n Otherwise, train metric will be the optimization target.\n :param config: optimization hyper parameters\n :return: the resulting metric\n \"\"\"\n x, y = data[0], data[1]\n x, y, decoder_input_data, validation_data = self._pre_processing(x, y, validation_data)\n\n # if model is not initialized, __build the model\n if self.model is None:\n self._build_train(mc=mc, **config)\n\n # batch_size = config.get('batch_size', 64)\n # lr = self.lr\n # name = \"seq2seq-batch_size-{}-epochs-{}-lr-{}-time-{}\"\\\n # .format(batch_size, epochs, lr, time())\n # tensorboard = TensorBoard(log_dir=\"logs/\" + name)\n\n hist = self.model.fit([x, decoder_input_data], y,\n validation_data=validation_data,\n batch_size=self.batch_size,\n epochs=config.get(\"epochs\", 10),\n verbose=verbose,\n # callbacks=[tensorboard]\n )\n # print(hist.history)\n\n if validation_data is None:\n # get train metrics\n # results = self.model.evaluate(x, y)\n result = hist.history.get(self.metric)[-1]\n else:\n result = hist.history.get('val_' + str(self.metric))[-1]\n return result\n\n def evaluate(self, x, y, metric=['mse']):\n \"\"\"\n Evaluate on x, y\n :param x: input\n :param y: target\n :param metric: a list of metrics in string format\n :return: a list of metric evaluation results\n \"\"\"\n y_pred = self.predict(x)\n # y = np.squeeze(y, axis=2)\n if self.target_col_num == 1:\n return [Evaluator.evaluate(m, y, y_pred) for m in metric]\n else:\n return [np.array([Evaluator.evaluate(m, y[:, i, :], y_pred[:, i, :])\n for i in range(self.future_seq_len)])\n for m in metric]\n\n def predict(self, x, mc=False):\n \"\"\"\n Prediction on x.\n :param x: input\n :return: predicted y (expected dimension = 2)\n \"\"\"\n y_pred = self._decode_sequence(x, mc=mc)\n if self.target_col_num == 1:\n y_pred = np.squeeze(y_pred, axis=2)\n return y_pred\n\n def predict_with_uncertainty(self, x, n_iter=100):\n result = np.array([self.predict(x, mc=True) for i in range(n_iter)])\n prediction = result.mean(axis=0)\n uncertainty = result.var(axis=0)\n return prediction, uncertainty\n\n def save(self, model_path, config_path):\n \"\"\"\n save model to file.\n :param model_path: the model file path to be saved to.\n :param config_path: the config file path to be saved to.\n :return:\n \"\"\"\n\n self.model.save(model_path)\n\n config_to_save = {\"past_seq_len\": self.past_seq_len,\n \"feature_num\": self.feature_num,\n \"future_seq_len\": self.future_seq_len,\n \"target_col_num\": self.target_col_num,\n \"metric\": self.metric,\n \"latent_dim\": self.latent_dim,\n \"batch_size\": self.batch_size}\n save_config(config_path, config_to_save)\n\n def restore(self, model_path, **config):\n \"\"\"\n restore model from file\n :param model_path: the model file\n :param config: the trial config\n :return: the restored model\n \"\"\"\n\n self.past_seq_len = config[\"past_seq_len\"]\n self.feature_num = config[\"feature_num\"]\n self.future_seq_len = config[\"future_seq_len\"]\n self.target_col_num = config[\"target_col_num\"]\n self.metric = config[\"metric\"]\n self.latent_dim = config[\"latent_dim\"]\n self.batch_size = config[\"batch_size\"]\n\n self.model = keras.models.load_model(model_path)\n self._restore_model()\n # self.model.load_weights(file_path)\n\n def _get_required_parameters(self):\n return {\n # 'input_shape_x',\n # 
'input_shape_y',\n # 'out_units'\n }\n\n def _get_optional_parameters(self):\n return {\n 'past_seq_len'\n 'latent_dim'\n 'dropout',\n 'metric',\n 'lr',\n 'epochs',\n 'batch_size'\n }\n",
"# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n# MIT License\n#\n# Copyright (c) 2018 Roland Zimmermann\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\nimport numpy as np\nimport time\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.initializers import TruncatedNormal, Constant\nimport tensorflow.keras.backend as K\n\nimport tensorflow as tf\nfrom zoo.automl.common.metrics import Evaluator\nfrom zoo.automl.model.abstract import BaseModel\nfrom zoo.automl.common.util import save_config\n\n\nclass AttentionRNNWrapper(Wrapper):\n \"\"\"\n This class is modified based on\n https://github.com/zimmerrol/keras-utility-layer-collection/blob/master/kulc/attention.py.\n The idea of the implementation is based on the paper:\n \"Effective Approaches to Attention-based Neural Machine Translation\" by Luong et al.\n This layer is an attention layer, which can be wrapped around arbitrary RNN layers.\n This way, after each time step an attention vector is calculated\n based on the current output of the LSTM and the entire input time series.\n This attention vector is then used as a weight vector to choose special values\n from the input data. This data is then finally concatenated to the next input time step's\n data. 
On this a linear transformation in the same space as the input data's space\n is performed before the data is fed into the RNN cell again.\n This technique is similar to the input-feeding method described in the paper cited\n \"\"\"\n\n def __init__(self, layer, weight_initializer=\"glorot_uniform\", **kwargs):\n assert isinstance(layer, RNN)\n self.layer = layer\n self.supports_masking = True\n self.weight_initializer = weight_initializer\n\n super(AttentionRNNWrapper, self).__init__(layer, **kwargs)\n\n def _validate_input_shape(self, input_shape):\n if len(input_shape) != 3:\n raise ValueError(\n \"Layer received an input with shape {0} but expected a Tensor of rank 3.\".format(\n input_shape[0]))\n\n def build(self, input_shape):\n self._validate_input_shape(input_shape)\n\n self.input_spec = InputSpec(shape=input_shape)\n\n if not self.layer.built:\n self.layer.build(input_shape)\n self.layer.built = True\n\n input_dim = input_shape[-1]\n\n if self.layer.return_sequences:\n output_dim = self.layer.compute_output_shape(input_shape)[0][-1]\n else:\n output_dim = self.layer.compute_output_shape(input_shape)[-1]\n\n input_dim = input_dim.value\n output_dim = output_dim.value\n\n self._W1 = self.add_weight(shape=(input_dim, input_dim), name=\"{}_W1\".format(self.name),\n initializer=self.weight_initializer)\n self._W2 = self.add_weight(shape=(output_dim, input_dim), name=\"{}_W2\".format(self.name),\n initializer=self.weight_initializer)\n self._W3 = self.add_weight(shape=(2 * input_dim, input_dim), name=\"{}_W3\".format(self.name),\n initializer=self.weight_initializer)\n self._b2 = self.add_weight(shape=(input_dim,), name=\"{}_b2\".format(self.name),\n initializer=self.weight_initializer)\n self._b3 = self.add_weight(shape=(input_dim,), name=\"{}_b3\".format(self.name),\n initializer=self.weight_initializer)\n self._V = self.add_weight(shape=(input_dim, 1), name=\"{}_V\".format(self.name),\n initializer=self.weight_initializer)\n\n super(AttentionRNNWrapper, self).build()\n\n def compute_output_shape(self, input_shape):\n self._validate_input_shape(input_shape)\n\n return self.layer.compute_output_shape(input_shape)\n\n @property\n def trainable_weights(self):\n return self._trainable_weights + self.layer.trainable_weights\n\n @property\n def non_trainable_weights(self):\n return self._non_trainable_weights + self.layer.non_trainable_weights\n\n def step(self, x, states):\n h = states[1]\n # states[1] necessary?\n\n # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]\n total_x_prod = states[-1]\n # comes from the constants (equals the input sequence)\n X = states[-2]\n\n # expand dims to add the vector which is only valid for this time step\n # to total_x_prod which is valid for all time steps\n hw = K.expand_dims(K.dot(h, self._W2), 1)\n additive_atn = total_x_prod + hw\n attention = K.softmax(K.dot(additive_atn, self._V), axis=1)\n x_weighted = K.sum(attention * X, [1])\n\n x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3\n\n h, new_states = self.layer.cell.call(x, states[:-2])\n\n return h, new_states\n\n def call(self, x, constants=None, mask=None, initial_state=None):\n # input shape: (n_samples, time (padded with zeros), input_dim)\n input_shape = self.input_spec.shape\n\n if self.layer.stateful:\n initial_states = self.layer.states\n elif initial_state is not None:\n initial_states = initial_state\n if not isinstance(initial_states, (list, tuple)):\n initial_states = [initial_states]\n\n base_initial_state = self.layer.get_initial_state(x)\n if 
len(base_initial_state) != len(initial_states):\n raise ValueError(\n \"initial_state does not have the correct length. Received length {0} \"\n \"but expected {1}\".format(len(initial_states), len(base_initial_state)))\n else:\n # check the state' shape\n for i in range(len(initial_states)):\n # initial_states[i][j] != base_initial_state[i][j]:\n if not initial_states[i].shape.is_compatible_with(base_initial_state[i].shape):\n raise ValueError(\n \"initial_state does not match the default base state of the layer. \"\n \"Received {0} but expected {1}\".format(\n [x.shape for x in initial_states],\n [x.shape for x in base_initial_state]))\n else:\n initial_states = self.layer.get_initial_state(x)\n\n # print(initial_states)\n\n if not constants:\n constants = []\n\n constants += self.get_constants(x)\n\n last_output, outputs, states = K.rnn(\n self.step,\n x,\n initial_states,\n go_backwards=self.layer.go_backwards,\n mask=mask,\n constants=constants,\n unroll=self.layer.unroll,\n input_length=input_shape[1]\n )\n\n if self.layer.stateful:\n self.updates = []\n for i in range(len(states)):\n self.updates.append((self.layer.states[i], states[i]))\n\n if self.layer.return_sequences:\n output = outputs\n else:\n output = last_output\n\n # Properly set learning phase\n if getattr(last_output, '_uses_learning_phase', False):\n output._uses_learning_phase = True\n for state in states:\n state._uses_learning_phase = True\n\n if self.layer.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [output] + states\n else:\n return output\n\n def get_constants(self, x):\n # add constants to speed up calculation\n constants = [x, K.dot(x, self._W1) + self._b2]\n\n return constants\n\n def get_config(self):\n config = {'weight_initializer': self.weight_initializer}\n base_config = super(AttentionRNNWrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass MTNetKeras(BaseModel):\n\n def __init__(self, check_optional_config=False, future_seq_len=1):\n\n \"\"\"\n Constructor of MTNet model\n \"\"\"\n self.check_optional_config = check_optional_config\n self.config = None\n # config parameter\n self.time_step = None # timestep\n self.cnn_height = None # convolution window size (convolution filter height)` ?\n self.long_num = None # the number of the long-term memory series\n self.ar_window = None # the window size of ar model\n self.feature_num = None # input's variable dimension (convolution filter width)\n self.output_dim = None # output's variable dimension\n self.cnn_hid_size = None\n # last size is equal to en_conv_hidden_size, should be a list\n self.rnn_hid_sizes = None\n self.last_rnn_size = None\n self.cnn_dropout = None\n self.rnn_dropout = None\n self.lr = None\n self.batch_size = None\n self.loss = None\n\n self.saved_configs = {\"cnn_height\", \"long_num\", \"time_step\", \"ar_window\",\n \"cnn_hid_size\", \"rnn_hid_sizes\", \"cnn_dropout\",\n \"rnn_dropout\", \"lr\", \"batch_size\",\n \"epochs\", \"metrics\", \"mc\",\n \"feature_num\", \"output_dim\", \"loss\"}\n self.model = None\n self.metrics = None\n self.mc = None\n self.epochs = None\n\n def apply_config(self, rs=False, config=None):\n super()._check_config(**config)\n if rs:\n config_names = set(config.keys())\n assert config_names.issuperset(self.saved_configs)\n # assert config_names.issuperset(self.lr_decay_configs) or \\\n # config_names.issuperset(self.lr_configs)\n self.epochs = config.get(\"epochs\")\n self.metrics = 
config.get(\"metrics\", [\"mean_squared_error\"])\n self.mc = config.get(\"mc\")\n self.feature_num = config[\"feature_num\"]\n self.output_dim = config[\"output_dim\"]\n self.time_step = config.get(\"time_step\", 1)\n self.long_num = config.get(\"long_num\", 7)\n self.ar_window = config.get(\"ar_window\", 1)\n self.cnn_height = config.get(\"cnn_height\", 1)\n self.cnn_hid_size = config.get(\"cnn_hid_size\", 32)\n self.rnn_hid_sizes = config.get(\"rnn_hid_sizes\", [16, 32])\n self.last_rnn_size = self.rnn_hid_sizes[-1]\n self.rnn_dropout = config.get(\"rnn_dropout\", 0.2)\n self.cnn_dropout = config.get(\"cnn_dropout\", 0.2)\n self.loss = config.get('loss', \"mae\")\n self.batch_size = config.get(\"batch_size\", 64)\n self.lr = config.get('lr', 0.001)\n self._check_configs()\n\n def _check_configs(self):\n assert self.time_step >= 1, \\\n \"Invalid configuration value. 'time_step' must be larger than 1\"\n assert self.time_step >= self.ar_window, \\\n \"Invalid configuration value. 'ar_window' must not exceed 'time_step'\"\n assert isinstance(self.rnn_hid_sizes, list), \\\n \"Invalid configuration value. 'rnn_hid_sizes' must be a list of integers\"\n # assert self.cnn_hid_size == self.last_rnn_size,\\\n # \"Invalid configuration value. 'cnn_hid_size' must be equal to the last element of \" \\\n # \"'rnn_hid_sizes'\"\n\n def build(self):\n \"\"\"\n build MTNet model\n :param config:\n :return:\n \"\"\"\n training = True if self.mc else None\n # long-term time series historical data inputs\n long_input = Input(shape=(self.long_num, self.time_step, self.feature_num))\n # short-term time series historical data\n short_input = Input(shape=(self.time_step, self.feature_num))\n\n # ------- no-linear component----------------\n # memory and context : (batch, long_num, last_rnn_size)\n memory = self.__encoder(long_input, num=self.long_num, name='memory', training=training)\n # memory = memory_model(long_input)\n context = self.__encoder(long_input, num=self.long_num, name='context', training=training)\n # context = context_model(long_input)\n # query: (batch, 1, last_rnn_size)\n query_input = Reshape((1, self.time_step, self.feature_num),\n name='reshape_query')(short_input)\n query = self.__encoder(query_input, num=1, name='query', training=training)\n # query = query_model(query_input)\n\n # prob = memory * query.T, shape is (long_num, 1)\n query_t = Permute((2, 1))(query)\n prob = Lambda(lambda xy: tf.matmul(xy[0], xy[1]))([memory, query_t])\n prob = Softmax(axis=-1)(prob)\n # out is of the same shape of context: (batch, long_num, last_rnn_size)\n out = multiply([context, prob])\n # concat: (batch, long_num + 1, last_rnn_size)\n\n pred_x = concatenate([out, query], axis=1)\n reshaped_pred_x = Reshape((self.last_rnn_size * (self.long_num + 1),),\n name=\"reshape_pred_x\")(pred_x)\n nonlinear_pred = Dense(units=self.output_dim,\n kernel_initializer=TruncatedNormal(stddev=0.1),\n bias_initializer=Constant(0.1),)(reshaped_pred_x)\n\n # ------------ ar component ------------\n if self.ar_window > 0:\n ar_pred_x = Reshape((self.ar_window * self.feature_num,),\n name=\"reshape_ar\")(short_input[:, -self.ar_window:])\n linear_pred = Dense(units=self.output_dim,\n kernel_initializer=TruncatedNormal(stddev=0.1),\n bias_initializer=Constant(0.1),)(ar_pred_x)\n else:\n linear_pred = 0\n y_pred = Add()([nonlinear_pred, linear_pred])\n self.model = Model(inputs=[long_input, short_input], outputs=y_pred)\n # lr decay\n # def lr_scheduler(epoch, r):\n # max_lr = 0.03\n # min_lr = 0.0001\n # lr = min_lr + 
(max_lr - min_lr) * math.exp(-epoch / 60)\n # return lr\n # callbacks = [tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)]\n # initial_lr = 0.003\n # rate = math.exp(-1 / 60)\n # lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n # initial_lr,\n # decay_steps=249,\n # decay_rate=rate,\n # staircase=True\n # )\n #\n # self.model.compile(loss=\"mae\",\n # metrics=metrics,\n # optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule))\n\n self.model.compile(loss=self.loss,\n metrics=self.metrics,\n optimizer=tf.keras.optimizers.Adam(lr=self.lr))\n\n return self.model\n\n def __encoder(self, input, num, name='Encoder', training=None):\n \"\"\"\n Treat batch_size dimension and num dimension as one batch_size dimension\n (batch_size * num).\n :param input: <batch_size, num, time_step, input_dim>\n :param num: the number of input time series data. For short term data, the num is 1.\n :return: the embedded of the input <batch_size, num, last_rnn_hid_size>\n \"\"\"\n # input = Input(shape=(num, self.time_step, self.feature_num))\n batch_size_new = self.batch_size * num\n Tc = self.time_step - self.cnn_height + 1\n\n # CNN\n # reshaped input: (batch_size_new, time_step, feature_num, 1)\n reshaped_input = Lambda(lambda x:\n K.reshape(x, (-1, self.time_step, self.feature_num, 1),),\n name=name+'reshape_cnn')(input)\n # output: <batch_size_new, conv_out, 1, en_conv_hidden_size>\n cnn_out = Conv2D(filters=self.cnn_hid_size,\n kernel_size=(self.cnn_height, self.feature_num),\n padding=\"valid\",\n kernel_initializer=TruncatedNormal(stddev=0.1),\n bias_initializer=Constant(0.1),\n activation=\"relu\")(reshaped_input)\n cnn_out = Dropout(self.cnn_dropout)(cnn_out, training=training)\n\n rnn_input = Lambda(lambda x:\n K.reshape(x, (-1, num, Tc, self.cnn_hid_size)),)(cnn_out)\n\n # use AttentionRNNWrapper\n rnn_cells = [GRUCell(h_size, activation=\"relu\", dropout=self.rnn_dropout)\n for h_size in self.rnn_hid_sizes]\n\n attention_rnn = AttentionRNNWrapper(RNN(rnn_cells),\n weight_initializer=TruncatedNormal(stddev=0.1))\n\n outputs = []\n for i in range(num):\n input_i = rnn_input[:, i]\n # input_i = (batch, conv_hid_size, Tc)\n input_i = Permute((2, 1), input_shape=[Tc, self.cnn_hid_size])(input_i)\n # output = (batch, last_rnn_hid_size)\n output_i = attention_rnn(input_i, training=training)\n # output = (batch, 1, last_rnn_hid_size)\n output_i = Reshape((1, -1))(output_i)\n outputs.append(output_i)\n if len(outputs) > 1:\n output = Lambda(lambda x: concatenate(x, axis=1))(outputs)\n else:\n output = outputs[0]\n return output\n\n def _reshape_input_x(self, x):\n long_term = np.reshape(x[:, : self.time_step * self.long_num],\n [-1, self.long_num, self.time_step, x.shape[-1]])\n short_term = np.reshape(x[:, self.time_step * self.long_num:],\n [-1, self.time_step, x.shape[-1]])\n return long_term, short_term\n\n def _pre_processing(self, x, validation_data=None):\n long_term, short_term = self._reshape_input_x(x)\n if validation_data:\n val_x, val_y = validation_data\n long_val, short_val = self._reshape_input_x(val_x)\n validation_data = ([long_val, short_val], val_y)\n return [long_term, short_term], validation_data\n\n def _add_config_attributes(self, config, **new_attributes):\n # new_attributes are among [\"metrics\", \"epochs\", \"mc\", \"feature_num\", \"output_dim\"]\n if self.config is None:\n self.config = config\n else:\n if config:\n raise ValueError(\"You can only pass new configuations for 'mc', 'epochs' and \"\n \"'metrics' during incremental fitting. 
\"\n \"Additional configs passed are {}\".format(config))\n\n if new_attributes[\"metrics\"] is None:\n del new_attributes[\"metrics\"]\n self.config.update(new_attributes)\n\n def _check_input(self, x, y):\n input_feature_num = x.shape[-1]\n input_output_dim = y.shape[-1]\n if input_feature_num is None:\n raise ValueError(\"input x is None!\")\n if input_output_dim is None:\n raise ValueError(\"input y is None!\")\n\n if self.feature_num is not None and self.feature_num != input_feature_num:\n raise ValueError(\"input x has different feature number (the shape of last dimension) \"\n \"{} with the fitted model, which is {}.\"\n .format(input_feature_num, self.feature_num))\n if self.output_dim is not None and self.output_dim != input_output_dim:\n raise ValueError(\"input y has different prediction size (the shape of last dimension) \"\n \"of {} with the fitted model, which is {}.\"\n .format(input_output_dim, self.output_dim))\n return input_feature_num, input_output_dim\n\n def fit_eval(self, data, validation_data=None, mc=False, metrics=None,\n epochs=10, verbose=0, **config):\n x, y = data[0], data[1]\n feature_num, output_dim = self._check_input(x, y)\n self._add_config_attributes(config, epochs=epochs, mc=mc, metrics=metrics,\n feature_num=feature_num, output_dim=output_dim)\n self.apply_config(config=self.config)\n processed_x, processed_validation_data = self._pre_processing(x, validation_data)\n\n # if model is not initialized, __build the model\n if self.model is None:\n st = time.time()\n self.build()\n end = time.time()\n if verbose == 1:\n print(\"Build model took {}s\".format(end - st))\n\n st = time.time()\n hist = self.model.fit(processed_x, y, validation_data=processed_validation_data,\n batch_size=self.batch_size,\n epochs=self.epochs,\n verbose=verbose)\n\n if verbose == 1:\n print(\"Fit model took {}s\".format(time.time() - st))\n if validation_data is None:\n # get train metrics\n # results = self.model.evaluate(x, y)\n result = hist.history.get(self.metrics[0])[-1]\n else:\n result = hist.history.get('val_' + str(self.metrics[0]))[-1]\n return result\n\n def evaluate(self, x, y, metrics=['mse']):\n \"\"\"\n Evaluate on x, y\n :param x: input\n :param y: target\n :param metric: a list of metrics in string format\n :return: a list of metric evaluation results\n \"\"\"\n y_pred = self.predict(x)\n if y_pred.shape[1] == 1:\n multioutput = 'uniform_average'\n else:\n multioutput = 'raw_values'\n # y = np.squeeze(y, axis=2)\n return [Evaluator.evaluate(m, y, y_pred, multioutput=multioutput) for m in metrics]\n\n def predict(self, x, mc=False):\n input_x = self._reshape_input_x(x)\n return self.model.predict(input_x)\n\n def predict_with_uncertainty(self, x, n_iter=100):\n result = np.zeros((n_iter,) + (x.shape[0], self.output_dim))\n\n for i in range(n_iter):\n result[i, :, :] = self.predict(x, mc=True)\n\n prediction = result.mean(axis=0)\n uncertainty = result.std(axis=0)\n return prediction, uncertainty\n\n def save(self, model_path, config_path):\n self.model.save_weights(model_path)\n config_to_save = {\"cnn_height\": self.cnn_height,\n \"long_num\": self.long_num,\n \"time_step\": self.time_step,\n \"ar_window\": self.ar_window,\n \"cnn_hid_size\": self.cnn_hid_size,\n \"rnn_hid_sizes\": self.rnn_hid_sizes,\n \"cnn_dropout\": self.cnn_dropout,\n \"rnn_dropout\": self.rnn_dropout,\n \"lr\": self.lr,\n \"batch_size\": self.batch_size,\n # for fit eval\n \"epochs\": self.epochs,\n # todo: can not serialize metrics unless all elements are str\n \"metrics\": 
self.metrics,\n \"mc\": self.mc,\n \"feature_num\": self.feature_num,\n \"output_dim\": self.output_dim,\n \"loss\": self.loss\n }\n assert set(config_to_save.keys()) == self.saved_configs, \\\n \"The keys in config_to_save is not the same as self.saved_configs.\" \\\n \"Please keep them consistent\"\n # if self.decay_epochs > 0:\n # lr_decay_configs = {\"min_lr\": self.min_lr,\n # \"max_lr\": self.max_lr}\n # assert set(lr_decay_configs.keys()) == self.lr_decay_configs, \\\n # \"The keys in lr_decay_configs is not the same as self.lr_decay_configs.\" \\\n # \"Please keep them consistent\"\n # config_to_save.update(lr_decay_configs)\n # else:\n # lr_configs = {\"lr\": self.lr_value}\n # assert set(lr_configs.keys()) == self.lr_configs, \\\n # \"The keys in lr_configs is not the same as self.lr_configs.\" \\\n # \"Please keep them consistent\"\n # config_to_save.update(lr_configs)\n\n save_config(config_path, config_to_save)\n\n def restore(self, model_path, **config):\n \"\"\"\n restore model from file\n :param model_path: the model file\n :param config: the trial config\n \"\"\"\n self.config = config\n self.apply_config(rs=True, config=config)\n self.build()\n self.model.load_weights(model_path)\n\n def _get_optional_parameters(self):\n return {\n \"batch_size\",\n \"cnn_dropout\",\n \"rnn_dropout\",\n \"time_step\",\n \"cnn_height\",\n \"long_num\",\n \"ar_size\",\n \"loss\",\n \"cnn_hid_size\",\n \"rnn_hid_sizes\",\n \"lr\"\n }\n\n def _get_required_parameters(self):\n return {\n \"feature_num\",\n \"output_dim\"\n }\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.keras.backend.reshape",
"tensorflow.keras.backend.dot",
"tensorflow.keras.backend.concatenate",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.backend.sum",
"numpy.zeros",
"numpy.reshape",
"tensorflow.keras.backend.rnn",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.matmul",
"tensorflow.keras.models.Model"
]
] |
ILoveRedEd55/AIML_Detection_System | [
"b2fdd8475f069884060f7bb31f41953bae057d7b"
] | [
"lib/src/layers/RNN.py"
] | [
"from src.layers.LayerHelper import *\nfrom settings import LayerSettings as layerSettings\nimport tensorflow as tf\nimport os\nCUDA_VISIBLE_DEVICES=0\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # set gpu number\n\ndef LSTM(name_, inputTensor_, numberOfOutputs_, isTraining_, dropoutProb_=None):\n\twith tf.name_scope(name_):\n\t\tcell = tf.nn.rnn_cell.LSTMCell(num_units=numberOfOutputs_,\n\t\t\t\t\t\t use_peepholes=True,\n\t\t\t\t\t\t initializer=layerSettings.LSTM_INITIALIZER,\n\t\t\t\t\t\t forget_bias=1.0,\n\t\t\t\t\t\t state_is_tuple=True,\n\t\t\t\t\t\t activation=tf.nn.tanh,\n\t\t\t\t\t\t name=name_+\"_cell\")\n\n\t\tif dropoutProb_ != None:\n\t\t\tdropoutProbTensor = tf.cond(isTraining_, lambda: 0.5, lambda: 1.0)\n\t\t\tcell = tf.nn.rnn_cell.DropoutWrapper(cell,\n\t\t\t\t\t\t\t input_keep_prob=dropoutProbTensor,\n\t\t\t\t\t\t\t output_keep_prob=dropoutProbTensor)\n\n\t\tstatePlaceHolder = tf.nn.rnn_cell.LSTMStateTuple( tf.placeholder(layerSettings.FLOAT_TYPE, [None, numberOfOutputs_]),\n\t\t\t\t\t\t\t\t tf.placeholder(layerSettings.FLOAT_TYPE, [None, numberOfOutputs_]) )\n\n\t\toutputTensor, stateTensor = tf.nn.dynamic_rnn(\tcell=cell,\n\t\t\t\t\t\t\t\tinitial_state=statePlaceHolder,\n\t\t\t\t\t\t\t\tinputs=inputTensor_)\n\n\t\t# Add Regularization Loss\n\t\tfor eachVariable in tf.trainable_variables():\n\t\t\tif name_ in eachVariable.name:\n\t\t\t\tif ('bias' not in eachVariable.name)and(layerSettings.REGULARIZER_WEIGHTS_DECAY != None):\n\t\t\t\t\tregularizationLoss = L2_Regularizer(eachVariable)\n\t\t\t\t\ttf.losses.add_loss(regularizationLoss, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n\t\t\t\t\t\n\n\treturn outputTensor, stateTensor, statePlaceHolder\n\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.losses.add_loss",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.trainable_variables",
"tensorflow.nn.dynamic_rnn",
"tensorflow.name_scope",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.cond"
]
] |
tadasdanielius/P5-Vehicle-Detection-And-Tracking | [
"38513e91d863f7fff50703349aacbe5d5bbfae39"
] | [
"sdc/detection/cnn_classifier.py"
] | [
"from keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import model_from_json\nfrom sklearn.preprocessing import normalize\nimport cv2\nimport numpy as np\nimport glob\nimport json\nfrom keras.layers import merge\nfrom keras.layers.core import Lambda\nfrom keras.models import Model\n\nimport tensorflow as tf\n\n\ndef make_parallel(model, gpu_count):\n def get_slice(data, idx, parts):\n shape = tf.shape(data)\n size = tf.concat(0, [shape[:1] // parts, shape[1:]])\n stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])\n start = stride * idx\n return tf.slice(data, start, size)\n\n outputs_all = []\n for i in range(len(model.outputs)):\n outputs_all.append([])\n\n # Place a copy of the model on each GPU, each getting a slice of the batch\n for i in range(gpu_count):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i) as scope:\n\n inputs = []\n # Slice each input into a piece for processing on this GPU\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)\n inputs.append(slice_n)\n\n outputs = model(inputs)\n\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save all the outputs for merging back together later\n for l in range(len(outputs)):\n outputs_all[l].append(outputs[l])\n\n # merge outputs on CPU\n with tf.device('/cpu:0'):\n merged = []\n for outputs in outputs_all:\n merged.append(merge(outputs, mode='concat', concat_axis=0))\n\n return Model(input=model.inputs, output=merged)\n\n\nclass CNNClassifier:\n def __init__(self):\n self.classifier = None\n\n def get_model(self, parallel=False):\n model = Sequential()\n #model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(64, 64, 3)))\n model.add(Convolution2D(8, 8, 8, subsample=(4, 4), border_mode=\"same\", activation='elu', name='Conv1'))\n model.add(Convolution2D(16, 5, 5, subsample=(2, 2), border_mode=\"same\", activation='elu', name='Conv2'))\n model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode=\"same\", activation='elu', name='Conv3'))\n model.add(Flatten())\n model.add(ELU())\n model.add(Dense(1024, activation='elu'))\n model.add(Dropout(.5))\n model.add(ELU())\n model.add(Dense(512, activation='elu'))\n model.add(Dropout(.5))\n model.add(Dense(1, name='output'))\n model.add(Activation('sigmoid'))\n if parallel:\n model = make_parallel(model, 2)\n #model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n self.model = model\n return model\n\n def _model(self):\n img_width, img_height = 64, 64\n model = Sequential()\n model.add(Convolution2D(8, 3, 3, input_shape=(img_width, img_height, 3)))\n model.add(Activation('elu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #model.add(Convolution2D(16, 3, 3))\n #model.add(Activation('elu'))\n #model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #model.add(Convolution2D(32, 3, 3))\n #model.add(Activation('elu'))\n #model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n #model = make_parallel(model, 2)\n self.model = model\n\n def compile(self):\n self.model.compile(loss='binary_crossentropy',\n optimizer='rmsprop', 
class_mode='binary',\n metrics=['accuracy'])\n\n def save(self):\n model_json = self.model.to_json()\n with open(\"./model.json\", \"w\") as json_file:\n json.dump(model_json, json_file)\n self.model.save_weights(\"./model.h5\")\n print(\"Saved model to disk\")\n\n def load(self):\n with open('./model.json', 'r') as jfile:\n self.model = model_from_json(json.load(jfile))\n\n self.compile()\n self.model.load_weights('./model.h5')\n\n def get_list(self):\n vehicles = np.array(glob.glob('training_data/vehicles/*/*'))\n y_vehicles = np.zeros(vehicles.shape) + 1\n non_vehicles = np.array(glob.glob('training_data/non-vehicles/*/*'))\n y_non_vehicles = np.zeros(non_vehicles.shape)\n X_data = np.concatenate((vehicles, non_vehicles))\n Y_data = np.concatenate((y_vehicles, y_non_vehicles))\n return X_data, Y_data\n\n def predict(self, image):\n #img = np.copy(image)\n #img = cv2.resize(img, (64, 64))\n x = image[None, :, :, :]\n result = self.model.predict(x, 1)\n return result\n\n def train(self, file_list, labels, test_size=0.2, nb_epoch=30, batch_size=128):\n X_train, X_test, Y_train, Y_test = train_test_split(file_list, labels, test_size=test_size, random_state=100)\n\n test_images = build_images(X_test)\n train_images = build_images(X_train)\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.05,\n zoom_range=0.05,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rotation_range=5,\n horizontal_flip=True)\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n train_generator = train_datagen.flow(train_images, Y_train, batch_size)\n test_generator = test_datagen.flow(test_images, Y_test, batch_size)\n\n nb_train_samples = (batch_size-1)*100\n nb_validation_samples = (batch_size-1)*20\n\n #self.get_model(parallel=False)\n self._model()\n self.compile()\n\n self.model.fit_generator(\n train_generator,\n samples_per_epoch=nb_train_samples,\n nb_epoch=nb_epoch, show_accuracy=True,\n validation_data=test_generator,\n nb_val_samples=nb_validation_samples)\n\ndef build_images(x):\n images = np.zeros((len(x), 64, 64, 3))\n for idx, img_fname in enumerate(x):\n im = cv2.imread(img_fname)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_AREA)\n images[idx] = im\n return images\n\ndef do_all(nb_epoch=30, batch_size=256):\n clf = CNNClassifier()\n x, y = clf.get_list()\n clf.train(x, y, nb_epoch=nb_epoch, batch_size=batch_size)\n clf.save()\n\n"
] | [
[
"tensorflow.shape",
"numpy.zeros",
"tensorflow.device",
"tensorflow.name_scope",
"tensorflow.slice",
"tensorflow.concat",
"numpy.concatenate",
"sklearn.model_selection.train_test_split"
]
] |
ManjunathaPatkar/Machine-Learning | [
"f1c6ec1a9f802f6e88ed67c0da6c1e9373790537"
] | [
"Machine Learning A-Z Template Folder/Part 2 - Regression/Section 5 - Multiple Linear Regression/data_preprocessing_template.py"
] | [
"# Data Preprocessing Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\n#encoding independent variable state\n#from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n#labelencoder_X = LabelEncoder()\n#X[:, 3] = labelencoder_X.fit_transform(X[:, 3])\n#onehotencoder = OneHotEncoder(categorical_features = [3])\n#X = onehotencoder.fit_transform(X).toarray()\n\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nct = ColumnTransformer([(\"State\", OneHotEncoder(), [3])], remainder = 'passthrough')\nX= ct.fit_transform(X)\n\n#avoiding the dummy variable trap\nX=X[:,1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n\n#fitting multiple linear regression to the training set\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\nregressor.fit(X_train,y_train)\n\n\n#Predicting the test set results\ny_pred=regressor.predict(X_test)\n\n#Building the optimal model using backward elimination\nimport statsmodels.api as sm\nX=np.append(arr=np.ones((50,1)).astype(int),values=X,axis=1)\n\n#X_opt=X[:,[0,1,2,3,4,5]]\nX_opt = np.array(X[:, [0, 1, 2, 3, 4, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = np.array(X[:, [0, 1, 3, 4, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = np.array(X[:, [0, 3, 4, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = np.array(X[:, [0, 3, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = np.array(X[:, [0, 3]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.ones",
"pandas.read_csv",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder"
]
] |
marijnfs/onnxruntime | [
"6e1eb4b0efca9644c5f8979fbded9416fdd722dc"
] | [
"tools/ci_build/build.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport argparse\nimport glob\nimport multiprocessing\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport hashlib\nfrom logger import log\n\n\nclass BaseError(Exception):\n \"\"\"Base class for errors originating from build.py.\"\"\"\n pass\n\n\nclass BuildError(BaseError):\n \"\"\"Error from running build steps.\"\"\"\n\n def __init__(self, *messages):\n super().__init__(\"\\n\".join(messages))\n\n\nclass UsageError(BaseError):\n \"\"\"Usage related error.\"\"\"\n\n def __init__(self, message):\n super().__init__(message)\n\n\ndef _check_python_version():\n # According to the BUILD.md, python 3.5+ is required:\n # Python 2 is definitely not supported and it should be safer to consider\n # it won't run with python 4:\n if sys.version_info[0] != 3:\n raise BuildError(\n \"Bad python major version: expecting python 3, found version \"\n \"'{}'\".format(sys.version))\n if sys.version_info[1] < 5:\n raise BuildError(\n \"Bad python minor version: expecting python 3.5+, found version \"\n \"'{}'\".format(sys.version))\n\n\n_check_python_version()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"ONNXRuntime CI build driver.\",\n usage=\"\"\" # noqa\n Default behavior is --update --build --test for native architecture builds.\n Default behavior is --update --build for cross-compiled builds.\n\n The Update phase will update git submodules, and run cmake to generate makefiles.\n The Build phase will build all projects.\n The Test phase will run all unit tests, and optionally the ONNX tests.\n\n Use the individual flags to only run the specified stages.\n \"\"\")\n # Main arguments\n parser.add_argument(\n \"--build_dir\", required=True, help=\"Path to the build directory.\")\n parser.add_argument(\n \"--config\", nargs=\"+\", default=[\"Debug\"],\n choices=[\"Debug\", \"MinSizeRel\", \"Release\", \"RelWithDebInfo\"],\n help=\"Configuration(s) to build.\")\n parser.add_argument(\n \"--update\", action='store_true', help=\"Update makefiles.\")\n parser.add_argument(\"--build\", action='store_true', help=\"Build.\")\n parser.add_argument(\n \"--clean\", action='store_true',\n help=\"Run 'cmake --build --target clean' for the selected config/s.\")\n parser.add_argument(\n \"--parallel\", action='store_true', help=\"\"\"Use parallel build.\n The build setup doesn't get all dependencies right, so --parallel\n only works if you're just rebuilding ONNXRuntime code. If you've\n done an update that fetched external dependencies you have to build\n without --parallel the first time. 
Once that's done , run with\n \"--build --parallel --test\" to just build in\n parallel and run tests.\"\"\")\n parser.add_argument(\"--test\", action='store_true', help=\"Run unit tests.\")\n parser.add_argument(\n \"--skip_tests\", action='store_true', help=\"Skip all tests.\")\n\n # Training options\n parser.add_argument(\n \"--enable_nvtx_profile\", action='store_true', help=\"Enable NVTX profile in ORT.\")\n parser.add_argument(\n \"--enable_training\", action='store_true', help=\"Enable training in ORT.\")\n parser.add_argument(\n \"--enable_training_python_frontend_e2e_tests\", action=\"store_true\",\n help=\"Enable the pytorch frontend training tests.\")\n parser.add_argument(\n \"--enable_training_pipeline_e2e_tests\", action=\"store_true\",\n help=\"Enable the pipeline c++ e2e tests.\")\n parser.add_argument(\n \"--use_horovod\", action='store_true', help=\"Enable Horovod.\")\n parser.add_argument(\n \"--mpi_home\", help=\"Path to MPI installation dir\")\n parser.add_argument(\n \"--nccl_home\", help=\"Path to NCCL installation dir\")\n\n # enable ONNX tests\n parser.add_argument(\n \"--enable_onnx_tests\", action='store_true',\n help=\"\"\"When running the Test phase, run onnx_test_running against\n available test data directories.\"\"\")\n parser.add_argument(\"--path_to_protoc_exe\", help=\"Path to protoc exe.\")\n parser.add_argument(\n \"--fuzz_testing\", action='store_true', help=\"Enable Fuzz testing of the onnxruntime.\")\n parser.add_argument(\n \"--enable_symbolic_shape_infer_tests\", action='store_true',\n help=\"\"\"When running the Test phase, run symbolic shape inference against\n available test data directories.\"\"\")\n\n # generate documentaiton\n parser.add_argument(\n \"--gen_doc\", action='store_true',\n help=\"Generate documentation on contrib ops\")\n\n # CUDA related\n parser.add_argument(\"--use_cuda\", action='store_true', help=\"Enable CUDA.\")\n parser.add_argument(\n \"--cuda_version\", help=\"The version of CUDA toolkit to use. \"\n \"Auto-detect if not specified. e.g. 9.0\")\n parser.add_argument(\n \"--cuda_home\", help=\"Path to CUDA home.\"\n \"Read from CUDA_HOME environment variable if --use_cuda is true and \"\n \"--cuda_home is not specified.\")\n parser.add_argument(\n \"--cudnn_home\", help=\"Path to CUDNN home. \"\n \"Read from CUDNN_HOME environment variable if --use_cuda is true and \"\n \"--cudnn_home is not specified.\")\n\n # Python bindings\n parser.add_argument(\n \"--enable_pybind\", action='store_true', help=\"Enable Python Bindings.\")\n parser.add_argument(\n \"--build_wheel\", action='store_true', help=\"Build Python Wheel.\")\n parser.add_argument(\n \"--wheel_name_suffix\", help=\"Suffix to append to created wheel names. \"\n \"This value is currently only used for nightly builds.\")\n parser.add_argument(\n \"--numpy_version\", help=\"Installs a specific version of numpy \"\n \"before building the python binding.\")\n parser.add_argument(\n \"--skip-keras-test\", action='store_true',\n help=\"Skip tests with Keras if keras is installed\")\n\n # C-Sharp bindings\n parser.add_argument(\n \"--build_csharp\", action='store_true',\n help=\"Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. \"\n \"For building C# bindings and packaging them into nuget package use --build_nuget arg.\")\n\n parser.add_argument(\n \"--build_nuget\", action='store_true',\n help=\"Build C#.Net DLL and NuGet package on the local machine. 
\"\n \"Currently only Windows and Linux platforms are supported.\")\n\n # Java bindings\n parser.add_argument(\n \"--build_java\", action='store_true', help=\"Build Java bindings.\")\n\n # Node.js binding\n parser.add_argument(\n \"--build_nodejs\", action='store_true',\n help=\"Build Node.js binding and NPM package.\")\n\n # Build a shared lib\n parser.add_argument(\n \"--build_shared_lib\", action='store_true',\n help=\"Build a shared library for the ONNXRuntime.\")\n\n # Build options\n parser.add_argument(\n \"--cmake_extra_defines\", nargs=\"+\",\n help=\"Extra definitions to pass to CMake during build system \"\n \"generation. These are just CMake -D options without the leading -D.\")\n parser.add_argument(\n \"--target\",\n help=\"Build a specific target, e.g. winml_dll\")\n parser.add_argument(\n \"--x86\", action='store_true',\n help=\"Create x86 makefiles. Requires --update and no existing cache \"\n \"CMake setup. Delete CMakeCache.txt if needed\")\n parser.add_argument(\n \"--arm\", action='store_true',\n help=\"Create ARM makefiles. Requires --update and no existing cache \"\n \"CMake setup. Delete CMakeCache.txt if needed\")\n parser.add_argument(\n \"--arm64\", action='store_true',\n help=\"Create ARM64 makefiles. Requires --update and no existing cache \"\n \"CMake setup. Delete CMakeCache.txt if needed\")\n parser.add_argument(\n \"--msvc_toolset\", help=\"MSVC toolset to use. e.g. 14.11\")\n parser.add_argument(\"--android\", action='store_true', help='Build for Android')\n parser.add_argument(\n \"--android_abi\", default=\"arm64-v8a\",\n choices=[\"armeabi-v7a\", \"arm64-v8a\", \"x86\", \"x86_64\"],\n help=\"Specify the target Android Application Binary Interface (ABI)\")\n parser.add_argument(\"--android_api\", type=int, default=27, help='Android API Level, e.g. 21')\n parser.add_argument(\"--android_sdk_path\", type=str, help='Path to the Android SDK')\n parser.add_argument(\"--android_ndk_path\", default=\"\", help=\"Path to the Android NDK\")\n parser.add_argument(\"--android_cpp_shared\", action=\"store_true\",\n help=\"Build with shared libc++ instead of the default static libc++.\")\n parser.add_argument(\"--test_binary_size\", action=\"store_true\",\n help=\"If enabled, build will fail when the built binary size is larger than the threshold. \"\n \"This only applies to Android Minimal build for now.\")\n\n parser.add_argument(\"--ios\", action='store_true', help=\"build for ios\")\n parser.add_argument(\n \"--ios_sysroot\", default=\"\",\n help=\"Specify the location name of the macOS platform SDK to be used\")\n parser.add_argument(\n \"--ios_toolchain_dir\", default=\"\",\n help=\"Path to ios toolchain binaries\")\n parser.add_argument(\n \"--ios_toolchain_file\", default=\"\",\n help=\"Path to ios toolchain file, \"\n \"or cmake/onnxruntime_ios.toolchain.cmake will be used\")\n parser.add_argument(\n \"--xcode_code_signing_team_id\", default=\"\",\n help=\"The development team ID used for code signing in Xcode\")\n parser.add_argument(\n \"--use_xcode\", action='store_true',\n help=\"Use Xcode as cmake generator, this is only supported on MacOS.\")\n parser.add_argument(\n \"--osx_arch\", default=\"arm64\", choices=[\"arm64\", \"x86_64\"],\n help=\"Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS\")\n parser.add_argument(\n \"--apple_deploy_target\", type=str,\n help=\"Specify the minimum version of the target platform \"\n \"(e.g. 
macOS or iOS)\"\n \"This is only supported on MacOS\")\n\n # Arguments needed by CI\n parser.add_argument(\n \"--cmake_path\", default=\"cmake\", help=\"Path to the CMake program.\")\n parser.add_argument(\n \"--ctest_path\", default=\"ctest\", help=\"Path to the CTest program.\")\n parser.add_argument(\n \"--skip_submodule_sync\", action='store_true', help=\"Don't do a \"\n \"'git submodule update'. Makes the Update phase faster.\")\n parser.add_argument(\n \"--use_vstest\", action='store_true',\n help=\"Use use_vstest for running unitests.\")\n parser.add_argument(\n \"--use_jemalloc\", action='store_true', help=\"Use jemalloc.\")\n parser.add_argument(\n \"--use_mimalloc\", default=['none'],\n choices=['none', 'stl', 'arena', 'all'], help=\"Use mimalloc.\")\n parser.add_argument(\n \"--use_openblas\", action='store_true', help=\"Build with OpenBLAS.\")\n parser.add_argument(\n \"--use_dnnl\", action='store_true', help=\"Build with DNNL.\")\n parser.add_argument(\n \"--use_mklml\", action='store_true', help=\"Build with MKLML.\")\n parser.add_argument(\n \"--use_featurizers\", action='store_true',\n help=\"Build with ML Featurizer support.\")\n parser.add_argument(\n \"--use_ngraph\", action='store_true', help=\"Build with nGraph.\")\n parser.add_argument(\n \"--use_openvino\", nargs=\"?\", const=\"CPU_FP32\",\n choices=[\"CPU_FP32\", \"GPU_FP32\", \"GPU_FP16\", \"VAD-M_FP16\",\n \"MYRIAD_FP16\", \"VAD-F_FP32\"],\n help=\"Build with OpenVINO for specific hardware.\")\n parser.add_argument(\n \"--use_nnapi\", action='store_true', help=\"Build with NNAPI support.\")\n parser.add_argument(\n \"--use_rknpu\", action='store_true', help=\"Build with RKNPU.\")\n parser.add_argument(\n \"--use_preinstalled_eigen\", action='store_true',\n help=\"Use pre-installed Eigen.\")\n parser.add_argument(\"--eigen_path\", help=\"Path to pre-installed Eigen.\")\n parser.add_argument(\n \"--use_openmp\", action='store_true', help=\"Build with OpenMP\")\n parser.add_argument(\n \"--enable_msinternal\", action=\"store_true\",\n help=\"Enable for Microsoft internal builds only.\")\n parser.add_argument(\"--llvm_path\", help=\"Path to llvm dir\")\n parser.add_argument(\n \"--use_vitisai\", action='store_true', help=\"Build with Vitis-AI\")\n parser.add_argument(\n \"--use_nuphar\", action='store_true', help=\"Build with nuphar\")\n parser.add_argument(\n \"--use_tensorrt\", action='store_true', help=\"Build with TensorRT\")\n parser.add_argument(\n \"--tensorrt_home\", help=\"Path to TensorRT installation dir\")\n parser.add_argument(\n \"--use_migraphx\", action='store_true', help=\"Build with MIGraphX\")\n parser.add_argument(\n \"--migraphx_home\", help=\"Path to MIGraphX installation dir\")\n parser.add_argument(\n \"--use_full_protobuf\", action='store_true',\n help=\"Use the full protobuf library\")\n\n parser.add_argument(\n \"--skip_onnx_tests\", action='store_true', help=\"Explicitly disable \"\n \"all onnx related tests. 
Note: Use --skip_tests to skip all tests.\")\n parser.add_argument(\n \"--skip_winml_tests\", action='store_true',\n help=\"Explicitly disable all WinML related tests\")\n parser.add_argument(\n \"--skip_nodejs_tests\", action='store_true',\n help=\"Explicitly disable all Node.js binding tests\")\n parser.add_argument(\n \"--enable_msvc_static_runtime\", action='store_true',\n help=\"Enable static linking of MSVC runtimes.\")\n parser.add_argument(\n \"--enable_language_interop_ops\", action='store_true',\n help=\"Enable operator implemented in language other than cpp\")\n parser.add_argument(\n \"--cmake_generator\",\n choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Ninja'],\n default='Visual Studio 15 2017' if is_windows() else None,\n help=\"Specify the generator that CMake invokes. \"\n \"This is only supported on Windows\")\n parser.add_argument(\n \"--enable_multi_device_test\", action='store_true',\n help=\"Test with multi-device. Mostly used for multi-device GPU\")\n parser.add_argument(\n \"--use_dml\", action='store_true', help=\"Build with DirectML.\")\n parser.add_argument(\n \"--use_winml\", action='store_true', help=\"Build with WinML.\")\n parser.add_argument(\n \"--winml_root_namespace_override\", type=str,\n help=\"Specify the namespace that WinML builds into.\")\n parser.add_argument(\n \"--use_telemetry\", action='store_true',\n help=\"Only official builds can set this flag to enable telemetry.\")\n parser.add_argument(\n \"--enable_wcos\", action='store_true',\n help=\"Build for Windows Core OS.\")\n parser.add_argument(\n \"--enable_windows_store\", action='store_true',\n help=\"Build for Windows Store\")\n parser.add_argument(\n \"--enable_lto\", action='store_true',\n help=\"Enable Link Time Optimization\")\n parser.add_argument(\n \"--use_acl\", nargs=\"?\", const=\"ACL_1905\",\n choices=[\"ACL_1902\", \"ACL_1905\", \"ACL_1908\", \"ACL_2002\"],\n help=\"Build with ACL for ARM architectures.\")\n parser.add_argument(\n \"--use_armnn\", action='store_true',\n help=\"Enable ArmNN Execution Provider.\")\n parser.add_argument(\n \"--armnn_relu\", action='store_true',\n help=\"Use the Relu operator implementation from the ArmNN EP.\")\n parser.add_argument(\n \"--armnn_bn\", action='store_true',\n help=\"Use the Batch Normalization operator implementation from the ArmNN EP.\")\n parser.add_argument(\n \"--build_micro_benchmarks\", action='store_true',\n help=\"Build ONNXRuntime micro-benchmarks.\")\n\n # options to reduce binary size\n parser.add_argument(\"--minimal_build\", action='store_true',\n help=\"Create a build that only supports ORT format models. \"\n \"See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. \"\n \"RTTI is automatically disabled in a minimal build.\")\n parser.add_argument(\"--include_ops_by_model\", type=str, help=\"include ops from model(s) under designated path.\")\n parser.add_argument(\"--include_ops_by_config\", type=str,\n help=\"include ops from config file. \"\n \"See /docs/Reduced_Operator_Kernel_build.md for more information.\")\n\n parser.add_argument(\"--disable_contrib_ops\", action='store_true',\n help=\"Disable contrib ops (reduces binary size)\")\n parser.add_argument(\"--disable_ml_ops\", action='store_true',\n help=\"Disable traditional ML ops (reduces binary size)\")\n parser.add_argument(\"--disable_rtti\", action='store_true', help=\"Disable RTTI (reduces binary size)\")\n parser.add_argument(\"--disable_exceptions\", action='store_true',\n help=\"Disable exceptions to reduce binary size. 
Requires --minimal_build.\")\n parser.add_argument(\"--disable_ort_format_load\", action='store_true',\n help='Disable support for loading ORT format models in a non-minimal build.')\n\n return parser.parse_args()\n\n\ndef resolve_executable_path(command_or_path):\n \"\"\"Returns the absolute path of an executable.\"\"\"\n executable_path = shutil.which(command_or_path)\n if executable_path is None:\n raise BuildError(\"Failed to resolve executable path for \"\n \"'{}'.\".format(command_or_path))\n return os.path.realpath(executable_path)\n\n\ndef is_windows():\n return sys.platform.startswith(\"win\")\n\n\ndef is_macOS():\n return sys.platform.startswith(\"darwin\")\n\n\ndef is_linux():\n return sys.platform.startswith(\"linux\")\n\n\ndef get_linux_distro():\n try:\n with open('/etc/os-release', 'r') as f:\n dist_info = dict(\n line.strip().split('=', 1) for line in f.readlines())\n return dist_info.get('NAME', '').strip('\"'), dist_info.get(\n 'VERSION', '').strip('\"')\n except (IOError, ValueError):\n return '', ''\n\n\ndef is_ubuntu_1604():\n dist, ver = get_linux_distro()\n return dist == 'Ubuntu' and ver.startswith('16.04')\n\n\ndef get_config_build_dir(build_dir, config):\n # build directory per configuration\n return os.path.join(build_dir, config)\n\n\ndef run_subprocess(args, cwd=None, capture=False, dll_path=None,\n shell=False, env={}):\n log.info(\"Running subprocess in '{0}'\\n{1}\".format(\n cwd or os.getcwd(), args))\n my_env = os.environ.copy()\n if dll_path:\n if is_windows():\n my_env[\"PATH\"] = dll_path + os.pathsep + my_env[\"PATH\"]\n else:\n if \"LD_LIBRARY_PATH\" in my_env:\n my_env[\"LD_LIBRARY_PATH\"] += os.pathsep + dll_path\n else:\n my_env[\"LD_LIBRARY_PATH\"] = dll_path\n\n stdout, stderr = (subprocess.PIPE, subprocess.STDOUT) if capture else (\n None, None)\n my_env.update(env)\n completed_process = subprocess.run(\n args, cwd=cwd, check=True, stdout=stdout, stderr=stderr,\n env=my_env, shell=shell)\n log.debug(\"Subprocess completed. Return code=\" +\n str(completed_process.returncode))\n return completed_process\n\n\ndef update_submodules(source_dir):\n run_subprocess([\"git\", \"submodule\", \"sync\", \"--recursive\"], cwd=source_dir)\n run_subprocess([\"git\", \"submodule\", \"update\", \"--init\", \"--recursive\"],\n cwd=source_dir)\n\n\ndef is_docker():\n path = '/proc/self/cgroup'\n return (\n os.path.exists('/.dockerenv') or\n os.path.isfile(path) and any('docker' in line for line in open(path))\n )\n\n\ndef is_sudo():\n return 'SUDO_UID' in os.environ.keys()\n\n\ndef install_apt_package(package):\n have = package in str(run_subprocess(\n [\"apt\", \"list\", \"--installed\", package], capture=True).stdout)\n if not have:\n if is_sudo():\n run_subprocess(['apt-get', 'install', '-y', package])\n else:\n raise BuildError(package + \" APT package missing. Please re-run \"\n \"this script using sudo to install.\")\n\n\ndef install_ubuntu_deps(args):\n \"\"\"Check if the necessary Ubuntu dependencies are installed.\n Not required on docker. Provide help output if missing.\"\"\"\n\n # check we need the packages first\n if not (args.enable_pybind or args.use_openblas):\n return\n\n # not needed on docker as packages are pre-installed\n if not is_docker():\n try:\n if args.enable_pybind:\n install_apt_package(\"python3\")\n\n if args.use_openblas:\n install_apt_package(\"libopenblas-dev\")\n\n except Exception as e:\n raise BuildError(\"Error setting up required APT packages. 
\"\n \"{}\".format(str(e)))\n\n\ndef install_python_deps(numpy_version=\"\"):\n dep_packages = ['setuptools', 'wheel', 'pytest']\n dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version\n else 'numpy>=1.16.6')\n dep_packages.append('sympy>=1.1')\n dep_packages.append('packaging')\n dep_packages.append('cerberus')\n run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',\n 'files.pythonhosted.org'] + dep_packages)\n\n\n# We need to install Torch to test certain functionalities of the ORT Python package\ndef install_torch():\n # Command works for both Windows\n run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',\n 'files.pythonhosted.org', 'torch===1.5.1+cu101', 'torchvision===0.6.1+cu101',\n '-f', 'https://download.pytorch.org/whl/torch_stable.html'])\n\n\ndef check_md5(filename, expected_md5):\n if not os.path.exists(filename):\n return False\n hash_md5 = hashlib.md5()\n BLOCKSIZE = 1024*64\n with open(filename, \"rb\") as f:\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hash_md5.update(buf)\n buf = f.read(BLOCKSIZE)\n hex = hash_md5.hexdigest()\n if hex != expected_md5:\n log.info('md5 mismatch, expect %s, got %s' % (expected_md5, hex))\n os.remove(filename)\n return False\n return True\n\n\ndef setup_test_data(build_dir, configs):\n # create a shortcut for test models if there is a 'models'\n # folder in build_dir\n if is_windows():\n src_model_dir = os.path.join(build_dir, 'models')\n if os.path.exists('C:\\\\local\\\\models') and not os.path.exists(\n src_model_dir):\n log.debug(\"creating shortcut %s -> %s\" % (\n 'C:\\\\local\\\\models', src_model_dir))\n run_subprocess(['mklink', '/D', '/J', src_model_dir,\n 'C:\\\\local\\\\models'], shell=True)\n for config in configs:\n config_build_dir = get_config_build_dir(build_dir, config)\n os.makedirs(config_build_dir, exist_ok=True)\n dest_model_dir = os.path.join(config_build_dir, 'models')\n if os.path.exists('C:\\\\local\\\\models') and not os.path.exists(\n dest_model_dir):\n log.debug(\"creating shortcut %s -> %s\" % (\n 'C:\\\\local\\\\models', dest_model_dir))\n run_subprocess(['mklink', '/D', '/J', dest_model_dir,\n 'C:\\\\local\\\\models'], shell=True)\n elif os.path.exists(src_model_dir) and not os.path.exists(\n dest_model_dir):\n log.debug(\"creating shortcut %s -> %s\" % (\n src_model_dir, dest_model_dir))\n run_subprocess(['mklink', '/D', '/J', dest_model_dir,\n src_model_dir], shell=True)\n\n\ndef use_dev_mode(args):\n if args.use_acl:\n return 'OFF'\n if args.use_armnn:\n return 'OFF'\n if args.ios and is_macOS():\n return 'OFF'\n return 'ON'\n\n\ndef generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home,\n mpi_home, nccl_home, tensorrt_home, migraphx_home,\n path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):\n log.info(\"Generating CMake build tree\")\n cmake_dir = os.path.join(source_dir, \"cmake\")\n # TODO: fix jemalloc build so it does not conflict with onnxruntime\n # shared lib builds. (e.g. 
onnxuntime_pybind)\n # for now, disable jemalloc if pybind is also enabled.\n cmake_args = [\n cmake_path, cmake_dir,\n \"-Donnxruntime_RUN_ONNX_TESTS=\" + (\n \"ON\" if args.enable_onnx_tests else \"OFF\"),\n \"-Donnxruntime_BUILD_WINML_TESTS=\" + (\n \"OFF\" if args.skip_winml_tests else \"ON\"),\n \"-Donnxruntime_GENERATE_TEST_REPORTS=ON\",\n \"-Donnxruntime_DEV_MODE=\" + use_dev_mode(args),\n \"-DPYTHON_EXECUTABLE=\" + sys.executable,\n \"-Donnxruntime_USE_CUDA=\" + (\"ON\" if args.use_cuda else \"OFF\"),\n \"-Donnxruntime_CUDNN_HOME=\" + (cudnn_home if args.use_cuda else \"\"),\n \"-Donnxruntime_USE_FEATURIZERS=\" + (\n \"ON\" if args.use_featurizers else \"OFF\"),\n \"-Donnxruntime_CUDA_HOME=\" + (cuda_home if args.use_cuda else \"\"),\n \"-Donnxruntime_USE_JEMALLOC=\" + (\"ON\" if args.use_jemalloc else \"OFF\"),\n \"-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=\" + (\n \"ON\" if args.use_mimalloc == \"stl\" or\n args.use_mimalloc == \"all\" else \"OFF\"),\n \"-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=\" + (\n \"ON\" if args.use_mimalloc == \"arena\" or\n args.use_mimalloc == \"all\" else \"OFF\"),\n \"-Donnxruntime_ENABLE_PYTHON=\" + (\n \"ON\" if args.enable_pybind else \"OFF\"),\n \"-Donnxruntime_BUILD_CSHARP=\" + (\"ON\" if args.build_csharp else \"OFF\"),\n \"-Donnxruntime_BUILD_JAVA=\" + (\"ON\" if args.build_java else \"OFF\"),\n \"-Donnxruntime_BUILD_NODEJS=\" + (\"ON\" if args.build_nodejs else \"OFF\"),\n \"-Donnxruntime_BUILD_SHARED_LIB=\" + (\n \"ON\" if args.build_shared_lib else \"OFF\"),\n \"-Donnxruntime_USE_EIGEN_FOR_BLAS=\" + (\n \"OFF\" if args.use_openblas else \"ON\"),\n \"-Donnxruntime_USE_OPENBLAS=\" + (\"ON\" if args.use_openblas else \"OFF\"),\n \"-Donnxruntime_USE_DNNL=\" + (\"ON\" if args.use_dnnl else \"OFF\"),\n \"-Donnxruntime_USE_MKLML=\" + (\"ON\" if args.use_mklml else \"OFF\"),\n \"-Donnxruntime_USE_NGRAPH=\" + (\"ON\" if args.use_ngraph else \"OFF\"),\n \"-Donnxruntime_USE_NNAPI_BUILTIN=\" + (\"ON\" if args.use_nnapi else \"OFF\"),\n \"-Donnxruntime_USE_RKNPU=\" + (\"ON\" if args.use_rknpu else \"OFF\"),\n \"-Donnxruntime_USE_OPENMP=\" + (\n \"ON\" if args.use_openmp and not (\n args.use_nnapi or (args.use_mklml and (is_macOS() or is_windows())) or args.use_ngraph or\n args.android or (args.ios and is_macOS())\n or args.use_rknpu)\n else \"OFF\"),\n \"-Donnxruntime_USE_TVM=\" + (\"ON\" if args.use_nuphar else \"OFF\"),\n \"-Donnxruntime_USE_LLVM=\" + (\"ON\" if args.use_nuphar else \"OFF\"),\n \"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=\" + (\n \"ON\" if args.enable_msinternal else \"OFF\"),\n \"-Donnxruntime_USE_VITISAI=\" + (\"ON\" if args.use_vitisai else \"OFF\"),\n \"-Donnxruntime_USE_NUPHAR=\" + (\"ON\" if args.use_nuphar else \"OFF\"),\n \"-Donnxruntime_USE_TENSORRT=\" + (\"ON\" if args.use_tensorrt else \"OFF\"),\n \"-Donnxruntime_TENSORRT_HOME=\" + (\n tensorrt_home if args.use_tensorrt else \"\"),\n # set vars for migraphx\n \"-Donnxruntime_USE_MIGRAPHX=\" + (\"ON\" if args.use_migraphx else \"OFF\"),\n \"-Donnxruntime_MIGRAPHX_HOME=\" + (migraphx_home if args.use_migraphx else \"\"),\n # By default - we currently support only cross compiling for\n # ARM/ARM64 (no native compilation supported through this\n # script).\n \"-Donnxruntime_CROSS_COMPILING=\" + (\n \"ON\" if args.arm64 or args.arm else \"OFF\"),\n \"-Donnxruntime_DISABLE_CONTRIB_OPS=\" + (\"ON\" if args.disable_contrib_ops else \"OFF\"),\n \"-Donnxruntime_DISABLE_ML_OPS=\" + (\"ON\" if args.disable_ml_ops else \"OFF\"),\n \"-Donnxruntime_DISABLE_RTTI=\" + (\"ON\" if 
args.disable_rtti else \"OFF\"),\n \"-Donnxruntime_DISABLE_EXCEPTIONS=\" + (\"ON\" if args.disable_exceptions else \"OFF\"),\n \"-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=\" + (\"ON\" if args.disable_ort_format_load else \"OFF\"),\n \"-Donnxruntime_MINIMAL_BUILD=\" + (\"ON\" if args.minimal_build else \"OFF\"),\n \"-Donnxruntime_REDUCED_OPS_BUILD=\" + (\n \"ON\" if args.include_ops_by_config or args.include_ops_by_model else \"OFF\"),\n \"-Donnxruntime_MSVC_STATIC_RUNTIME=\" + (\n \"ON\" if args.enable_msvc_static_runtime else \"OFF\"),\n # enable pyop if it is nightly build\n \"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=\" + (\n \"ON\" if args.enable_language_interop_ops else \"OFF\"),\n \"-Donnxruntime_USE_DML=\" + (\"ON\" if args.use_dml else \"OFF\"),\n \"-Donnxruntime_USE_WINML=\" + (\"ON\" if args.use_winml else \"OFF\"),\n \"-Donnxruntime_USE_TELEMETRY=\" + (\n \"ON\" if args.use_telemetry else \"OFF\"),\n \"-Donnxruntime_ENABLE_LTO=\" + (\"ON\" if args.enable_lto else \"OFF\"),\n \"-Donnxruntime_USE_ACL=\" + (\"ON\" if args.use_acl else \"OFF\"),\n \"-Donnxruntime_USE_ACL_1902=\" + (\n \"ON\" if args.use_acl == \"ACL_1902\" else \"OFF\"),\n \"-Donnxruntime_USE_ACL_1905=\" + (\n \"ON\" if args.use_acl == \"ACL_1905\" else \"OFF\"),\n \"-Donnxruntime_USE_ACL_1908=\" + (\n \"ON\" if args.use_acl == \"ACL_1908\" else \"OFF\"),\n \"-Donnxruntime_USE_ACL_2002=\" + (\n \"ON\" if args.use_acl == \"ACL_2002\" else \"OFF\"),\n \"-Donnxruntime_USE_ARMNN=\" + (\n \"ON\" if args.use_armnn else \"OFF\"),\n \"-Donnxruntime_ARMNN_RELU_USE_CPU=\" + (\n \"OFF\" if args.armnn_relu else \"ON\"),\n \"-Donnxruntime_ARMNN_BN_USE_CPU=\" + (\n \"OFF\" if args.armnn_bn else \"ON\"),\n # Training related flags\n \"-Donnxruntime_ENABLE_NVTX_PROFILE=\" + (\n \"ON\" if args.enable_nvtx_profile else \"OFF\"),\n \"-Donnxruntime_ENABLE_TRAINING=\" + (\n \"ON\" if args.enable_training else \"OFF\"),\n \"-Donnxruntime_USE_HOROVOD=\" + (\n \"ON\" if args.use_horovod else \"OFF\"),\n \"-Donnxruntime_BUILD_BENCHMARKS=\" + (\n \"ON\" if args.build_micro_benchmarks else \"OFF\")\n ]\n\n if mpi_home and os.path.exists(mpi_home):\n cmake_args += [\"-Donnxruntime_MPI_HOME=\" + mpi_home]\n\n if nccl_home and os.path.exists(nccl_home):\n cmake_args += [\"-Donnxruntime_NCCL_HOME=\" + nccl_home]\n\n if args.winml_root_namespace_override:\n cmake_args += [\"-Donnxruntime_WINML_NAMESPACE_OVERRIDE=\" +\n args.winml_root_namespace_override]\n if args.use_openvino:\n cmake_args += [\"-Donnxruntime_USE_OPENVINO=ON\",\n \"-Donnxruntime_USE_OPENVINO_MYRIAD=\" + (\n \"ON\" if args.use_openvino == \"MYRIAD_FP16\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_GPU_FP32=\" + (\n \"ON\" if args.use_openvino == \"GPU_FP32\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_GPU_FP16=\" + (\n \"ON\" if args.use_openvino == \"GPU_FP16\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_CPU_FP32=\" + (\n \"ON\" if args.use_openvino == \"CPU_FP32\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_VAD_M=\" + (\n \"ON\" if args.use_openvino == \"VAD-M_FP16\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_VAD_F=\" + (\n \"ON\" if args.use_openvino == \"VAD-F_FP32\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_BINARY=\" + (\n \"ON\" if args.use_openvino else \"OFF\")]\n # temp turn on only for linux gpu build\n if not is_windows():\n if args.use_cuda:\n cmake_args += [\n \"-Donnxruntime_USE_FULL_PROTOBUF=ON\"]\n\n # nGraph, TensorRT and OpenVINO providers currently only supports\n # full_protobuf option.\n if (args.use_full_protobuf or args.use_ngraph or 
args.use_tensorrt or\n args.use_openvino or args.use_vitisai or args.gen_doc):\n cmake_args += [\n \"-Donnxruntime_USE_FULL_PROTOBUF=ON\",\n \"-DProtobuf_USE_STATIC_LIBS=ON\"\n ]\n\n if args.use_nuphar and args.llvm_path is not None:\n cmake_args += [\"-DLLVM_DIR=%s\" % args.llvm_path]\n\n if args.use_cuda and not is_windows():\n nvml_stub_path = cuda_home + \"/lib64/stubs\"\n cmake_args += [\"-DCUDA_CUDA_LIBRARY=\" + nvml_stub_path]\n\n if args.use_preinstalled_eigen:\n cmake_args += [\"-Donnxruntime_USE_PREINSTALLED_EIGEN=ON\",\n \"-Deigen_SOURCE_PATH=\" + args.eigen_path]\n\n if args.android:\n cmake_args += [\n \"-DCMAKE_TOOLCHAIN_FILE=\" + args.android_ndk_path +\n \"/build/cmake/android.toolchain.cmake\",\n \"-DANDROID_PLATFORM=android-\" + str(args.android_api),\n \"-DANDROID_ABI=\" + str(args.android_abi)\n ]\n\n if args.android_cpp_shared:\n cmake_args += [\"-DANDROID_STL=c++_shared\"]\n\n if args.ios:\n if is_macOS():\n needed_args = [\n args.use_xcode,\n args.ios_sysroot,\n args.apple_deploy_target,\n ]\n arg_names = [\n \"--use_xcode \" +\n \"<need use xcode to cross build iOS on MacOS>\",\n \"--ios_sysroot \" +\n \"<the location or name of the macOS platform SDK>\",\n \"--apple_deploy_target \" +\n \"<the minimum version of the target platform>\",\n ]\n if not all(needed_args):\n raise BuildError(\n \"iOS build on MacOS canceled due to missing arguments: \" +\n ', '.join(\n val for val, cond in zip(arg_names, needed_args)\n if not cond))\n cmake_args += [\n \"-DCMAKE_SYSTEM_NAME=iOS\",\n \"-Donnxruntime_BUILD_SHARED_LIB=ON\",\n \"-DCMAKE_OSX_SYSROOT=\" + args.ios_sysroot,\n \"-DCMAKE_OSX_ARCHITECTURES=\" + args.osx_arch,\n \"-DCMAKE_OSX_DEPLOYMENT_TARGET=\" + args.apple_deploy_target,\n # we do not need protoc binary for ios cross build\n \"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF\",\n \"-DCMAKE_TOOLCHAIN_FILE=\" + (\n args.ios_toolchain_file if args.ios_toolchain_file\n else \"../cmake/onnxruntime_ios.toolchain.cmake\")\n ]\n # Code sign the binaries, if the code signing development team id is provided\n if args.xcode_code_signing_team_id:\n cmake_args += [\"-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=\" + args.xcode_code_signing_team_id]\n else:\n # TODO: the cross compiling on Linux is not officially supported by Apple\n # and is already broken with the latest codebase, so it should be removed.\n # We are cross compiling on Linux\n needed_args = [\n args.ios_sysroot,\n args.arm64 or args.arm,\n args.ios_toolchain_dir\n ]\n arg_names = [\n \"--ios_sysroot <path to sysroot>\",\n \"--arm or --arm64\",\n \"--ios_toolchain_dir <path to toolchain>\"\n ]\n if not all(needed_args):\n raise BuildError(\n \"iOS build canceled due to missing arguments: \" +\n ', '.join(\n val for val, cond in zip(arg_names, needed_args)\n if not cond))\n compilers = sorted(\n glob.glob(args.ios_toolchain_dir + \"/bin/*-clang*\"))\n os.environ[\"PATH\"] = os.path.join(\n args.ios_toolchain_dir, \"bin\") + os.pathsep + os.environ.get(\n \"PATH\", \"\")\n os.environ[\"LD_LIBRARY_PATH\"] = os.path.join(\n args.ios_toolchain_dir, \"/lib\") + os.pathsep + os.environ.get(\n \"LD_LIBRARY_PATH\", \"\")\n if len(compilers) != 2:\n raise BuildError(\n \"error identifying compilers in ios_toolchain_dir\")\n cmake_args += [\n \"-DCMAKE_OSX_ARCHITECTURES=\" +\n (\"arm64\" if args.arm64 else \"arm\"),\n \"-DCMAKE_SYSTEM_NAME=iOSCross\",\n \"-Donnxruntime_BUILD_UNIT_TESTS=OFF\",\n \"-DCMAKE_OSX_SYSROOT=\" + args.ios_sysroot,\n \"-DCMAKE_C_COMPILER=\" + compilers[0],\n \"-DCMAKE_CXX_COMPILER=\" + compilers[1]\n ]\n\n if 
path_to_protoc_exe:\n cmake_args += [\n \"-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s\" % path_to_protoc_exe]\n\n if args.fuzz_testing:\n if not (args.build_shared_lib and\n is_windows() and\n args.cmake_generator == 'Visual Studio 16 2019' and\n args.use_full_protobuf):\n raise BuildError(\n \"Fuzz test has only be tested with build shared libs option using MSVC on windows\")\n cmake_args += [\n \"-Donnxruntime_BUILD_UNIT_TESTS=ON\",\n \"-Donnxruntime_FUZZ_TEST=ON\",\n \"-Donnxruntime_USE_FULL_PROTOBUF=ON\"]\n\n if args.gen_doc:\n cmake_args += [\"-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=ON\"]\n else:\n cmake_args += [\"-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=OFF\"]\n\n cmake_args += [\"-D{}\".format(define) for define in cmake_extra_defines]\n\n cmake_args += cmake_extra_args\n\n # ADO pipelines will store the pipeline build number\n # (e.g. 191101-2300.1.master) and source version in environment\n # variables. If present, use these values to define the\n # WinML/ORT DLL versions.\n build_number = os.getenv('Build_BuildNumber')\n source_version = os.getenv('Build_SourceVersion')\n if build_number and source_version:\n build_matches = re.fullmatch(\n r\"(\\d\\d)(\\d\\d)(\\d\\d)(\\d\\d)\\.(\\d+)\", build_number)\n if build_matches:\n YY = build_matches.group(2)\n MM = build_matches.group(3)\n DD = build_matches.group(4)\n\n # Get ORT major and minor number\n with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:\n first_line = f.readline()\n ort_version_matches = re.match(r\"(\\d+).(\\d+)\", first_line)\n if not ort_version_matches:\n raise BuildError(\"Couldn't read version from VERSION_FILE\")\n ort_major = ort_version_matches.group(1)\n ort_minor = ort_version_matches.group(2)\n # Example (BuildNumber: 191101-2300.1.master,\n # SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)\n # MajorPart = 1\n # MinorPart = 0\n # BuildPart = 1911\n # PrivatePart = 123\n # String = 191101-2300.1.master.0bce7ae\n cmake_args += [\n \"-DVERSION_MAJOR_PART={}\".format(ort_major),\n \"-DVERSION_MINOR_PART={}\".format(ort_minor),\n \"-DVERSION_BUILD_PART={}\".format(YY),\n \"-DVERSION_PRIVATE_PART={}{}\".format(MM, DD),\n \"-DVERSION_STRING={}.{}.{}.{}\".format(\n ort_major, ort_minor, build_number,\n source_version[0:7])\n ]\n\n for config in configs:\n config_build_dir = get_config_build_dir(build_dir, config)\n os.makedirs(config_build_dir, exist_ok=True)\n if args.use_nuphar:\n os.environ[\"PATH\"] = os.path.join(\n config_build_dir, \"external\", \"tvm\",\n config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ[\"PATH\"]\n\n run_subprocess(\n cmake_args + [\n \"-Donnxruntime_ENABLE_MEMLEAK_CHECKER=\" +\n (\"ON\" if config.lower() == 'debug' and not args.use_nuphar and not\n args.use_ngraph and not args.use_openvino and not\n args.enable_msvc_static_runtime\n else \"OFF\"), \"-DCMAKE_BUILD_TYPE={}\".format(config)],\n cwd=config_build_dir)\n\n\ndef clean_targets(cmake_path, build_dir, configs):\n for config in configs:\n log.info(\"Cleaning targets for %s configuration\", config)\n build_dir2 = get_config_build_dir(build_dir, config)\n cmd_args = [cmake_path,\n \"--build\", build_dir2,\n \"--config\", config,\n \"--target\", \"clean\"]\n\n run_subprocess(cmd_args)\n\n\ndef build_targets(args, cmake_path, build_dir, configs, parallel, target=None):\n for config in configs:\n log.info(\"Building targets for %s configuration\", config)\n build_dir2 = get_config_build_dir(build_dir, config)\n cmd_args = [cmake_path,\n \"--build\", build_dir2,\n \"--config\", config]\n if target:\n 
cmd_args.extend(['--target', target])\n\n build_tool_args = []\n if parallel:\n num_cores = str(multiprocessing.cpu_count())\n if is_windows() and args.cmake_generator != 'Ninja':\n build_tool_args += [\n \"/maxcpucount:\" + num_cores,\n # if nodeReuse is true, msbuild processes will stay around for a bit after the build completes\n \"/nodeReuse:False\",\n ]\n elif (is_macOS() and args.use_xcode):\n # CMake will generate correct build tool args for Xcode\n cmd_args += [\"--parallel\", num_cores]\n elif args.cmake_generator != 'Ninja':\n build_tool_args += [\"-j\" + num_cores]\n\n if build_tool_args:\n cmd_args += [\"--\"]\n cmd_args += build_tool_args\n\n env = {}\n if args.android:\n env['ANDROID_SDK_ROOT'] = args.android_sdk_path\n\n run_subprocess(cmd_args, env=env)\n\n\ndef add_dir_if_exists(directory, dir_list):\n if os.path.isdir(directory):\n dir_list.append(directory)\n\n\ndef setup_cuda_vars(args):\n cuda_home = \"\"\n cudnn_home = \"\"\n\n if args.use_cuda:\n cuda_home = args.cuda_home if args.cuda_home else os.getenv(\n \"CUDA_HOME\")\n cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(\n \"CUDNN_HOME\")\n\n cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))\n cudnn_home_valid = (cudnn_home is not None and os.path.exists(\n cudnn_home))\n\n if not cuda_home_valid or not cudnn_home_valid:\n raise BuildError(\n \"cuda_home and cudnn_home paths must be specified and valid.\",\n \"cuda_home='{}' valid={}. cudnn_home='{}' valid={}\"\n .format(\n cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))\n\n return cuda_home, cudnn_home\n\n\ndef setup_tensorrt_vars(args):\n tensorrt_home = \"\"\n if args.use_tensorrt:\n tensorrt_home = (args.tensorrt_home if args.tensorrt_home\n else os.getenv(\"TENSORRT_HOME\"))\n tensorrt_home_valid = (tensorrt_home is not None and\n os.path.exists(tensorrt_home))\n if not tensorrt_home_valid:\n raise BuildError(\n \"tensorrt_home paths must be specified and valid.\",\n \"tensorrt_home='{}' valid={}.\"\n .format(tensorrt_home, tensorrt_home_valid))\n\n # Set maximum workspace size in byte for\n # TensorRT (1GB = 1073741824 bytes).\n os.environ[\"ORT_TENSORRT_MAX_WORKSPACE_SIZE\"] = \"1073741824\"\n\n # Set maximum number of iterations to detect unsupported nodes\n # and partition the models for TensorRT.\n os.environ[\"ORT_TENSORRT_MAX_PARTITION_ITERATIONS\"] = \"1000\"\n\n # Set minimum subgraph node size in graph partitioning\n # for TensorRT.\n os.environ[\"ORT_TENSORRT_MIN_SUBGRAPH_SIZE\"] = \"1\"\n\n # Set FP16 flag\n os.environ[\"ORT_TENSORRT_FP16_ENABLE\"] = \"0\"\n\n return tensorrt_home\n\n\ndef setup_migraphx_vars(args):\n\n migraphx_home = None\n\n if (args.use_migraphx):\n print(\"migraphx_home = {}\".format(args.migraphx_home))\n migraphx_home = args.migraphx_home or os.getenv(\"MIGRAPHX_HOME\") or None\n\n migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))\n\n if (migraphx_home_not_valid):\n raise BuildError(\"migraphx_home paths must be specified and valid.\",\n \"migraphx_home='{}' valid={}.\"\n .format(migraphx_home, migraphx_home_not_valid))\n return migraphx_home or ''\n\n\ndef setup_dml_build(args, cmake_path, build_dir, configs):\n if args.use_dml:\n for config in configs:\n # Run the RESTORE_PACKAGES target to perform the initial\n # NuGet setup.\n cmd_args = [cmake_path,\n \"--build\", get_config_build_dir(build_dir, config),\n \"--config\", config,\n \"--target\", \"RESTORE_PACKAGES\"]\n run_subprocess(cmd_args)\n\n\ndef adb_push(src, dest, **kwargs):\n return 
run_subprocess(['adb', 'push', src, dest], **kwargs)\n\n\ndef adb_shell(*args, **kwargs):\n return run_subprocess(['adb', 'shell', *args], **kwargs)\n\n\ndef run_android_tests(args, source_dir, config, cwd):\n if args.android_abi == 'x86_64':\n run_subprocess(os.path.join(\n source_dir, 'tools', 'ci_build', 'github', 'android',\n 'start_android_emulator.sh'))\n adb_push('testdata', '/data/local/tmp/', cwd=cwd)\n adb_push(\n os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),\n '/data/local/tmp/', cwd=cwd)\n adb_push('onnxruntime_test_all', '/data/local/tmp/', cwd=cwd)\n adb_push('onnx_test_runner', '/data/local/tmp/', cwd=cwd)\n adb_shell('cd /data/local/tmp && /data/local/tmp/onnxruntime_test_all')\n if args.use_nnapi:\n adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner -e nnapi /data/local/tmp/test')\n else:\n adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner /data/local/tmp/test')\n # run shared_lib_test if necessary\n if args.build_shared_lib:\n adb_push('libonnxruntime.so', '/data/local/tmp/', cwd=cwd)\n adb_push('onnxruntime_shared_lib_test', '/data/local/tmp/', cwd=cwd)\n adb_shell(\n 'cd /data/local/tmp && ' +\n 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp && ' +\n '/data/local/tmp/onnxruntime_shared_lib_test')\n elif args.android_abi == 'arm64-v8a':\n # For Android arm64 abi we are only verify the size of the binary generated by minimal build config\n # Will fail the build if the shared_lib size is larger than the threshold\n if args.minimal_build and config == 'MinSizeRel' and args.build_shared_lib and args.test_binary_size:\n # set current size limit to 1100KB\n bin_size_threshold = 1100000\n bin_actual_size = os.path.getsize(os.path.join(cwd, 'libonnxruntime.so'))\n log.info('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) + 'B]')\n # Write the binary size to a file for uploading later\n with open(os.path.join(cwd, 'binary_size_data.txt'), 'w') as file:\n file.writelines([\n 'os,arch,build_config,size\\n',\n 'android,arm64-v8a,minimal-baseline,' + str(bin_actual_size) + '\\n'\n ])\n if bin_actual_size > bin_size_threshold:\n raise BuildError('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) +\n 'B] is bigger than threshold [' + str(bin_size_threshold) + 'B]')\n\n\ndef run_ios_tests(args, source_dir, config, cwd):\n cpr = run_subprocess([\"xcodebuild\", \"test\", \"-project\", \"./onnxruntime.xcodeproj\",\n \"-configuration\", config,\n \"-scheme\", \"onnxruntime_test_all_xc\", \"-destination\",\n \"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)\"], cwd=cwd)\n if cpr.returncode == 0:\n cpr = run_subprocess([\"xcodebuild\", \"test\", \"-project\", \"./onnxruntime.xcodeproj\",\n \"-configuration\", config,\n \"-scheme\", \"onnxruntime_shared_lib_test_xc\", \"-destination\",\n \"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)\"], cwd=cwd)\n cpr.check_returncode()\n\n\ndef run_orttraining_test_orttrainer_frontend_separately(cwd):\n class TestNameCollecterPlugin:\n def __init__(self):\n self.collected = set()\n\n def pytest_collection_modifyitems(self, items):\n for item in items:\n print('item.name: ', item.name)\n test_name = item.name\n start = test_name.find('[')\n if start > 0:\n test_name = test_name[:start]\n self.collected.add(test_name)\n\n import pytest\n\n plugin = TestNameCollecterPlugin()\n test_script_filename = os.path.join(cwd, \"orttraining_test_orttrainer_frontend.py\")\n pytest.main(['--collect-only', 
test_script_filename], plugins=[plugin])\n\n for test_name in plugin.collected:\n run_subprocess([\n sys.executable, '-m', 'pytest',\n 'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)\n\n\ndef run_training_python_frontend_tests(cwd):\n run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)\n run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)\n\n # TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.\n # shall revert to run_subprocess call once the segfault issue is resolved.\n run_orttraining_test_orttrainer_frontend_separately(cwd)\n # run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)\n\n run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)\n\n\ndef run_training_python_frontend_e2e_tests(cwd):\n # frontend tests are to be added here:\n log.info(\"Running python frontend e2e tests.\")\n\n import torch\n ngpus = torch.cuda.device_count()\n if ngpus > 1:\n bert_pretrain_script = 'orttraining_run_bert_pretrain.py'\n log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {} {}'.format(\n ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,\n bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'], cwd=cwd)\n\n log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {} {}'.format(\n ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,\n bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)\n\n # a long run\n log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {}'.format(\n ngpus, sys.executable, bert_pretrain_script))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,\n bert_pretrain_script], cwd=cwd)\n\n log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)\n\n # with orttraining_run_glue.py.\n # 1. we like to force to use single GPU (with CUDA_VISIBLE_DEVICES)\n # for fine-tune tests.\n # 2. need to run test separately (not to mix between fp16\n # and full precision runs. 
this need to be investigated).\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)\n\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)\n\n # this test is not stable. need to skip to unblock release\n # run_subprocess([\n # sys.executable, 'orttraining_test_transformers.py',\n # 'BertModelTest.test_for_pretraining_mixed_precision_with_gradient_accumulation'], cwd=cwd)\n\n\ndef run_training_pipeline_e2e_tests(cwd):\n # pipeline tests are to be added here:\n log.info(\"Running pipeline e2e tests.\")\n\n import torch\n ngpus = torch.cuda.device_count()\n\n command = ['./onnxruntime_training_bert',\n '--ort_log_severity', '1',\n '--optimizer=Lamb',\n '--learning_rate=3e-3',\n '--max_seq_length=128',\n '--max_predictions_per_seq=20',\n '--warmup_ratio=0.2843',\n '--warmup_mode=Poly',\n '--model_name', '/bert_ort/bert_models/nv/bert-large/' +\n 'bert-large-uncased_L_24_H_1024_A_16_V_30528_S_512_Dp_0.1_optimized_layer_norm_opset12',\n '--train_data_dir', '/bert_data/128/books_wiki_en_corpus/train',\n '--test_data_dir', '/bert_data/128/books_wiki_en_corpus/test',\n '--display_loss_steps', '1',\n '--use_nccl',\n '--use_mixed_precision',\n '--allreduce_in_fp16',\n '--gradient_accumulation_steps', '48',\n '--num_train_steps', '96',\n '--train_batch_size', '50']\n\n # TODO: currently the CI machine only has 4 GPUs for parallel tests.\n # Fill in more pipeline partition options when the machine has different GPUs counts.\n if ngpus != 4:\n return\n\n # Test 4-way pipeline parallel\n pp_command = ['mpirun', '-n', str(ngpus)] + command + ['--pipeline_parallel_size', '4', '--cut_group_info',\n '1149:407-1219/1341/1463/1585/1707/1829,' +\n '1881:407-1951/2073/2195/2317/2439/2561,' +\n '2613:407-2683/2805/2927/3049/3171/3293']\n command_str = ', '.join(pp_command)\n log.debug('RUN: ' + command_str)\n run_subprocess(pp_command, cwd=cwd)\n\n # Test 2-way data parallel + 2-way pipeline parallel\n pp_dp_command = ['mpirun', '-n', str(ngpus)]\n pp_dp_command = pp_dp_command + command\n pp_dp_command = pp_dp_command + ['--data_parallel_size', '2', '--pipeline_parallel_size',\n '2', '--cut_group_info',\n '1881:407-1951/2073/2195/2317/2439/2561/2683/2805/2927/3049/3171/3293']\n command_str = ', '.join(pp_dp_command)\n log.debug('RUN: ' + command_str)\n run_subprocess(pp_dp_command, cwd=cwd)\n\n\ndef run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):\n for config in configs:\n log.info(\"Running tests for %s configuration\", config)\n cwd = get_config_build_dir(build_dir, config)\n\n if args.enable_training and args.use_cuda and 
args.enable_training_python_frontend_e2e_tests:\n # run frontend tests for orttraining-linux-gpu-frontend_test-ci-pipeline.\n # this is not a PR merge test so skip other non-frontend tests.\n run_training_python_frontend_e2e_tests(cwd=cwd)\n run_training_python_frontend_tests(cwd=cwd)\n continue\n\n if args.enable_training and args.use_cuda and args.enable_training_pipeline_e2e_tests:\n # run distributed pipeline test on 4-GPU CI machine.\n run_training_pipeline_e2e_tests(cwd=cwd)\n continue\n\n if args.android:\n run_android_tests(args, source_dir, config, cwd)\n continue\n elif args.ios:\n run_ios_tests(args, source_dir, config, cwd)\n continue\n dll_path_list = []\n if args.use_nuphar:\n dll_path_list.append(os.path.join(\n build_dir, config, \"external\", \"tvm\", config))\n if args.use_tensorrt:\n dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))\n if args.use_mklml:\n dll_path_list.append(os.path.join(build_dir, config, \"mklml\", \"src\", \"project_mklml\", \"lib\"))\n if not is_windows():\n # A workaround for making libonnxruntime_providers_shared.so loadable.\n dll_path_list.append(os.path.join(build_dir, config))\n\n dll_path = None\n if len(dll_path_list) > 0:\n dll_path = os.pathsep.join(dll_path_list)\n\n if ctest_path is None:\n # Get the \"Google Test Adapter\" for vstest.\n if not os.path.exists(os.path.join(cwd,\n 'googletestadapter.0.17.1')):\n run_subprocess(\n ['nuget.exe', 'restore',\n os.path.join(source_dir, 'packages.config'),\n '-ConfigFile', os.path.join(source_dir, 'NuGet.config'),\n '-PackagesDirectory', cwd])\n cwd2 = os.path.join(cwd, config)\n executables = ['onnxruntime_test_all.exe']\n if args.build_shared_lib:\n executables.append('onnxruntime_shared_lib_test.exe')\n executables.append('onnxruntime_global_thread_pools_test.exe')\n run_subprocess(\n ['vstest.console.exe', '--parallel',\n '--TestAdapterPath:..\\\\googletestadapter.0.17.1\\\\build\\\\_common', # noqa\n '/Logger:trx', '/Enablecodecoverage', '/Platform:x64',\n \"/Settings:%s\" % os.path.join(\n source_dir, 'cmake\\\\codeconv.runsettings')] + executables,\n cwd=cwd2, dll_path=dll_path)\n else:\n ctest_cmd = [ctest_path, \"--build-config\", config, \"--verbose\", \"--timeout\", \"3600\"]\n run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)\n\n if args.enable_pybind:\n # Disable python tests for TensorRT because many tests are\n # not supported yet.\n if args.use_tensorrt:\n return\n\n # Disable python tests in a reduced build as we don't know which ops have been included and which\n # models can run\n if args.include_ops_by_model or args.include_ops_by_config or args.minimal_build:\n return\n\n if is_windows():\n cwd = os.path.join(cwd, config)\n\n run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)\n\n if args.enable_symbolic_shape_infer_tests:\n run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],\n cwd=cwd, dll_path=dll_path)\n\n # For CUDA enabled builds test IOBinding feature\n if args.use_cuda:\n # We need to have Torch installed to test the IOBinding feature\n # which currently uses Torch's allocator to allocate GPU memory for testing\n log.info(\"Testing IOBinding feature\")\n run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)\n\n if not args.disable_ml_ops:\n run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)\n\n if args.enable_training and args.use_cuda:\n # run basic frontend tests\n 
run_training_python_frontend_tests(cwd=cwd)\n\n try:\n import onnx # noqa\n onnx_test = True\n except ImportError as error:\n log.exception(error)\n log.warning(\"onnx is not installed. The ONNX tests will be skipped.\")\n onnx_test = False\n\n if onnx_test:\n run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)\n\n if not args.disable_ml_ops:\n run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],\n cwd=cwd, dll_path=dll_path)\n\n run_subprocess([sys.executable,\n os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),\n '--output_dir', 'test_models'], cwd=cwd)\n\n if not args.skip_onnx_tests:\n run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)\n if config != 'Debug':\n run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)\n\n if not args.skip_keras_test:\n try:\n import onnxmltools # noqa\n import keras # noqa\n onnxml_test = True\n except ImportError:\n log.warning(\n \"onnxmltools and keras are not installed. \"\n \"The keras tests will be skipped.\")\n onnxml_test = False\n if onnxml_test:\n run_subprocess(\n [sys.executable, 'onnxruntime_test_python_keras.py'],\n cwd=cwd, dll_path=dll_path)\n\n\ndef nuphar_run_python_tests(build_dir, configs):\n \"\"\"nuphar temporary function for running python tests separately\n as it requires ONNX 1.5.0\n \"\"\"\n for config in configs:\n if config == 'Debug':\n continue\n cwd = get_config_build_dir(build_dir, config)\n if is_windows():\n cwd = os.path.join(cwd, config)\n dll_path = os.path.join(build_dir, config, \"external\", \"tvm\", config)\n # install onnx for shape inference in testing Nuphar scripts\n # this needs to happen after onnx_test_data preparation which\n # uses onnx 1.3.0\n run_subprocess(\n [sys.executable, '-m', 'pip', 'install', '--user', 'onnx==1.5.0'])\n run_subprocess(\n [sys.executable, 'onnxruntime_test_python_nuphar.py'],\n cwd=cwd, dll_path=dll_path)\n\n\ndef run_nodejs_tests(nodejs_binding_dir):\n args = ['npm', 'test', '--', '--timeout=2000']\n if is_windows():\n args = ['cmd', '/c'] + args\n run_subprocess(args, cwd=nodejs_binding_dir)\n\n\ndef build_python_wheel(\n source_dir, build_dir, configs, use_cuda, use_ngraph, use_dnnl,\n use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,\n wheel_name_suffix, enable_training, nightly_build=False, featurizers_build=False, use_ninja=False):\n for config in configs:\n cwd = get_config_build_dir(build_dir, config)\n if is_windows() and not use_ninja:\n cwd = os.path.join(cwd, config)\n\n args = [sys.executable, os.path.join(source_dir, 'setup.py'),\n 'bdist_wheel']\n\n # We explicitly override the platform tag in the name of the generated build wheel\n # so that we can install the wheel on Mac OS X versions 10.12+.\n # Without this explicit override, we will something like this while building on MacOS 10.14 -\n # [WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value (10.12)\n # than the version on which the Python interpreter was compiled (10.14) and will be ignored.\n # Since we need to support 10.12+, we explicitly override the platform tag.\n # See PR #3626 for more details\n if is_macOS():\n args += ['-p', 'macosx_10_12_x86_64']\n\n # Any combination of the following arguments can be applied\n if nightly_build:\n args.append('--nightly_build')\n if featurizers_build:\n args.append(\"--use_featurizers\")\n if wheel_name_suffix:\n 
args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))\n if enable_training:\n args.append(\"--enable_training\")\n\n # The following arguments are mutually exclusive\n if use_tensorrt:\n args.append('--use_tensorrt')\n elif use_cuda:\n args.append('--use_cuda')\n elif use_ngraph:\n args.append('--use_ngraph')\n elif use_openvino:\n args.append('--use_openvino')\n elif use_dnnl:\n args.append('--use_dnnl')\n elif use_nuphar:\n args.append('--use_nuphar')\n elif use_vitisai:\n args.append('--use_vitisai')\n elif use_acl:\n args.append('--use_acl')\n elif use_armnn:\n args.append('--use_armnn')\n elif use_dml:\n args.append('--use_dml')\n\n run_subprocess(args, cwd=cwd)\n\n\ndef derive_linux_build_property():\n if is_windows():\n return \"/p:IsLinuxBuild=\\\"false\\\"\"\n else:\n return \"/p:IsLinuxBuild=\\\"true\\\"\"\n\n\ndef build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl, use_mklml):\n if not (is_windows() or is_linux()):\n raise BuildError(\n 'Currently csharp builds and nuget package creation is only supportted '\n 'on Windows and Linux platforms.')\n\n csharp_build_dir = os.path.join(source_dir, 'csharp')\n is_linux_build = derive_linux_build_property()\n\n # derive package name and execution provider based on the build args\n execution_provider = \"/p:ExecutionProvider=\\\"None\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime\\\"\"\n if use_openvino:\n execution_provider = \"/p:ExecutionProvider=\\\"openvino\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.OpenVino\\\"\"\n elif use_tensorrt:\n execution_provider = \"/p:ExecutionProvider=\\\"tensorrt\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.TensorRT\\\"\"\n elif use_dnnl:\n execution_provider = \"/p:ExecutionProvider=\\\"dnnl\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.DNNL\\\"\"\n elif use_cuda:\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.Gpu\\\"\"\n elif use_mklml:\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.MKLML\\\"\"\n else:\n pass\n\n # set build directory based on build_dir arg\n native_dir = os.path.normpath(os.path.join(source_dir, build_dir))\n ort_build_dir = \"/p:OnnxRuntimeBuildDirectory=\\\"\" + native_dir + \"\\\"\"\n\n # dotnet restore\n cmd_args = [\"dotnet\", \"restore\", \"OnnxRuntime.CSharp.sln\", \"--configfile\", \"Nuget.CSharp.config\"]\n run_subprocess(cmd_args, cwd=csharp_build_dir)\n\n # build csharp bindings and create nuget package for each config\n for config in configs:\n if is_linux():\n native_build_dir = os.path.join(native_dir, config)\n cmd_args = [\"make\", \"install\", \"DESTDIR=.//nuget-staging\"]\n run_subprocess(cmd_args, cwd=native_build_dir)\n\n configuration = \"/p:Configuration=\\\"\" + config + \"\\\"\"\n\n cmd_args = [\"dotnet\", \"msbuild\", \"OnnxRuntime.CSharp.sln\", configuration, package_name, is_linux_build,\n ort_build_dir]\n run_subprocess(cmd_args, cwd=csharp_build_dir)\n\n cmd_args = [\n \"dotnet\", \"msbuild\", \"OnnxRuntime.CSharp.proj\", \"/t:CreatePackage\",\n package_name, configuration, execution_provider, is_linux_build, ort_build_dir]\n run_subprocess(cmd_args, cwd=csharp_build_dir)\n\n\ndef run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):\n # Currently only running tests on windows.\n if not is_windows():\n return\n csharp_source_dir = os.path.join(source_dir, 'csharp')\n is_linux_build = derive_linux_build_property()\n\n # define macros 
based on build args\n macros = \"\"\n if use_openvino:\n macros += \"USE_OPENVINO;\"\n if use_tensorrt:\n macros += \"USE_TENSORRT;\"\n if use_dnnl:\n macros += \"USE_DNNL;\"\n if use_cuda:\n macros += \"USE_CUDA;\"\n\n define_constants = \"\"\n if macros != \"\":\n define_constants = \"/p:DefineConstants=\\\"\" + macros + \"\\\"\"\n\n # set build directory based on build_dir arg\n native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))\n ort_build_dir = \"/p:OnnxRuntimeBuildDirectory=\\\"\" + native_build_dir + \"\\\"\"\n\n # Skip pretrained models test. Only run unit tests as part of the build\n # add \"--verbosity\", \"detailed\" to this command if required\n cmd_args = [\"dotnet\", \"test\", \"test\\\\Microsoft.ML.OnnxRuntime.Tests\\\\Microsoft.ML.OnnxRuntime.Tests.csproj\",\n \"--filter\", \"FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels\",\n is_linux_build, define_constants, ort_build_dir]\n run_subprocess(cmd_args, cwd=csharp_source_dir)\n\n\ndef build_protoc_for_host(cmake_path, source_dir, build_dir, args):\n if (args.arm or args.arm64 or args.enable_windows_store) and (not is_windows() and not args.ios):\n raise BuildError(\n 'Currently only support building protoc for Windows host while '\n 'cross-compiling for ARM/ARM64/Store and linux cross-compiling iOS')\n\n log.info(\n \"Building protoc for host to be used in cross-compiled build process\")\n protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')\n os.makedirs(protoc_build_dir, exist_ok=True)\n # Generate step\n cmd_args = [\n cmake_path,\n os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),\n '-Dprotobuf_BUILD_TESTS=OFF',\n '-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',\n '-Dprotobuf_BUILD_SHARED_LIBS=OFF'\n ]\n\n is_ninja = args.cmake_generator == 'Ninja'\n if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):\n cmd_args += ['-G', args.cmake_generator]\n if is_windows():\n if not is_ninja:\n cmd_args += ['-T', 'host=x64']\n elif is_macOS():\n if args.use_xcode:\n cmd_args += ['-G', 'Xcode']\n # CMake < 3.18 has a bug setting system arch to arm64 (if not specified) for Xcode 12,\n # protoc for host should be built using host architecture\n # Explicitly specify the CMAKE_OSX_ARCHITECTURES for x86_64 Mac.\n import platform\n if platform.machine() == 'x86_64':\n cmd_args += ['-DCMAKE_OSX_ARCHITECTURES=x86_64']\n\n run_subprocess(cmd_args, cwd=protoc_build_dir)\n # Build step\n cmd_args = [cmake_path,\n \"--build\", protoc_build_dir,\n \"--config\", \"Release\",\n \"--target\", \"protoc\"]\n run_subprocess(cmd_args)\n\n # Absolute protoc path is needed for cmake\n config_dir = ''\n suffix = ''\n\n if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):\n config_dir = 'Release'\n\n if is_windows():\n suffix = '.exe'\n\n expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)\n\n if not os.path.exists(expected_protoc_path):\n raise BuildError(\"Couldn't find {}. 
Host build of protoc failed.\".format(expected_protoc_path))\n\n return expected_protoc_path\n\n\ndef generate_documentation(source_dir, build_dir, configs):\n operator_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')\n opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')\n for config in configs:\n # Copy the gen_contrib_doc.py.\n shutil.copy(\n os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'),\n os.path.join(build_dir, config))\n shutil.copy(\n os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'),\n os.path.join(build_dir, config))\n run_subprocess(\n [sys.executable,\n 'gen_contrib_doc.py',\n '--output_path', operator_doc_path],\n cwd=os.path.join(build_dir, config))\n run_subprocess(\n [sys.executable,\n 'gen_opkernel_doc.py',\n '--output_path', opkernel_doc_path],\n cwd=os.path.join(build_dir, config))\n docdiff = ''\n try:\n docdiff = subprocess.check_output(['git', 'diff', opkernel_doc_path])\n except subprocess.CalledProcessError:\n print('git diff returned non-zero error code')\n if len(docdiff) > 0:\n # Show warning instead of throwing exception, because it is\n # dependent on build configuration for including\n # execution propviders\n log.warning(\n 'The updated opkernel document file ' + str(opkernel_doc_path) +\n ' is different from the checked in version. Consider '\n 'regenerating the file with CPU, DNNL and CUDA providers enabled.')\n log.debug('diff:\\n' + str(docdiff))\n\n docdiff = ''\n try:\n docdiff = subprocess.check_output(['git', 'diff', operator_doc_path])\n except subprocess.CalledProcessError:\n print('git diff returned non-zero error code')\n if len(docdiff) > 0:\n raise BuildError(\n 'The updated operator document file ' +\n str(operator_doc_path) + ' must be checked in.\\n diff:\\n' +\n str(docdiff))\n\n\ndef main():\n args = parse_arguments()\n cmake_extra_defines = (args.cmake_extra_defines\n if args.cmake_extra_defines else [])\n cross_compiling = args.arm or args.arm64 or args.android\n\n # If there was no explicit argument saying what to do, default\n # to update, build and test (for native builds).\n if not (args.update or args.clean or args.build or args.test):\n log.debug(\n \"Defaulting to running update, build \"\n \"[and test for native builds].\")\n args.update = True\n args.build = True\n if cross_compiling:\n args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'\n else:\n args.test = True\n\n if args.skip_tests:\n args.test = False\n\n if args.include_ops_by_model or args.include_ops_by_config:\n from exclude_unused_ops import exclude_unused_ops\n models_path = args.include_ops_by_model if args.include_ops_by_model else ''\n config_path = args.include_ops_by_config if args.include_ops_by_config else ''\n exclude_unused_ops(models_path, config_path, use_cuda=args.use_cuda)\n\n if args.use_tensorrt:\n args.use_cuda = True\n\n if args.build_wheel or args.gen_doc:\n args.enable_pybind = True\n\n if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:\n args.build_shared_lib = True\n\n if args.build_nuget and cross_compiling:\n raise BuildError('Currently nuget package creation is not supported while cross-compiling')\n\n if args.enable_pybind and args.disable_exceptions:\n raise BuildError('Python bindings require exceptions to be enabled.')\n\n if args.minimal_build and args.disable_ort_format_load:\n raise BuildError('Minimal build requires loading ORT format models.')\n\n # Disabling unit tests for VAD-F as FPGA only supports\n # 
models with NCHW layout\n if args.use_openvino == \"VAD-F_FP32\":\n args.test = False\n\n configs = set(args.config)\n\n # setup paths and directories\n cmake_path = resolve_executable_path(args.cmake_path)\n ctest_path = None if args.use_vstest else resolve_executable_path(\n args.ctest_path)\n build_dir = args.build_dir\n script_dir = os.path.realpath(os.path.dirname(__file__))\n source_dir = os.path.normpath(os.path.join(script_dir, \"..\", \"..\"))\n\n # if using cuda, setup cuda paths and env vars\n cuda_home, cudnn_home = setup_cuda_vars(args)\n\n mpi_home = args.mpi_home\n nccl_home = args.nccl_home\n\n # if using tensorrt, setup tensorrt paths\n tensorrt_home = setup_tensorrt_vars(args)\n\n # if using migraphx, setup migraphx paths\n migraphx_home = setup_migraphx_vars(args)\n\n os.makedirs(build_dir, exist_ok=True)\n\n log.info(\"Build started\")\n if args.update:\n cmake_extra_args = []\n path_to_protoc_exe = args.path_to_protoc_exe\n if not args.skip_submodule_sync:\n update_submodules(source_dir)\n if is_windows():\n if args.cmake_generator == 'Ninja':\n if args.x86 or args.arm or args.arm64:\n raise BuildError(\n \"To cross-compile with Ninja, load the toolset \"\n \"environment for the target processor (e.g. Cross \"\n \"Tools Command Prompt for VS)\")\n cmake_extra_args = ['-G', args.cmake_generator]\n elif args.x86:\n cmake_extra_args = [\n '-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator\n ]\n elif args.arm or args.arm64:\n # Cross-compiling for ARM(64) architecture\n # First build protoc for host to use during cross-compilation\n if path_to_protoc_exe is None:\n path_to_protoc_exe = build_protoc_for_host(\n cmake_path, source_dir, build_dir, args)\n if args.arm:\n cmake_extra_args = ['-A', 'ARM']\n else:\n cmake_extra_args = ['-A', 'ARM64']\n cmake_extra_args += ['-G', args.cmake_generator]\n # Cannot test on host build machine for cross-compiled\n # builds (Override any user-defined behaviour for test if any)\n if args.test:\n log.info(\n \"Cannot test on host build machine for cross-compiled \"\n \"ARM(64) builds. Will skip test running after build.\")\n args.test = False\n else:\n if (args.msvc_toolset == '14.16' and\n args.cmake_generator == 'Visual Studio 16 2019'):\n # CUDA 10.0 requires _MSC_VER >= 1700 and\n # _MSC_VER < 1920, aka Visual Studio version\n # in [2012, 2019). 
In VS2019, we have to use\n # Side-by-side minor version MSVC toolsets from\n # Visual Studio 2017 14.16 is MSVC version\n # 141 is MSVC Toolset Version\n # Cuda VS extension should be installed to\n # C:\\Program Files (x86)\\Microsoft Visual\n # Studio\\2019\\Enterprise\\MSBuild\\Microsoft\\VC\\v160\\BuildCustomizations # noqa\n toolset = 'v141,host=x64,version=' + args.msvc_toolset\n elif args.msvc_toolset:\n toolset = 'host=x64,version=' + args.msvc_toolset\n else:\n toolset = 'host=x64'\n if args.cuda_version:\n toolset += ',cuda=' + args.cuda_version\n cmake_extra_args = [\n '-A', 'x64', '-T', toolset, '-G', args.cmake_generator\n ]\n if args.enable_windows_store:\n cmake_extra_args.append(\n '-DCMAKE_TOOLCHAIN_FILE=' + os.path.join(\n source_dir, 'cmake', 'store_toolchain.cmake'))\n if args.enable_wcos:\n cmake_extra_args.append('-DCMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')\n elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):\n cmake_extra_args += ['-G', args.cmake_generator]\n elif is_macOS() and args.use_xcode:\n cmake_extra_args += ['-G', 'Xcode']\n\n if (args.android or args.ios or args.enable_windows_store) and args.path_to_protoc_exe is None:\n # Cross-compiling for Android and iOS\n path_to_protoc_exe = build_protoc_for_host(\n cmake_path, source_dir, build_dir, args)\n\n if is_ubuntu_1604():\n if (args.arm or args.arm64):\n raise BuildError(\n \"Only Windows ARM(64) cross-compiled builds supported \"\n \"currently through this script\")\n install_ubuntu_deps(args)\n if not is_docker() and not args.use_acl and not args.use_armnn:\n install_python_deps()\n if args.enable_pybind and is_windows():\n install_python_deps(args.numpy_version)\n if args.enable_onnx_tests:\n setup_test_data(build_dir, configs)\n generate_build_tree(\n cmake_path, source_dir, build_dir, cuda_home, cudnn_home, mpi_home, nccl_home,\n tensorrt_home, migraphx_home, path_to_protoc_exe, configs, cmake_extra_defines,\n args, cmake_extra_args)\n\n if args.clean:\n clean_targets(cmake_path, build_dir, configs)\n\n # if using DML, perform initial nuget package restore\n setup_dml_build(args, cmake_path, build_dir, configs)\n\n if args.build:\n build_targets(args, cmake_path, build_dir, configs, args.parallel, args.target)\n\n if args.test:\n run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)\n\n # run nuphar python tests last, as it installs ONNX 1.5.0\n if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:\n nuphar_run_python_tests(build_dir, configs)\n\n # run node.js binding tests\n if args.build_nodejs and not args.skip_nodejs_tests:\n nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, \"nodejs\"))\n run_nodejs_tests(nodejs_binding_dir)\n\n if args.build:\n if args.build_wheel:\n nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')\n build_python_wheel(\n source_dir,\n build_dir,\n configs,\n args.use_cuda,\n args.use_ngraph,\n args.use_dnnl,\n args.use_tensorrt,\n args.use_openvino,\n args.use_nuphar,\n args.use_vitisai,\n args.use_acl,\n args.use_armnn,\n args.use_dml,\n args.wheel_name_suffix,\n args.enable_training,\n nightly_build=nightly_build,\n featurizers_build=args.use_featurizers,\n use_ninja=(args.cmake_generator == 'Ninja')\n )\n if args.build_nuget:\n build_nuget_package(\n source_dir,\n build_dir,\n configs,\n args.use_cuda,\n args.use_openvino,\n args.use_tensorrt,\n args.use_dnnl,\n args.use_mklml\n )\n\n if args.test and args.build_nuget:\n run_csharp_tests(\n source_dir,\n build_dir,\n 
args.use_cuda,\n args.use_openvino,\n args.use_tensorrt,\n args.use_dnnl)\n\n if args.gen_doc and (args.build or args.test):\n generate_documentation(source_dir, build_dir, configs)\n\n log.info(\"Build complete\")\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main())\n except BaseError as e:\n log.error(str(e))\n sys.exit(1)\n"
] | [
[
"torch.cuda.device_count"
]
] |
insilicomedicine/TRIP | [
"5e7b9da298aa47a71c71e1144ff1d8e538dbccaa"
] | [
"core/learnable_priors/normal_prior.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.distributions import MultivariateNormal\n\nclass Normal(nn.Module):\n def __init__(self, num_vars=100):\n super(Normal, self).__init__()\n\n self.num_vars = num_vars\n\n self.means = nn.Parameter(torch.zeros(num_vars))\n self.std = nn.Parameter(torch.eye(num_vars))\n\n def log_prob(self, x):\n distr = MultivariateNormal(self.means, self.std)\n return distr.log_prob(x)\n\n def sample(self, num_samples):\n distr = MultivariateNormal(self.means, self.std)\n return distr.sample_n(num_samples)\n"
] | [
[
"torch.zeros",
"torch.eye",
"torch.distributions.MultivariateNormal"
]
] |
Ejjaffe/dit | [
"c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1"
] | [
"dit/math/ops.py"
] | [
"\"\"\"\nClasses to contextualize math operations in log vs linear space.\n\n\"\"\"\nfrom types import MethodType\n\nimport numpy as np\n\nfrom ..exceptions import InvalidBase\n\n\n__all__ = (\n 'get_ops',\n 'LinearOperations',\n 'LogOperations',\n)\n\n\n# For 2.x, these are ascii strings. For 3.x these are unicode strings.\nacceptable_base_strings = {'linear', 'e'}\n\n\ndef get_ops(base):\n \"\"\"\n Returns an *Operations instance, depending on the base.\n\n Parameters\n ----------\n base : float, 'linear', 'e'\n The base for the Operations instance.\n\n \"\"\"\n # Let's not initialize unless we have to.\n if base in cache:\n ops = cache[base]\n else:\n # This assumes that 'linear' is in cache.\n ops = LogOperations(base)\n cache[base] = ops\n return ops\n\n\ndef exp_func(b):\n \"\"\"\n Returns a base-`b` exponential function.\n\n Parameters\n ----------\n b : positive float or 'e'\n The base of the desired exponential function.\n\n Returns\n -------\n exp : function\n The base-`b` exponential function. The returned function will operate\n elementwise on NumPy arrays, but note, it is not a ufunc.\n\n Examples\n --------\n >>> exp2 = exp_func(2)\n >>> exp2(1)\n 2.0\n >>> exp3 = exp_func(3)\n >>> exp3(1)\n 3.0\n\n Raises\n ------\n InvalidBase\n If the base is less than zero or equal to one.\n\n \"\"\"\n from dit.utils import is_string_like\n\n if is_string_like(b) and b not in acceptable_base_strings:\n raise InvalidBase(msg=b)\n\n if b == 'linear':\n exp = lambda x: x # pragma: no branch\n elif b == 2:\n exp = np.exp2\n elif b == 10:\n exp = lambda x: 10**x\n elif b == 'e' or np.isclose(b, np.e):\n exp = np.exp\n else:\n if b <= 0 or b == 1:\n raise InvalidBase(b)\n\n def exp(x, base=b):\n \"\"\"\n Return `base`**`x`\n\n Parameters\n ----------\n x : float\n The number to exponentiate\n base : float\n The base of the exponential\n\n Returns\n -------\n p : float\n `base`**`x`\n \"\"\"\n return base**np.asarray(x)\n\n return exp\n\n\ndef log_func(b):\n \"\"\"\n Returns a base-`b` logarithm function.\n\n Parameters\n ----------\n b : positive float or 'e'\n The base of the desired logarithm function.\n\n Returns\n -------\n log : function\n The base-`b` logarithm function. The returned function will operate\n elementwise on NumPy arrays, but note, it is not a ufunc.\n\n Examples\n --------\n >>> log2 = log_func(2)\n >>> log2(2)\n 1.0\n >>> log3 = log_func(3)\n >>> log3(3)\n 1.0\n\n Raises\n ------\n InvalidBase\n If the base is less than zero or equal to one.\n\n \"\"\"\n from dit.utils import is_string_like\n\n if is_string_like(b) and b not in acceptable_base_strings:\n raise InvalidBase(msg=b)\n\n if b == 'linear':\n log = lambda x: x # pragma: no branch\n elif b == 2:\n log = np.log2\n elif b == 10:\n log = np.log10\n elif b == 'e' or np.isclose(b, np.e):\n log = np.log\n else:\n if b <= 0 or b == 1:\n raise InvalidBase(b)\n\n Z = np.log(b)\n\n def log(x, func=np.log):\n \"\"\"\n Return the log of `x`\n\n Parameters\n ----------\n x : float\n The value to take the log of\n func : function\n A logarithm function\n\n Returns\n -------\n log : float\n The logarithm of `x` in base `b` (from outer scope)\n \"\"\"\n return func(x) / Z\n\n return log\n\n\nclass Operations(object):\n \"\"\"\n Base class which implements certain math operations.\n\n For example, regular addition with log probabilities is handled specially.\n\n While we could implement many more operations, we do not. Their usage\n is uncommon and their implementation would be slower as well. 
For example,\n subtraction with log probabailities must go as:\n\n .. math::\n log_2(x-y) = log_2(x) + log_2(1 - 2^[ log_2(y) - log_2(x) ])\n\n Note that if :math:`y > x`, then :math:`log(y) > log(x)` and the inner term\n of the second logarithm will be less than 0, yielding NaN.\n\n \"\"\"\n\n ### Do we allow base == 'e' or should we convert to its numerical value?\n ### Ans: We store whatever was specified but provide get_base() with an\n ### option to return a numerical base.\n\n one = None\n zero = None\n base = None\n exp = None\n log = None\n\n def get_base(self, numerical=False):\n \"\"\"\n Returns the base in which operations take place.\n\n For linear-based operations, the result is 'linear'.\n\n Parameters\n ----------\n numerical : bool\n If `True`, then if the base is 'e', it is returned as a float.\n\n \"\"\"\n if numerical and self.base == 'e':\n base = np.exp(1)\n else:\n base = self.base\n return base\n\n def is_null(self, p):\n \"\"\"\n Returns `True` if `p` is a null probability.\n\n Parameters\n ----------\n p : float\n The probability to be tested.\n\n \"\"\"\n return np.isclose(self.zero, p)\n\n def is_null_exact(self, p):\n \"\"\"\n Returns `True` if `p` is exactly a null probability.\n\n Parameters\n ----------\n p : float\n The probability to be tested.\n\n \"\"\"\n return self.zero == p\n\n def add(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def add_inplace(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def add_reduce(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def mult(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def mult_inplace(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def mult_reduce(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def invert(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def normalize(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n\nclass LinearOperations(Operations):\n \"\"\"\n The class of operations on linear values.\n \"\"\"\n\n one = 1\n zero = 0\n base = 'linear'\n\n # If the functions below are standard Python functions (as opposed to\n # NumPy ufuncs), then they will be treated as unbound methods for the class.\n # During instantiation, they are bound to the instance (since before\n # instantiation they are class methods) and thus, we are left with\n # bound methods (undesirably). If we had modified these attributes in the\n # __init__ function, then they would not be bound (or even unbound methods)\n # but functions instead (desirably). This is precisely what LogOperations\n # does, which is why it does not have this issue. An alternative approach\n # is to explicitly declare these functions to be static methods, as we\n # do below.\n #\n exp = staticmethod(exp_func(base))\n log = staticmethod(log_func(base))\n\n def add(self, x, y):\n \"\"\"\n Add the arrays element-wise. Neither x nor y will be modified.\n\n Assumption: :math:`y >= 0`.\n\n Operation: :math:`z[i] = x[i] + y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n z = x + y\n return z\n\n def add_inplace(self, x, y):\n \"\"\"\n Adds `y` to `x`, in-place. 
`x` will be modified, but `y` will not.\n\n Assumption: :math:`y >= 0`.\n\n Operation: :math:`x[i] += y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n x += y\n return x\n\n def add_reduce(self, x, axis=None):\n \"\"\"\n Performs an `addition' reduction on `x`.\n\n Assumption: :math:`y >= 0`.\n\n Operation: :math:`z = \\\\sum_i x[i]`\n\n Returns\n -------\n z : float\n The summation of the elements in `x`.\n\n \"\"\"\n z = x.sum(axis=axis)\n return z\n\n def mult(self, x, y):\n \"\"\"\n Multiplies the arrays element-wise. Neither x nor y will be modified.\n\n Operation: :math:`z[i] = x[i] * y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n z = x * y\n return z\n\n def mult_inplace(self, x, y):\n \"\"\"\n Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.\n\n Operation: :math:`x[i] *= y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n x *= y\n return x\n\n def mult_reduce(self, x, axis=None):\n \"\"\"\n Performs an `multiplication' reduction on `x`.\n\n Operation: :math:`z = \\\\prod_i x[i]`\n\n Returns\n -------\n z : float\n The product of the elements in `x`.\n\n \"\"\"\n z = np.prod(x, axis=axis)\n return z\n\n def invert(self, x):\n \"\"\"\n Returns the element-wise multiplicative inverse of x.\n\n Operation: :math:`z[i] = 1/x[i]`\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to invert.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The inverted array.\n\n \"\"\"\n z = 1 / x\n return z\n\n def normalize(self, x, axis=None):\n \"\"\"\n Returns a normalized version of x.\n\n Operation: :math:`z[i] = x[i] / sum(x)`\n\n If x is 2D and axis is None, then normalization is over all elements.\n Use axis=-1 to normalize each row of x.\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to normalize.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The normalized array.\n\n \"\"\"\n z = x / x.sum(axis=None)\n return z\n\n\ndef set_add(ops):\n \"\"\"\n Set the add method on the LogOperations instance.\n\n \"\"\"\n # To preserve numerical accuracy, we must make use of a logaddexp\n # function. These functions only exist in Numpy for base-e and base-2.\n # For all other bases, we must convert and then convert back.\n\n # In each case, we use default arguments to make the function that we\n # are calling 'local'.\n base = ops.base\n if base == 2:\n def add(self, x, y, func=np.logaddexp2):\n return func(x, y)\n elif base == 'e' or np.isclose(base, np.e):\n def add(self, x, y, func=np.logaddexp):\n return func(x, y)\n else:\n # No need to optimize this...\n def add(self, x, y):\n # Convert log_b probabilities to log_2 probabilities.\n x2 = x * np.log2(base)\n y2 = y * np.log2(base)\n z = np.logaddexp2(x2, y2)\n # Convert log_2 probabilities to log_b probabilities.\n z *= self.log(2)\n return z\n\n add.__doc__ = \"\"\"\n Add the arrays element-wise. 
Neither x nor y will be modified.\n\n Assumption: y <= 0.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n ops.add = MethodType(add, ops)\n\n\ndef set_add_inplace(ops):\n \"\"\"\n Set the add_inplace method on the LogOperations instance.\n\n \"\"\"\n base = ops.base\n if base == 2:\n def add_inplace(self, x, y, func=np.logaddexp2):\n return func(x, y, x)\n elif base == 'e' or np.isclose(base, np.e):\n def add_inplace(self, x, y, func=np.logaddexp):\n return func(x, y, x)\n else:\n def add_inplace(self, x, y):\n x *= np.log2(base)\n y2 = y * np.log2(base)\n np.logaddexp2(x, y2, x)\n x *= self.log(2)\n return x\n\n add_inplace.__doc__ = \"\"\"\n Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.\n\n Assumption: :math:`y <= 0`.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n ops.add_inplace = MethodType(add_inplace, ops)\n\n\ndef set_add_reduce(ops):\n \"\"\"\n Set the add_reduce method on the LogOperations instance.\n\n \"\"\"\n # https://github.com/numpy/numpy/issues/4599\n base = ops.base\n if base == 2:\n def add_reduce(self, x, axis=None, func=np.logaddexp2):\n if len(x) == 0:\n # Since logaddexp.identity is None, we handle it separately.\n z = self.zero\n else:\n # Note, we are converting to a NumPy array, if necessary.\n z = func.reduce(x, axis=axis, dtype=float)\n return z\n\n elif base == 'e' or np.isclose(base, np.e):\n def add_reduce(self, x, axis=None, func=np.logaddexp):\n if len(x) == 0:\n # Since logaddexp.identity is None, we handle it separately.\n z = self.zero\n else:\n # Note, we are converting to a NumPy array, if necessary.\n z = func.reduce(x, axis=axis, dtype=float)\n return z\n\n else:\n def add_reduce(self, x, axis=None):\n if len(x) == 0:\n # Since logaddexp.identity is None, we handle it separately.\n z = self.zero\n else:\n # Note, we are converting to a NumPy array, if necessary.\n # Change the base-2, add, and then convert back.\n x2 = x * np.log2(base)\n z = np.logaddexp2.reduce(x2, axis=axis, dtype=float)\n z /= np.log2(base)\n return z\n\n add_reduce.__doc__ = \"\"\"\n Performs an `addition' reduction on `x`.\n\n Assumption: :math:`y <= 0`.\n\n Returns\n -------\n z : float\n The summation of the elements in `x`.\n\n \"\"\"\n ops.add_reduce = MethodType(add_reduce, ops)\n\n\nclass LogOperations(Operations):\n\n one = None\n zero = None\n base = None\n exp = None\n log = None\n\n def __init__(self, base):\n \"\"\"\n Initialize the log operation manager.\n\n Parameters\n ----------\n base : float\n The base of the logarithm.\n\n \"\"\"\n self.set_base(base)\n\n def set_base(self, base):\n \"\"\"\n Change the base of the logarithm.\n\n Parameters\n ----------\n base : float\n The base of the logarithm.\n\n \"\"\"\n self.base = base\n self.exp = exp_func(base)\n self.log = log_func(base)\n # Note: When base < 1, zero == +inf. When base > 1, zero == -inf.\n self.one = self.log(1)\n self.zero = self.log(0)\n\n # Update the add methods.\n set_add(self)\n set_add_inplace(self)\n set_add_reduce(self)\n\n def mult(self, x, y):\n \"\"\"\n Multiplies the arrays element-wise. 
Neither `x` nor `y` will be modified.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n z = x + y\n return z\n\n def mult_inplace(self, x, y):\n \"\"\"\n Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n x += y\n return x\n\n def mult_reduce(self, x, axis=None):\n \"\"\"\n Performs an `multiplication' reduction on `x`.\n\n Returns\n -------\n z : float\n The product of the elements in `x`.\n\n \"\"\"\n # The identity for addition in NumPy is zero.\n # This corresponds to an identity of 1 for log operations, and this is\n # exactly the desired identity for multiplying probabilities.\n z = x.sum(axis=axis)\n return z\n\n def invert(self, x):\n \"\"\"\n Returns the element-wise multiplicative inverse of `x`: :math:`1/x`.\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to invert.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The inverted array.\n\n \"\"\"\n z = -x\n return z\n\n def normalize(self, x, axis=None):\n \"\"\"\n Returns a normalized version of `x`.\n\n Non-log equivalent operation: :math:`z[i] = x[i] / sum(x)`\n\n If `x` is 2D and axis is None, then normalization is over all elements.\n Use axis=-1 to normalize each row of `x`.\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to normalize.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The normalized array.\n\n \"\"\"\n # The API way would be: mult(x, invert( add_reduce(x) ))\n # We'll avoid some of those function calls.\n z = x - self.add_reduce(x, axis=axis)\n return z\n\n\ncache = {\n 'linear': LinearOperations(),\n 2: LogOperations(2),\n 'e': LogOperations('e')\n}\n"
] | [
[
"numpy.log2",
"numpy.logaddexp2.reduce",
"numpy.isclose",
"numpy.asarray",
"numpy.exp",
"numpy.prod",
"numpy.log",
"numpy.logaddexp2"
]
] |
NingAnMe/voxelmorph | [
"3a1a4c2f456af2dba5552efc1b08c68af38e54dc"
] | [
"scripts/sphere/register.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nExample script to register two volumes with VoxelMorph models.\n\nPlease make sure to use trained models appropriately. Let's say we have a model trained to register \na scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run:\n\n register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.pt \n --moved moved.nii.gz --warp warp.nii.gz\n\nThe source and target input images are expected to be affinely registered.\n\nIf you use this code, please cite the following, and read function docs for further info/citations\n VoxelMorph: A Learning Framework for Deformable Medical Image Registration \n G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca. \n IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019. \n\n or\n\n Unsupervised Learning for Probabilistic Diffeomorphic Registration for Images and Surfaces\n A.V. Dalca, G. Balakrishnan, J. Guttag, M.R. Sabuncu. \n MedIA: Medical Image Analysis. (57). pp 226-236, 2019 \n\nCopyright 2020 Adrian V. Dalca\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in \ncompliance with the License. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is\ndistributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or \nimplied. See the License for the specific language governing permissions and limitations under \nthe License.\n\"\"\"\n\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\n# third party\nimport numpy as np\nimport nibabel as nib\nimport torch\nfrom scipy.interpolate import RegularGridInterpolator\nfrom astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian\n\n# import voxelmorph with sphere backend\nos.environ['VXM_BACKEND'] = 'sphere'\nimport voxelmorph as vxm # nopep8\nimport math\n\n# parse commandline args\nparser = argparse.ArgumentParser()\nparser.add_argument('--moving', required=True, help='moving image (source) filename')\nparser.add_argument('--fixed', required=True, help='fixed image (target) filename')\nparser.add_argument('--moved', help='warped image output filename')\nparser.add_argument('--model', required=True, help='pytorch model for nonlinear registration')\n# parser.add_argument('--normalize_type', default='std', help='select the data normalization processing type')\nparser.add_argument('--warp', help='output warp deformation filename')\nparser.add_argument('--sphere_sub', help='sphere_sub image filename')\nparser.add_argument('--sphere_atlas', help='sphere_atlas image filename')\nparser.add_argument('--sphere_reg', help='sphere.reg image output filename')\nparser.add_argument('--sulc_sub', help='silc_sub image filename')\nparser.add_argument('--sulc_atlas', help='silc_atlas image filename')\nparser.add_argument('--sphere_freesurfer', help='sphere_freesurfer image filename')\nparser.add_argument('--plot_image', help='show time image output filename')\nparser.add_argument('--plot_image_dif_1', help='show dif image output filename')\nparser.add_argument('--plot_image_dif_2', help='show dif image output filename')\nparser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')\nparser.add_argument('--multichannel', action='store_true',\n help='specify that data has multiple channels')\n\nargs = parser.parse_args()\n\n\ndef meannormalize(sub_data):\n 
mean = np.mean(sub_data)\n std = np.std(sub_data)\n norm = (sub_data - mean) / std\n return norm, mean, std\n\n\ndef backmeannormalize(input, mean, std):\n output = input * std + mean\n return output\n\n\ndef minmaxnormalize(sub_data):\n zeros = sub_data == 0\n max = np.max(sub_data)\n min = np.min(sub_data)\n norm = (sub_data - min) / (max - min)\n norm[zeros] = 0\n return norm\n\n\ndef backminmaxnormalize(input, max, min):\n output = input * (max - min) + min\n return output\n\n\ndef domainnorm(sub_data):\n domain = 33\n norm = sub_data / domain\n return norm\n\n\ndef backdomainnorm(sub_data):\n domain = 33\n output = sub_data * domain\n return output\n\n\n# def normalize_forword(data, type=\"std\"):\n# if type == \"std\":\n# return meannormalize(data)\n# elif type == \"min_max\":\n# return minmaxnormalize(data)\n# else:\n# raise KeyError(\"type is error\")\n#\n# def normalize_backword(data, a, b, type=\"std\"):\n# if type == \"std\":\n# return backmeannormalize(data, a, b)\n# elif type == \"min_max\":\n# return backminmaxnormalize(data, a, b)\n# else:\n# raise KeyError(\"type is error\")\n\ndef interpolate(warp_file, lh_sphere):\n x = np.linspace(-128, 128, 256) # phi ###\n y = np.linspace(0, 512, 512) # theta ###\n\n # print(warp_file.files)\n warp = warp_file.squeeze()\n warp = warp.permute(0, 2, 1)\n warp = warp.detach().numpy()\n # warp = warp_file['vol']\n # warp = np.moveaxis(warp, 1, -1)\n\n interpolate_function_x = RegularGridInterpolator((x, y), -warp[0]) # x-axis\n interpolate_function_y = RegularGridInterpolator((x, y), -warp[1]) # y-axis\n\n coords, faces = nib.freesurfer.read_geometry(lh_sphere)\n r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])\n p = phi.degree\n t = theta.degree\n\n theta_bins = 512\n phi_bins = 256\n theta_width = math.degrees(2 * np.pi) / theta_bins\n t /= theta_width\n phi_width = math.degrees(np.pi) / phi_bins\n p /= phi_width\n t = t.reshape(-1, 1)\n p = p.reshape(-1, 1)\n pts = np.concatenate((p, t), axis=1)\n\n new_pts_x = interpolate_function_x(pts)\n new_pts_y = interpolate_function_y(pts)\n x_prime = pts.T[0] + new_pts_x\n y_prime = pts.T[1] + new_pts_y\n\n x_prime *= phi_width\n y_prime *= theta_width\n y_prime = np.clip(y_prime, 0, 360)\n x_prime = np.clip(x_prime, -90, 90)\n\n t_prime = [math.radians(i) for i in y_prime]\n p_prime = [math.radians(i) for i in x_prime]\n t_prime = np.array(t_prime)\n p_prime = np.array(p_prime)\n\n return r, p_prime, t_prime\n\n# save 4 image\ndef save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,\n imagesavefilename):\n lh_morph_sulc_sub = nib.freesurfer.read_morph_data(lh_sulc_sub)\n lh_morph_sulc_atlas = nib.freesurfer.read_morph_data(lh_sulc_atlas)\n\n coords_sub, faces_sub = nib.freesurfer.read_geometry(lh_sphere_sub)\n r_sub, phi_sub, theta_sub = cartesian_to_spherical(coords_sub[:, 0], coords_sub[:, 1], coords_sub[:, 2])\n coords_atlas, faces_atlas = nib.freesurfer.read_geometry(lh_sphere_atlas)\n r_atlas, phi_atlas, theta_atlas = cartesian_to_spherical(coords_atlas[:, 0], coords_atlas[:, 1], coords_atlas[:, 2])\n coords_freesurfer, faces_freesurfer = nib.freesurfer.read_geometry(lh_sphere_freesurfer)\n r_reg, phi_reg, theta_reg = cartesian_to_spherical(coords_freesurfer[:, 0], coords_freesurfer[:, 1],\n coords_freesurfer[:, 2])\n\n fig = plt.figure(figsize=(14, 7))\n ax = fig.add_subplot(141)\n ax.scatter(phi_sub.degree, theta_sub.degree, s=0.1,\n c=lh_morph_sulc_sub) # phi.degree: [-90, 90], theta.degree: 
[0, 360]\n plt.title('Moving')\n\n ax = fig.add_subplot(142)\n ax.scatter(phi_atlas.degree, theta_atlas.degree, s=0.1, c=lh_morph_sulc_atlas)\n plt.title('Fixed')\n\n ax = fig.add_subplot(143)\n phi_prime = [math.degrees(p) for p in phi_prime]\n thtea_prime = [math.degrees(t) for t in theta_prime]\n ax.scatter(phi_prime, thtea_prime, s=0.1, c=lh_morph_sulc_sub) # (256, 512)\n plt.title('Moved')\n\n ax = fig.add_subplot(144)\n ax.scatter(phi_reg.degree, theta_reg.degree, s=0.1, c=lh_morph_sulc_sub) # (256, 512)\n plt.title('Moved FreeSurfer')\n\n plt.savefig(imagesavefilename)\n\n\ndef xyz2degree(lh_sphere, lh_sulc):\n # coords: return (x, y, z) coordinates\n # faces: defining mesh triangles\n coords, faces = nib.freesurfer.read_geometry(lh_sphere)\n\n # (r: radius, phi: latitude, theta: longitude) in radians\n r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])\n\n lat = phi.degree + 90\n lon = theta.degree\n # resize to (512, 256)\n y_bins = 512\n x_bins = 256\n y_width = math.degrees(2 * np.pi) / y_bins\n ys = lon // y_width\n x_width = math.degrees(np.pi) / x_bins\n xs = lat // x_width\n\n ys = np.clip(ys, 0, 511)\n xs = np.clip(xs, 0, 255)\n\n # load curv and sulc info\n lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)\n xs = xs.astype(np.int32)\n ys = ys.astype(np.int32)\n\n # values store [theta, phi, sulc value, curv value]\n values = np.zeros((512, 256))\n values[ys, xs] = lh_morph_sulc\n # values[1, ys, xs] = lh_morph_curv\n\n return values\n\ndef xyz2degree2(phi, theta, lh_sulc):\n\n lat = phi + 90\n lon = theta\n # resize to (512, 256)\n y_bins = 512\n x_bins = 256\n y_width = math.degrees(2 * np.pi) / y_bins\n ys = lon // y_width\n x_width = math.degrees(np.pi) / x_bins\n xs = lat // x_width\n\n ys = np.clip(ys, 0, 511)\n xs = np.clip(xs, 0, 255)\n\n # load curv and sulc info\n lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)\n xs = xs.astype(np.int32)\n ys = ys.astype(np.int32)\n\n # values store [theta, phi, sulc value, curv value]\n values = np.zeros((512, 256))\n values[ys, xs] = lh_morph_sulc\n # values[1, ys, xs] = lh_morph_curv\n\n return values\n\n# device handling\nif args.gpu and (args.gpu != '-1'):\n device = 'cuda'\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\nelse:\n device = 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n# load moving and fixed images\nadd_feat_axis = not args.multichannel\nmoving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)\nfixed, fixed_affine = vxm.py.utils.load_volfile(\n args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)\n\n# load and set up model\nmodel = vxm.networks.VxmDense.load(args.model, device)\nmodel.to(device)\nmodel.eval()\n\n# set up normalize type\n# normalize_type = args.normalize_type\n# normalize_type = \"min_max\"\n\n# set up tensors and permute\n# moving, a_moving, b_moving = normalize_forword(moving, type=normalize_type)\n# fixed, a_fixed, b_fixed = normalize_forword(fixed, type=normalize_type)\n\n# moving = domainnorm(moving)\nmoving = minmaxnormalize(moving)\nfixed = minmaxnormalize(fixed)\n\ninput_moving = torch.from_numpy(moving).to(device).float().permute(0, 3, 1, 2)\ninput_fixed = torch.from_numpy(fixed).to(device).float().permute(0, 3, 1, 2)\n\n# predict\nmoved, warp = model(input_moving, input_fixed, registration=True)\n# moved = normalize_backword(moved, a_moving, b_moving, type=normalize_type)\n# moved = backdomainnorm(moved)\n\n\nif args.sphere_sub:\n c, faces = 
nib.freesurfer.read_geometry(args.sphere_sub)\n coords = np.empty(shape=c.shape)\n r, phi_prime, theta_prime = interpolate(warp, args.sphere_sub)\n coords[:, 0], coords[:, 1], coords[:, 2] = spherical_to_cartesian(r, phi_prime, theta_prime)\n nib.freesurfer.io.write_geometry(args.sphere_reg, coords, faces)\n\nif args.plot_image:\n lh_sphere_sub = args.sphere_sub\n lh_sphere_atlas = args.sphere_atlas\n lh_sulc_sub = args.sulc_sub\n lh_sulc_atlas = args.sulc_atlas\n lh_sphere_freesurfer = args.sphere_freesurfer\n imagesavefilename = args.plot_image\n save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,\n imagesavefilename)\nif args.plot_image_dif_1 or args.plot_image_dif_2:\n imagesavefilenamedif_1 = args.plot_image_dif_1\n imagesavefilenamedif_2 = args.plot_image_dif_2\n dif_moving = xyz2degree(lh_sphere_sub, lh_sulc_sub)\n dif_moved = xyz2degree2(phi_prime, theta_prime, lh_sulc_sub)\n dif_freesurfer = xyz2degree(lh_sphere_freesurfer, lh_sulc_sub)\n dif_moved_moving = dif_moved - dif_moving\n print(np.nanmax(dif_moved_moving), np.nanmin(dif_moved_moving), np.nanmean(dif_moved_moving))\n dif_freesurfer_moved = dif_freesurfer - dif_moved\n\n plt.figure(figsize=(14, 7))\n plt.imshow(dif_moved_moving)\n plt.title('moved_moving')\n plt.colorbar()\n plt.savefig(imagesavefilenamedif_1)\n\n plt.figure(figsize=(14, 7))\n plt.imshow(dif_freesurfer_moved)\n plt.title('freesurfer_moved')\n plt.colorbar()\n plt.savefig(imagesavefilenamedif_2)\n\n\n# save moved image\nif args.moved:\n moved = moved.detach().cpu().numpy().squeeze()\n vxm.py.utils.save_volfile(moved, args.moved, fixed_affine)\n\n# save warp\nif args.warp:\n warp = warp.detach().cpu().numpy().squeeze()\n vxm.py.utils.save_volfile(warp, args.warp, fixed_affine)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.nanmean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"torch.from_numpy",
"numpy.linspace",
"numpy.mean",
"numpy.zeros",
"scipy.interpolate.RegularGridInterpolator",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.colorbar",
"numpy.std",
"numpy.empty",
"numpy.nanmax",
"numpy.nanmin",
"numpy.clip",
"numpy.array",
"numpy.concatenate"
]
] |
meretp/pymor | [
"01876cd39e04bec6d4299f36b59663cd08beafd3",
"0965a5c3d0725466103efae5190493fceb2bf441"
] | [
"src/pymor/reductors/residual.py",
"src/pymor/bindings/ngsolve.py"
] | [
"# This file is part of the pyMOR project (https://www.pymor.org).\n# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\n\nimport numpy as np\n\nfrom pymor.algorithms.image import estimate_image_hierarchical\nfrom pymor.algorithms.projection import project, project_to_subbasis\nfrom pymor.core.base import BasicObject\nfrom pymor.core.exceptions import ImageCollectionError\nfrom pymor.operators.constructions import ZeroOperator\nfrom pymor.operators.interface import Operator\n\n\nclass ResidualReductor(BasicObject):\n \"\"\"Generic reduced basis residual reductor.\n\n Given an operator and a right-hand side, the residual is given by::\n\n residual.apply(U, mu) == operator.apply(U, mu) - rhs.as_range_array(mu)\n\n When operator maps to functionals instead of vectors, we are interested in the Riesz\n representative of the residual::\n\n residual.apply(U, mu)\n == product.apply_inverse(operator.apply(U, mu) - rhs.as_range_array(mu))\n\n Given a basis `RB` of a subspace of the source space of `operator`, this reductor\n uses :func:`~pymor.algorithms.image.estimate_image_hierarchical` to determine\n a low-dimensional subspace containing the image of the subspace under\n `residual` (resp. `riesz_residual`), computes an orthonormal basis\n `residual_range` for this range space and then returns the Petrov-Galerkin projection ::\n\n projected_residual\n == project(residual, range_basis=residual_range, source_basis=RB)\n\n of the residual operator. Given a reduced basis coefficient vector `u`, w.r.t.\n `RB`, the (dual) norm of the residual can then be computed as ::\n\n projected_residual.apply(u, mu).norm()\n\n Moreover, a `reconstruct` method is provided such that ::\n\n residual_reductor.reconstruct(projected_residual.apply(u, mu))\n == residual.apply(RB.lincomb(u), mu)\n\n Parameters\n ----------\n RB\n |VectorArray| containing a basis of the reduced space onto which to project.\n operator\n See definition of `residual`.\n rhs\n See definition of `residual`. If `None`, zero right-hand side is assumed.\n product\n Inner product |Operator| w.r.t. which to orthonormalize and w.r.t. which to\n compute the Riesz representatives in case `operator` maps to functionals.\n riesz_representatives\n If `True` compute the Riesz representative of the residual.\n \"\"\"\n\n def __init__(self, RB, operator, rhs=None, product=None, riesz_representatives=False):\n assert RB in operator.source\n assert rhs is None \\\n or (rhs.source.is_scalar and rhs.range == operator.range and rhs.linear)\n assert product is None or product.source == product.range == operator.range\n\n self.__auto_init(locals())\n self.residual_range = operator.range.empty()\n self.residual_range_dims = []\n\n def reduce(self):\n if self.residual_range is not False:\n with self.logger.block('Estimating residual range ...'):\n try:\n self.residual_range, self.residual_range_dims = \\\n estimate_image_hierarchical([self.operator], [self.rhs],\n self.RB,\n (self.residual_range, self.residual_range_dims),\n orthonormalize=True, product=self.product,\n riesz_representatives=self.riesz_representatives)\n except ImageCollectionError as e:\n self.logger.warning(f'Cannot compute range of {e.op}. 
Evaluation will be slow.')\n self.residual_range = False\n\n if self.residual_range is False:\n operator = project(self.operator, None, self.RB)\n return NonProjectedResidualOperator(operator, self.rhs, self.riesz_representatives, self.product)\n\n with self.logger.block('Projecting residual operator ...'):\n if self.riesz_representatives:\n operator = project(self.operator, self.residual_range, self.RB, product=None) # the product cancels out\n rhs = project(self.rhs, self.residual_range, None, product=None)\n else:\n operator = project(self.operator, self.residual_range, self.RB, product=self.product)\n rhs = project(self.rhs, self.residual_range, None, product=self.product)\n\n return ResidualOperator(operator, rhs)\n\n def reconstruct(self, u):\n \"\"\"Reconstruct high-dimensional residual vector from reduced vector `u`.\"\"\"\n if self.residual_range is False:\n if self.product:\n return u * (u.norm() / u.norm(self.product))[0]\n else:\n return u\n else:\n return self.residual_range[:u.dim].lincomb(u.to_numpy())\n\n\nclass ResidualOperator(Operator):\n \"\"\"Instantiated by :class:`ResidualReductor`.\"\"\"\n\n def __init__(self, operator, rhs, name=None):\n self.__auto_init(locals())\n self.source = operator.source\n self.range = operator.range\n self.linear = operator.linear\n self.rhs_vector = rhs.as_range_array() if rhs and not rhs.parametric else None\n\n def apply(self, U, mu=None):\n V = self.operator.apply(U, mu=mu)\n if self.rhs:\n F = self.rhs_vector or self.rhs.as_range_array(mu)\n if len(V) > 1:\n V -= F[[0]*len(V)]\n else:\n V -= F\n return V\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return ResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),\n project_to_subbasis(self.rhs, dim_range, None),\n name=name)\n\n\nclass NonProjectedResidualOperator(ResidualOperator):\n \"\"\"Instantiated by :class:`ResidualReductor`.\n\n Not to be used directly.\n \"\"\"\n\n def __init__(self, operator, rhs, riesz_representatives, product):\n super().__init__(operator, rhs)\n self.__auto_init(locals())\n\n def apply(self, U, mu=None):\n R = super().apply(U, mu=mu)\n if self.product:\n if self.riesz_representatives:\n R_riesz = self.product.apply_inverse(R)\n # divide by norm, except when norm is zero:\n inversel2 = 1./R_riesz.norm()\n inversel2 = np.nan_to_num(inversel2)\n R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)\n return R_riesz\n else:\n # divide by norm, except when norm is zero:\n inversel2 = 1./R.norm()\n inversel2 = np.nan_to_num(inversel2)\n R.scal(np.sqrt(self.product.pairwise_apply2(R, R)) * inversel2)\n return R\n else:\n return R\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return self.with_(operator=project_to_subbasis(self.operator, None, dim_source))\n\n\nclass ImplicitEulerResidualReductor(BasicObject):\n \"\"\"Reduced basis residual reductor with mass operator for implicit Euler timestepping.\n\n Given an operator, mass and a functional, the concatenation of residual operator\n with the Riesz isomorphism is given by::\n\n riesz_residual.apply(U, U_old, mu)\n == product.apply_inverse(operator.apply(U, mu) + 1/dt*mass.apply(U, mu)\n - 1/dt*mass.apply(U_old, mu) - rhs.as_vector(mu))\n\n This reductor determines a low-dimensional subspace of the image of a reduced basis space under\n `riesz_residual` using :func:`~pymor.algorithms.image.estimate_image_hierarchical`, computes an\n orthonormal basis `residual_range` of this range space and then returns the 
Petrov-Galerkin\n projection ::\n\n projected_riesz_residual\n == riesz_residual.projected(range_basis=residual_range, source_basis=RB)\n\n of the `riesz_residual` operator. Given reduced basis coefficient vectors `u` and `u_old`,\n the dual norm of the residual can then be computed as ::\n\n projected_riesz_residual.apply(u, u_old, mu).norm()\n\n Moreover, a `reconstruct` method is provided such that ::\n\n residual_reductor.reconstruct(projected_riesz_residual.apply(u, u_old, mu))\n == riesz_residual.apply(RB.lincomb(u), RB.lincomb(u_old), mu)\n\n Parameters\n ----------\n operator\n See definition of `riesz_residual`.\n mass\n The mass operator. See definition of `riesz_residual`.\n dt\n The time step size. See definition of `riesz_residual`.\n rhs\n See definition of `riesz_residual`. If `None`, zero right-hand side is assumed.\n RB\n |VectorArray| containing a basis of the reduced space onto which to project.\n product\n Inner product |Operator| w.r.t. which to compute the Riesz representatives.\n \"\"\"\n\n def __init__(self, RB, operator, mass, dt, rhs=None, product=None):\n assert RB in operator.source\n assert rhs.source.is_scalar and rhs.range == operator.range and rhs.linear\n assert product is None or product.source == product.range == operator.range\n\n self.__auto_init(locals())\n self.residual_range = operator.range.empty()\n self.residual_range_dims = []\n\n def reduce(self):\n if self.residual_range is not False:\n with self.logger.block('Estimating residual range ...'):\n try:\n self.residual_range, self.residual_range_dims = \\\n estimate_image_hierarchical([self.operator, self.mass], [self.rhs],\n self.RB,\n (self.residual_range, self.residual_range_dims),\n orthonormalize=True, product=self.product,\n riesz_representatives=True)\n except ImageCollectionError as e:\n self.logger.warning(f'Cannot compute range of {e.op}. 
Evaluation will be slow.')\n self.residual_range = False\n\n if self.residual_range is False:\n operator = project(self.operator, None, self.RB)\n mass = project(self.mass, None, self.RB)\n return NonProjectedImplicitEulerResidualOperator(operator, mass, self.rhs, self.dt, self.product)\n\n with self.logger.block('Projecting residual operator ...'):\n # the product always cancels out\n operator = project(self.operator, self.residual_range, self.RB, product=None)\n mass = project(self.mass, self.residual_range, self.RB, product=None)\n rhs = project(self.rhs, self.residual_range, None, product=None)\n\n return ImplicitEulerResidualOperator(operator, mass, rhs, self.dt)\n\n def reconstruct(self, u):\n \"\"\"Reconstruct high-dimensional residual vector from reduced vector `u`.\"\"\"\n if self.residual_range is False:\n if self.product:\n return u * (u.norm() / u.norm(self.product))[0]\n else:\n return u\n else:\n return self.residual_range[:u.dim].lincomb(u.to_numpy())\n\n\nclass ImplicitEulerResidualOperator(Operator):\n \"\"\"Instantiated by :class:`ImplicitEulerResidualReductor`.\"\"\"\n\n def __init__(self, operator, mass, rhs, dt, name=None):\n self.__auto_init(locals())\n self.source = operator.source\n self.range = operator.range\n self.linear = operator.linear\n self.rhs_vector = rhs.as_range_array() if not rhs.parametric else None\n\n def apply(self, U, U_old, mu=None):\n V = self.operator.apply(U, mu=mu)\n V.axpy(1./self.dt, self.mass.apply(U, mu=mu))\n V.axpy(-1./self.dt, self.mass.apply(U_old, mu=mu))\n if not isinstance(self.rhs, ZeroOperator):\n F = self.rhs_vector or self.rhs.as_range_array(mu)\n if len(V) > 1:\n V -= F[[0]*len(V)]\n else:\n V -= F\n return V\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return ImplicitEulerResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),\n project_to_subbasis(self.mass, dim_range, dim_source),\n project_to_subbasis(self.rhs, dim_range, None),\n self.dt,\n name=name)\n\n\nclass NonProjectedImplicitEulerResidualOperator(ImplicitEulerResidualOperator):\n \"\"\"Instantiated by :class:`ImplicitEulerResidualReductor`.\n\n Not to be used directly.\n \"\"\"\n\n def __init__(self, operator, mass, rhs, dt, product):\n super().__init__(operator, mass, rhs, dt)\n self.product = product\n\n def apply(self, U, U_old, mu=None):\n R = super().apply(U, U_old, mu=mu)\n if self.product:\n R_riesz = self.product.apply_inverse(R)\n # divide by norm, except when norm is zero:\n inversel2 = 1./R_riesz.norm()\n inversel2 = np.nan_to_num(inversel2)\n R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)\n return R_riesz\n else:\n return R\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return self.with_(operator=project_to_subbasis(self.operator, None, dim_source),\n mass=project_to_subbasis(self.mass, None, dim_source))\n",
"# This file is part of the pyMOR project (https://www.pymor.org).\n# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\nfrom pathlib import Path\n\nfrom pymor.core.config import config\nfrom pymor.core.defaults import defaults\nfrom pymor.tools.io import change_to_directory\n\nif config.HAVE_NGSOLVE:\n import ngsolve as ngs\n import numpy as np\n\n from pymor.core.base import ImmutableObject\n from pymor.operators.list import LinearComplexifiedListVectorArrayOperatorBase\n from pymor.vectorarrays.interface import VectorArray\n from pymor.vectorarrays.numpy import NumpyVectorSpace\n from pymor.vectorarrays.list import CopyOnWriteVector, ComplexifiedVector, ComplexifiedListVectorSpace\n\n class NGSolveVectorCommon:\n def amax(self):\n A = np.abs(self.to_numpy())\n max_ind = np.argmax(A)\n max_val = A[max_ind]\n return max_ind, max_val\n\n def dofs(self, dof_indices):\n return self.to_numpy()[dof_indices]\n\n class NGSolveVector(NGSolveVectorCommon, CopyOnWriteVector):\n \"\"\"Wraps a NGSolve BaseVector to make it usable with ListVectorArray.\"\"\"\n\n def __init__(self, impl):\n self.impl = impl\n\n @classmethod\n def from_instance(cls, instance):\n return cls(instance.impl)\n\n def _copy_data(self):\n new_impl = ngs.GridFunction(self.impl.space)\n new_impl.vec.data = self.impl.vec\n self.impl = new_impl\n\n def to_numpy(self, ensure_copy=False):\n if ensure_copy:\n return self.impl.vec.FV().NumPy().copy()\n self._copy_data_if_needed()\n return self.impl.vec.FV().NumPy()\n\n def _scal(self, alpha):\n self.impl.vec.data = float(alpha) * self.impl.vec\n\n def _axpy(self, alpha, x):\n self.impl.vec.data = self.impl.vec + float(alpha) * x.impl.vec\n\n def inner(self, other):\n return self.impl.vec.InnerProduct(other.impl.vec)\n\n def norm(self):\n return self.impl.vec.Norm()\n\n def norm2(self):\n return self.impl.vec.Norm() ** 2\n\n class ComplexifiedNGSolveVector(NGSolveVectorCommon, ComplexifiedVector):\n pass\n\n class NGSolveVectorSpace(ComplexifiedListVectorSpace):\n\n complexified_vector_type = ComplexifiedNGSolveVector\n\n def __init__(self, V, id='STATE'):\n self.__auto_init(locals())\n\n def __eq__(self, other):\n return type(other) is NGSolveVectorSpace and self.V == other.V and self.id == other.id\n\n def __hash__(self):\n return hash(self.V) + hash(self.id)\n\n @property\n def value_dim(self):\n u = self.V.TrialFunction()\n if isinstance(u, list):\n return u[0].dim\n else:\n return u.dim\n\n @property\n def dim(self):\n return self.V.ndofglobal * self.value_dim\n\n @classmethod\n def space_from_vector_obj(cls, vec, id):\n return cls(vec.space, id)\n\n def real_zero_vector(self):\n impl = ngs.GridFunction(self.V)\n return NGSolveVector(impl)\n\n def real_make_vector(self, obj):\n return NGSolveVector(obj)\n\n def real_vector_from_numpy(self, data, ensure_copy=False):\n v = self.real_zero_vector()\n v.to_numpy()[:] = data\n return v\n\n class NGSolveMatrixOperator(LinearComplexifiedListVectorArrayOperatorBase):\n \"\"\"Wraps a NGSolve matrix as an |Operator|.\"\"\"\n\n def __init__(self, matrix, range, source, solver_options=None, name=None):\n self.__auto_init(locals())\n\n @defaults('default_solver')\n def _prepare_apply(self, U, mu, kind, least_squares=False, default_solver=''):\n if kind == 'apply_inverse':\n if least_squares:\n raise NotImplementedError\n solver = self.solver_options.get('inverse', default_solver) if self.solver_options else default_solver\n inv = 
self.matrix.Inverse(self.source.V.FreeDofs(), inverse=solver)\n return inv\n\n def _real_apply_one_vector(self, u, mu=None, prepare_data=None):\n r = self.range.real_zero_vector()\n self.matrix.Mult(u.impl.vec, r.impl.vec)\n return r\n\n def _real_apply_adjoint_one_vector(self, v, mu=None, prepare_data=None):\n u = self.source.real_zero_vector()\n try:\n mat = self.matrix.Transpose()\n except AttributeError:\n mat = self.matrix.T\n mat.Mult(v.impl.vec, u.impl.vec)\n return u\n\n def _real_apply_inverse_one_vector(self, v, mu=None, initial_guess=None,\n least_squares=False, prepare_data=None):\n inv = prepare_data\n r = self.source.real_zero_vector()\n r.impl.vec.data = inv * v.impl.vec\n return r\n\n def _assemble_lincomb(self, operators, coefficients, identity_shift=0., solver_options=None, name=None):\n if not all(isinstance(op, NGSolveMatrixOperator) for op in operators):\n return None\n if identity_shift != 0:\n return None\n\n matrix = operators[0].matrix.CreateMatrix()\n matrix.AsVector().data = float(coefficients[0]) * matrix.AsVector()\n for op, c in zip(operators[1:], coefficients[1:]):\n matrix.AsVector().data += float(c) * op.matrix.AsVector()\n return NGSolveMatrixOperator(matrix, self.range, self.source, solver_options=solver_options, name=name)\n\n def as_vector(self, copy=True):\n vec = self.matrix.AsVector().FV().NumPy()\n return NumpyVectorSpace.make_array(vec.copy() if copy else vec)\n\n class NGSolveVisualizer(ImmutableObject):\n \"\"\"Visualize an NGSolve grid function.\"\"\"\n\n def __init__(self, mesh, fespace):\n self.__auto_init(locals())\n self.space = NGSolveVectorSpace(fespace)\n\n def visualize(self, U, legend=None, separate_colorbars=True, filename=None, block=True):\n \"\"\"Visualize the provided data.\"\"\"\n if isinstance(U, VectorArray):\n U = (U,)\n assert all(u in self.space for u in U)\n if any(len(u) != 1 for u in U):\n raise NotImplementedError\n if any(u._list[0].imag_part is not None for u in U):\n raise NotImplementedError\n if legend is None:\n legend = [f'VectorArray{i}' for i in range(len(U))]\n if isinstance(legend, str):\n legend = [legend]\n assert len(legend) == len(U)\n legend = [l.replace(' ', '_') for l in legend] # NGSolve GUI will fail otherwise\n\n if filename:\n # ngsolve unconditionnaly appends \".vtk\"\n filename = Path(filename).resolve()\n if filename.suffix == '.vtk':\n filename = filename.parent / filename.stem\n else:\n self.logger.warning(f'NGSolve set VTKOutput filename to {filename}.vtk')\n coeffs = [u._list[0].real_part.impl for u in U]\n # ngsolve cannot handle full paths for filenames\n with change_to_directory(filename.parent):\n vtk = ngs.VTKOutput(ma=self.mesh, coefs=coeffs, names=legend, filename=str(filename), subdivision=0)\n vtk.Do()\n else:\n if not separate_colorbars:\n raise NotImplementedError\n\n for u, name in zip(U, legend):\n ngs.Draw(u._list[0].real_part.impl, self.mesh, name=name)\n"
] | [
[
"numpy.nan_to_num"
],
[
"numpy.argmax"
]
] |
Mishrasubha/napari | [
"c4d1038fc3ed30dc228949cbdedf12826ec2efc2"
] | [
"napari/_qt/layer_controls/qt_vectors_controls.py"
] | [
"import numpy as np\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QComboBox, QDoubleSpinBox, QLabel\n\nfrom ...layers.utils._color_manager_constants import ColorMode\nfrom ...utils.translations import trans\nfrom ..utils import qt_signals_blocked\nfrom ..widgets.qt_color_swatch import QColorSwatchEdit\nfrom .qt_layer_controls_base import QtLayerControls\n\n\nclass QtVectorsControls(QtLayerControls):\n \"\"\"Qt view and controls for the napari Vectors layer.\n\n Parameters\n ----------\n layer : napari.layers.Vectors\n An instance of a napari Vectors layer.\n\n Attributes\n ----------\n edge_color_label : qtpy.QtWidgets.QLabel\n Label for edgeColorSwatch\n edgeColorSwatch : qtpy.QtWidgets.QFrame\n Color swatch showing display color of vectors.\n edgeComboBox : qtpy.QtWidgets.QComboBox\n Dropdown widget to select display color for vectors.\n color_mode_comboBox : qtpy.QtWidgets.QComboBox\n Dropdown widget to select edge_color_mode for the vectors.\n color_prop_box : qtpy.QtWidgets.QComboBox\n Dropdown widget to select _edge_color_property for the vectors.\n edge_prop_label : qtpy.QtWidgets.QLabel\n Label for color_prop_box\n grid_layout : qtpy.QtWidgets.QGridLayout\n Layout of Qt widget controls for the layer.\n layer : napari.layers.Vectors\n An instance of a napari Vectors layer.\n lengthSpinBox : qtpy.QtWidgets.QDoubleSpinBox\n Spin box widget controlling line length of vectors.\n Multiplicative factor on projections for length of all vectors.\n widthSpinBox : qtpy.QtWidgets.QDoubleSpinBox\n Spin box widget controlling edge line width of vectors.\n \"\"\"\n\n def __init__(self, layer):\n super().__init__(layer)\n\n self.layer.events.edge_width.connect(self._on_edge_width_change)\n self.layer.events.length.connect(self._on_length_change)\n self.layer.events.edge_color_mode.connect(\n self._on_edge_color_mode_change\n )\n self.layer.events.edge_color.connect(self._on_edge_color_change)\n\n # dropdown to select the property for mapping edge_color\n color_properties = self._get_property_values()\n color_prop_box = QComboBox(self)\n color_prop_box.activated[str].connect(self.change_edge_color_property)\n color_prop_box.addItems(color_properties)\n self.color_prop_box = color_prop_box\n self.edge_prop_label = QLabel(trans._('edge property:'))\n\n # vector direct color mode adjustment and widget\n self.edgeColorEdit = QColorSwatchEdit(\n initial_color=self.layer.edge_color,\n tooltip=trans._(\n 'click to set current edge color',\n ),\n )\n self.edgeColorEdit.color_changed.connect(self.change_edge_color_direct)\n self.edge_color_label = QLabel(trans._('edge color:'))\n self._on_edge_color_change()\n\n # dropdown to select the edge color mode\n colorModeComboBox = QComboBox(self)\n color_modes = [e.value for e in ColorMode]\n colorModeComboBox.addItems(color_modes)\n colorModeComboBox.activated[str].connect(self.change_edge_color_mode)\n self.color_mode_comboBox = colorModeComboBox\n self._on_edge_color_mode_change()\n\n # line width in pixels\n self.widthSpinBox = QDoubleSpinBox()\n self.widthSpinBox.setKeyboardTracking(False)\n self.widthSpinBox.setSingleStep(0.1)\n self.widthSpinBox.setMinimum(0.1)\n self.widthSpinBox.setMaximum(np.inf)\n self.widthSpinBox.setValue(self.layer.edge_width)\n self.widthSpinBox.valueChanged.connect(self.change_width)\n\n # line length\n self.lengthSpinBox = QDoubleSpinBox()\n self.lengthSpinBox.setKeyboardTracking(False)\n self.lengthSpinBox.setSingleStep(0.1)\n self.lengthSpinBox.setValue(self.layer.length)\n self.lengthSpinBox.setMinimum(0.1)\n 
self.lengthSpinBox.setMaximum(np.inf)\n self.lengthSpinBox.valueChanged.connect(self.change_length)\n\n # grid_layout created in QtLayerControls\n # addWidget(widget, row, column, [row_span, column_span])\n self.grid_layout.addWidget(QLabel(trans._('opacity:')), 0, 0)\n self.grid_layout.addWidget(self.opacitySlider, 0, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('width:')), 1, 0)\n self.grid_layout.addWidget(self.widthSpinBox, 1, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('length:')), 2, 0)\n self.grid_layout.addWidget(self.lengthSpinBox, 2, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('blending:')), 3, 0)\n self.grid_layout.addWidget(self.blendComboBox, 3, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('edge color mode:')), 4, 0)\n self.grid_layout.addWidget(self.color_mode_comboBox, 4, 1, 1, 2)\n self.grid_layout.addWidget(self.edge_color_label, 5, 0)\n self.grid_layout.addWidget(self.edgeColorEdit, 5, 1, 1, 2)\n self.grid_layout.addWidget(self.edge_prop_label, 6, 0)\n self.grid_layout.addWidget(self.color_prop_box, 6, 1, 1, 2)\n self.grid_layout.setRowStretch(7, 1)\n self.grid_layout.setColumnStretch(1, 1)\n self.grid_layout.setSpacing(4)\n\n def change_edge_color_property(self, property: str):\n \"\"\"Change edge_color_property of vectors on the layer model.\n This property is the property the edge color is mapped to.\n\n Parameters\n ----------\n property : str\n property to map the edge color to\n \"\"\"\n mode = self.layer.edge_color_mode\n try:\n self.layer.edge_color = property\n self.layer.edge_color_mode = mode\n except TypeError:\n # if the selected property is the wrong type for the current color mode\n # the color mode will be changed to the appropriate type, so we must update\n self._on_edge_color_mode_change()\n raise\n\n def change_edge_color_mode(self, mode: str):\n \"\"\"Change edge color mode of vectors on the layer model.\n\n Parameters\n ----------\n mode : str\n Edge color for vectors. 
Must be: 'direct', 'cycle', or 'colormap'\n \"\"\"\n old_mode = self.layer.edge_color_mode\n with self.layer.events.edge_color_mode.blocker():\n try:\n self.layer.edge_color_mode = mode\n self._update_edge_color_gui(mode)\n\n except ValueError:\n # if the color mode was invalid, revert to the old mode\n self.layer.edge_color_mode = old_mode\n raise\n\n def change_edge_color_direct(self, color: np.ndarray):\n \"\"\"Change edge color of vectors on the layer model.\n\n Parameters\n ----------\n color : np.ndarray\n Edge color for vectors, in an RGBA array\n \"\"\"\n self.layer.edge_color = color\n\n def change_width(self, value):\n \"\"\"Change edge line width of vectors on the layer model.\n\n Parameters\n ----------\n value : float\n Line width of vectors.\n \"\"\"\n self.layer.edge_width = value\n self.widthSpinBox.clearFocus()\n self.setFocus()\n\n def change_length(self, value):\n \"\"\"Change length of vectors on the layer model.\n\n Multiplicative factor on projections for length of all vectors.\n\n Parameters\n ----------\n value : float\n Length of vectors.\n \"\"\"\n self.layer.length = value\n self.lengthSpinBox.clearFocus()\n self.setFocus()\n\n def _update_edge_color_gui(self, mode: str):\n \"\"\"Update the GUI element associated with edge_color.\n This is typically used when edge_color_mode changes\n\n Parameters\n ----------\n mode : str\n The new edge_color mode the GUI needs to be updated for.\n Should be: 'direct', 'cycle', 'colormap'\n \"\"\"\n if mode in ('cycle', 'colormap'):\n self.edgeColorEdit.setHidden(True)\n self.edge_color_label.setHidden(True)\n self.color_prop_box.setHidden(False)\n self.edge_prop_label.setHidden(False)\n\n elif mode == 'direct':\n self.edgeColorEdit.setHidden(False)\n self.edge_color_label.setHidden(False)\n self.color_prop_box.setHidden(True)\n self.edge_prop_label.setHidden(True)\n\n def _get_property_values(self):\n \"\"\"Get the current property values from the Vectors layer\n\n Returns\n -------\n property_values : np.ndarray\n array of all of the union of the property names (keys)\n in Vectors.properties and Vectors._property_choices\n\n \"\"\"\n property_choices = [*self.layer._property_choices]\n properties = [*self.layer.properties]\n property_values = np.union1d(property_choices, properties)\n\n return property_values\n\n def _on_length_change(self):\n \"\"\"Change length of vectors.\"\"\"\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)\n\n def _on_edge_width_change(self):\n \"\"\"Receive layer model width change event and update width spinbox.\"\"\"\n with self.layer.events.edge_width.blocker():\n self.widthSpinBox.setValue(self.layer.edge_width)\n\n def _on_edge_color_mode_change(self):\n \"\"\"Receive layer model edge color mode change event & update dropdown.\"\"\"\n with qt_signals_blocked(self.color_mode_comboBox):\n mode = self.layer._edge.color_mode\n index = self.color_mode_comboBox.findText(\n mode, Qt.MatchFixedString\n )\n self.color_mode_comboBox.setCurrentIndex(index)\n\n self._update_edge_color_gui(mode)\n\n def _on_edge_color_change(self):\n \"\"\"Receive layer model edge color change event & update dropdown.\"\"\"\n if (\n self.layer._edge.color_mode == ColorMode.DIRECT\n and len(self.layer.data) > 0\n ):\n with qt_signals_blocked(self.edgeColorEdit):\n self.edgeColorEdit.setColor(self.layer.edge_color[0])\n elif self.layer._edge.color_mode in (\n ColorMode.CYCLE,\n ColorMode.COLORMAP,\n ):\n with qt_signals_blocked(self.color_prop_box):\n prop = 
self.layer._edge.color_properties.name\n index = self.color_prop_box.findText(prop, Qt.MatchFixedString)\n self.color_prop_box.setCurrentIndex(index)\n"
] | [
[
"numpy.union1d"
]
] |
XuboGU/neorl | [
"066cdbd9e9cdbfe371278dba3ece116f25edab2d"
] | [
"neorl/tune/runners/estune.py"
] | [
"import numpy as np\nimport pandas as pd\nimport os\nimport random\nimport math\nfrom itertools import repeat\nimport itertools\nimport sys, copy, shutil\nimport subprocess\nfrom multiprocessing.dummy import Pool\nfrom collections import defaultdict\nimport copy\n\nimport random\nimport matplotlib.pyplot as plt\n\ntry: \n from collections.abc import Sequence\nexcept ImportError:\n from collections import Sequence\n\nclass ESTUNE:\n \"\"\"\n A class to parse neorl input template and construct cases for evolution strategy (ES) hyperparameter optimisation\n\n inputs: \n The template input file\n Class object from PARSER.py, featuring user input for TUNE\n neorl logo\n \"\"\"\n\n def __init__(self, tuneclass, inputfile, tuneblock, logo):\n self.logo=logo\n self.inputfile=inputfile\n self.tuneblock=tuneblock\n self.n_last_episodes=int(self.tuneblock[\"n_last_episodes\"])\n self.ncores=int(self.tuneblock[\"ncores\"])\n self.ncases=int(self.tuneblock[\"ncases\"])\n\n #---------------------------------------\n # define genetic algorithm parameters\n #---------------------------------------\n self.popsize=10\n if self.ncases < self.popsize:\n self.ngens=1\n else:\n self.ngens=int(self.ncases/self.popsize)\n self.MU=5\n if tuneclass == 'gatune': # ES/GA tune\n print(\"Performing semi-GA Tune\")\n self.INDPB=0.1\n elif tuneclass == 'estune': # ES tune\n print(\"Performing ES Tune\")\n self.INDPB=1.0\n else: # default setting is ES tune\n print(\"Performing ES Tune\")\n self.INDPB=1.0\n self.CXPB=0.5\n self.MUTPB=0.2\n self.ETA=0.6\n self.SMAX=0.5\n self.paramvals=dict()\n self.paraminds=dict()\n self.datatypes=[]\n\n #-------------------------------\n # construct results directory\n #-------------------------------\n if os.path.exists('./tunecases/'):\n shutil.rmtree('./tunecases/')\n os.makedirs('./tunecases/', exist_ok=True)\n else:\n os.makedirs('./tunecases/', exist_ok=True)\n self.csvlogger='tune.csv'\n self.tunesummary='tunesummary.txt'\n\n #---------------------------------\n # parse the input template\n #---------------------------------\n with open (self.inputfile, 'r') as input_file_text:\n self.template=input_file_text.readlines()\n \n first=0; last=0\n for i in range(len(self.template)):\n if ('READ TUNE' in self.template[i]):\n first=i\n if ('END TUNE' in self.template[i]):\n last=i\n if first == 0 and last ==0:\n raise ('TUNE card cannot be found')\n\n del self.template[first: last+1]\n self.template=\"\".join(self.template)\n\n def tune_count(self):\n \n \"\"\"\n 1- This function uses self.tuneblock, parse it, infer all parameters to be tuned and thier distribution\n 2- This function creates GA engine and instantiates the initial population for evolution algorithm\n \"\"\"\n \n self.param_dict={}\n for item in self.tuneblock:\n if '{' in item and '}' in item and item[0] != '#':\n #-----------------------------------------------------\n # check the existence of the name in the template\n #-----------------------------------------------------\n if item not in self.template:\n raise ValueError('parameter {} in TUNE block cannot be found in any other block, e.g. 
DQN, GA, PPO, etc.'.format(item)) \n\n item_lst=self.tuneblock[item].split(\",\")\n item_lst=[item.strip() for item in item_lst] # get rid of white spaces in the splitted values\n #-------------------------------------------------------\n # check if a uniform distribution of floats is identified\n #-------------------------------------------------------\n try:\n if \"float\" in item_lst:\n item_lst[0]=float(item_lst[0])\n item_lst[1]=float(item_lst[1])\n self.datatypes.append(\"float\")\n print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n elif \"u\" in item_lst: \n item_lst[0]=float(item_lst[0])\n item_lst[1]=float(item_lst[1])\n self.datatypes.append(\"float\")\n print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n except:\n raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --floats-- for {} according to (low, high, u) syntax'.format(item))\n \n #---------------------------------------------------\n # check if a random integer distribution is identified\n #---------------------------------------------------\n try:\n if \"int\" in item_lst:\n item_lst[0]=int(item_lst[0])\n item_lst[1]=int(item_lst[1])\n self.datatypes.append(\"int\")\n print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n elif \"randint\" in item_lst:\n item_lst[0]=int(item_lst[0])\n item_lst[1]=int(item_lst[1])\n self.datatypes.append(\"int\")\n print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n except:\n raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --int-- for {} according to (low, high, u) syntax'.format(item))\n \n #-----------------------------------------------------\n # check if a grid is identified\n #-----------------------------------------------------\n try:\n if \"grid\" in item_lst:\n element_lst=[]\n for element in item_lst:\n # check if it is an integer\n not_int=0\n try:\n element_lst.append(int(element.strip()))\n except Exception:\n not_int=1\n \n # else check if the elment is float\n if not_int:\n try:\n element_lst.append(float(element.strip()))\n # else consider it a string\n except Exception:\n element_lst.append(str(element.strip()))\n \n item_lst=element_lst\n self.datatypes.append(\"grid\")\n print ('-- debug: parameter {} has grid type with values {}'.format(item,item_lst))\n except:\n raise Exception ('--error: TUNE cannot construct the user-given grid for {} according to the comma-seperated syntax'.format(item))\n\n self.param_dict[item]=item_lst # Save the final parsed list for parameter {XXX} \n \n #-----------------------------------------------------\n # infer the bounds for strategy vector \n #-----------------------------------------------------\n if len(self.param_dict.keys()) <= 10:\n self.SMIN=0.1\n else:\n self.SMIN=1/(len(self.param_dict.keys()))\n\n def gen_cases(self, x=0):\n \"\"\"\n This function infers neorl.py path\n \"\"\"\n self.tune_count()\n self.param_names=list(self.param_dict.keys())\n #----------------------- \n # Infer neorl.py path\n #-----------------------\n # Find neorl path\n #self.here=os.path.dirname(os.path.abspath(__file__))\n #self.neorl_path=self.here.replace('src/tune','neorl.py') #try to infer neorl.py internally to call neorl inside or neorl\n 
#self.python_path=self.here.replace('neorl/src/tune','anaconda3/bin/python3') #try to infer python3 path to call neorl inside or neorl\n\n self.neorl_path=sys.argv[0]\n self.python_path=sys.executable\n print('--debug: NEORLPATH=', self.neorl_path)\n print('--debug: PYTHONPATH=', self.python_path)\n \n def GenES(self):\n \"\"\"\n Individual generator:\n 1- This function uses self.param_dict to obtain bounds for individual parameters\n Returns:\n -ind (list): an individual vector with values samples from inferred distribution \n -strategy (list): the strategy vector with values between smin and smax \n \"\"\" \n size=len(self.param_dict.keys()) # size of individual\n content=[]\n self.LOW=[] # Lower bounds for the parameters to be tuned\n self.UP=[] # Upper bounds for parameters to be tuned\n for key in list(self.param_dict.keys()):\n if 'int' in self.param_dict[key]:\n content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'randint' in self.param_dict[key]:\n content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'float' in self.param_dict[key]:\n content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'u' in self.param_dict[key]:\n content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'grid' in self.param_dict[key]:\n self.real_grid=list(self.param_dict[key])\n self.real_grid.remove('grid') # get rid of the 'grid' to avoid sampling it\n self.paramvals[key]=self.real_grid\n content.append(random.sample(self.real_grid, 1)[0])\n self.paraminds[len(content)-1]=key\n else:\n raise Exception('unknown data type is given, either int/randint, float/u, or grid are allowed for parameter distribution types')\n self.LOW.append(self.param_dict[key][0])\n self.UP.append(self.param_dict[key][1])\n ind=list(content)\n size = len(list(self.param_dict.keys()))\n strategy= [random.uniform(self.SMIN, self.SMAX) for _ in range(size)]\n return ind, strategy\n \n def init_pop(self):\n \"\"\"\n Population initializer\n Returns:\n -pop (dict): initial population in a dictionary form \n \"\"\"\n # initialize the population and strategy and run them in parallel (these samples will be used to initialize the memory)\n pop=defaultdict(list)\n \n for i in range(self.popsize):\n #caseid='es_gen{}_ind{}'.format(0,i+1)\n data=self.GenES()\n pop[i].append(data[0])\n pop[i].append(data[1])\n \n if self.ncores > 1: # evaluate warmup in parallel\n core_list=[]\n for key in pop:\n caseid='ind{}'.format(key+1)\n core_list.append([pop[key][0], caseid])\n p=Pool(self.ncores)\n fitness=p.map(self.gen_object, core_list)\n p.close(); p.join()\n\n [pop[ind].append(fitness[ind]) for ind in range(len(pop))]\n \n else: # evaluate warmup in series\n for key in pop:\n caseid='ind{}'.format(key+1)\n fitness=self.fit(pop[key][0], caseid)\n pop[key].append(fitness)\n return pop # return final pop dictionary with ind, strategy, and fitness\n\n def fit(self, ind, caseid):\n \"\"\"\n This function evaluates an individual's fitness\n Inputs:\n -ind (list): an individual whose fitness to evaluate\n -caseid (str): a string that specifies the given individual\n Returns: \n -mean_reward (float): fitness value \n \"\"\"\n try:\n #---------------------------------------------\n # Prepares directories and files for one case\n # --------------------------------------------\n self.param_names=list(self.param_dict.keys())\n i = caseid[3:]\n\n os.makedirs('./tunecases/case{}'.format(i), exist_ok=True)\n 
self.new_template=copy.deepcopy(self.template)\n for j in range (len(self.param_names)):\n self.new_template=self.new_template.replace(str(self.param_names[j]), str(ind[j]))\n \n filename='./tunecases/case{}/case{}.inp'.format(i, i)\n with open (filename, 'w') as fout:\n fout.writelines(self.new_template)\n \n # copy external files into the new directory, if extfiles card exists\n if 'extfiles' in self.tuneblock.keys():\n if self.tuneblock['extfiles']:\n print('--debug: external files are identified, copying them into each case directory')\n for item in self.tuneblock['extfiles']:\n os.system('cp -r {} ./tunecases/case{}/'.format(item, i))\n\n casenum = caseid[3:]\n print('--------------------------------------------------')\n print('Running TUNE Case {}/{}: {}'.format(casenum, self.ncases, ind))\n subprocess.call([self.python_path, self.neorl_path, '-i', 'case{}.inp'.format(casenum)], cwd='./tunecases/case{}/'.format(casenum)) # this exceutes neorl for this case.inp\n print('--------------------------------------------------')\n \n #--------------------------------------------------------------------------------------------------------------\n # Try to infer the _out.csv file in the directory since only one method is allowed\n csvfile=[f for f in os.listdir('./tunecases/case{}/case{}_log/'.format(casenum, casenum)) if f.endswith('_out.csv')]\n if len(csvfile) > 1:\n raise Exception ('multiple *_out.csv files can be found in the logger of TUNE, only one is allowed')\n #--------------------------------------------------------------------------------------------------------------\n reward_lst=pd.read_csv('./tunecases/case{}/case{}_log/{}'.format(casenum,casenum, csvfile[0]), usecols=['reward']).values\n mean_reward=np.mean(reward_lst[-self.n_last_episodes:])\n max_reward=np.max(reward_lst)\n \n with open (self.csvlogger, 'a') as fout:\n fout.write(str(casenum) +',')\n [fout.write(str(item) + ',') for item in ind]\n fout.write(str(mean_reward) + ',' + str(max_reward) + '\\n')\n \n return mean_reward\n \n except:\n print('--error: case{}.inp failed during execution'.format(casenum))\n \n return 'case{}.inp:failed'.format(casenum)\n \n def gen_object(self, inp):\n \"\"\"\n This is a worker for the multiprocess Pool \n Inputs:\n -inp (list of lists): contains data for each core [[ind1, caseid1], ..., [indN, caseidN]]\n Returns: \n -fitness value (float)\n \"\"\"\n return self.fit(inp[0], inp[1])\n \n def select(self, pop):\n \"\"\"\n Selection function sorts the population from max to min based on fitness and selects the k best\n Inputs:\n -pop (dict): population in dictionary structure\n -k (int): top k individuals are selected\n Returns:\n -best_dict (dict): the new orded dictionary with top k selected\n \"\"\"\n k=self.MU\n pop=list(pop.items())\n pop.sort(key=lambda e: e[1][2], reverse=True)\n sorted_dict=dict(pop[:k])\n\n # This block creates a new dict where keys are reset to 0 ... k in order to avoid unordered keys after sort \n best_dict=defaultdict(list)\n index=0\n for key in sorted_dict:\n best_dict[index].append(sorted_dict[key][0])\n best_dict[index].append(sorted_dict[key][1])\n best_dict[index].append(sorted_dict[key][2])\n index+=1 \n\n sorted_dict.clear()\n return best_dict\n \n def cx(self, ind1, ind2, strat1, strat2):\n \"\"\"\n Executes a classical two points crossover on both the individuals and their strategy. \n The individuals/strategies should be a list. The crossover points for the individual and the \n strategy are the same. 
\n\n Inputs:\n -ind1 (list): The first individual participating in the crossover. \n -ind2 (list): The second individual participating in the crossover.\n -strat1 (list): The first evolution strategy participating in the crossover. \n -strat2 (list): The second evolution strategy \n Returns:\n - The new ind1, ind2, strat1, strat2, after crossover in list form\n \"\"\"\n \n #for item in ind1:\n # print('individual 1', type(item))\n #for item in ind2:\n # print('individual 2', type(item))\n #for item in strat1:\n # print('strategy 1', type(item))\n #for item in strat2:\n # print('strategy 2', type(item))\n \n size = min(len(ind1), len(ind2))\n\n pt1 = random.randint(1, size)\n pt2 = random.randint(1, size-1)\n if pt2 >= pt1:\n pt2 +=1\n else:\n pt1, pt2 = pt2, pt1\n \n ind1[pt1:pt2], ind2[pt1:pt2] = ind2[pt1:pt2], ind1[pt1:pt2]\n strat1[pt1:pt2], strat2[pt1:pt2] = strat2[pt1:pt2], strat1[pt1:pt2]\n\n return ind1, ind2, strat1, strat2 \n \n def mutES(self, ind, strat):\n \"\"\"\n Mutate an evolution strategy according to mixed Discrete/Continuous mutation rules \n Input:\n -ind (list): individual to be mutated\n -strat (list): individual strategy to be mutated \n Returns:\n -ind (list): new individual after mutation\n -strat (list): individual strategy after mutation\n \"\"\"\n size=len(ind)\n tau=1/np.sqrt(2*size)\n tau_prime=1/np.sqrt(2*np.sqrt(size))\n \n for i in range(size):\n # Grid distribution received\n if self.datatypes[i] == \"grid\":\n #if i in self.paraminds.keys():\n norm=random.gauss(0,1)\n # modify the ind strategy\n strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))\n # make a transformation of strategy to ensure it is between smin, smax \n y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)\n if np.floor(y) % 2 == 0:\n y_prime=np.abs(y-np.floor(y))\n else:\n y_prime=1-np.abs(y-np.floor(y))\n strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime\n\n # check if this attribute is mutated based on the updated strategy\n if random.random() < strat[i]:\n # make a list of possibilities after excluding the current value to enforce mutation\n paramname=self.paraminds[i]\n ind[i]=random.sample(self.paramvals[paramname], 1)[0]\n\n # Random integer distribution received\n elif self.datatypes[i] == \"int\":\n norm=random.gauss(0,1)\n # modify the ind strategy \n strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))\n # make a transformation of strategy to ensure it is between smin, smax \n y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)\n if np.floor(y) % 2 == 0:\n y_prime=np.abs(y-np.floor(y))\n else:\n y_prime=1-np.abs(y-np.floor(y))\n strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime\n\n # check if this attribute is mutated based on the updated strategy \n #if random.random() < strat[i]:\n # make a list of possibilities after excluding the current value to enforce mutation\n choices=list(range(self.LOW[i], self.UP[i]+1))\n choices.remove(ind[i])\n ind[i] = random.choice(choices)\n\n # Uniform float distribution received\n elif self.datatypes[i] == \"float\":\n norm=random.gauss(0,1)\n if random.random() < self.INDPB: # this indicates whether ind/strategy to be mutated or not for this float variable\n strat[i] *= np.exp(tau*norm + tau_prime * random.gauss(0,1)) # normal mutation strategy\n ind[i] += strat[i] * random.gauss(0,1) # update the individual position\n \n # check if the new individual falls within lower/uppder boundaries\n if ind[i] < self.LOW[i]:\n ind[i] = self.LOW[i]\n if ind[i] > self.UP[i]:\n ind[i] = self.UP[i]\n 
\n else:\n raise Exception('ES mutation strategy works with int, float, or grid distributions, the type provided cannot be interpreted')\n \n return ind, strat\n \n def GenOffspring(self, pop):\n \"\"\"\n This function generates the offspring by applying crossover, mutation, OR reproduction. \n Inputs:\n -pop (dict): population in dictionary structure\n Returns:\n -offspring (dict): new modified population in dictionary structure\n \"\"\"\n\n pop_indices=list(range(0,len(pop)))\n offspring=defaultdict(list)\n for i in range(self.popsize):\n alpha=random.random()\n #----------------------\n # Crossover\n #----------------------\n if alpha < self.CXPB:\n index1, index2=random.sample(pop_indices,2)\n ind1, ind2, strat1, strat2=self.cx(ind1=list(pop[index1][0]), ind2=list(pop[index2][0]),\n strat1=list(pop[index1][1]), strat2=list(pop[index2][1]))\n offspring[i].append(ind1)\n offspring[i].append(strat1)\n #print('crossover is done for sample {} between {} and {}'.format(i,index1,index2))\n #----------------------\n # Mutation\n #----------------------\n elif alpha < self.CXPB + self.MUTPB: # Apply mutation\n index = random.choice(pop_indices)\n \n ind, strat=self.mutES(ind=list(pop[index][0]), strat=list(pop[index][1]))\n offspring[i].append(ind)\n offspring[i].append(strat)\n #print('mutation is done for sample {} based on {}'.format(i,index))\n #------------------------------\n # Reproduction from population\n #------------------------------\n else:\n index=random.choice(pop_indices)\n offspring[i].append(pop[index][0])\n offspring[i].append(pop[index][1])\n #print('reproduction is done for sample {} based on {}'.format(i,index))\n return offspring \n\n def run_cases(self):\n \"\"\"\n This function runs the evolutioanry algorithm over self.ngens generations. 
\n \"\"\"\n #------------------------------\n # Begin the evolution process\n #------------------------------\n with open (self.csvlogger, 'w') as fout:\n fout.write('caseid, ')\n [fout.write(item + ',') for item in self.param_names]\n fout.write('mean_reward,max_reward\\n')\n\n #print('PARAM dict', self.param_dict)\n #print('PARAM types', self.datatypes)\n self.population=self.init_pop()\n case_idx=0\n self.currentcase=self.popsize+1\n for gen in range(1, self.ngens): \n case_idx=0\n caseids=['ind{}'.format(ind) for ind in range(self.currentcase, self.currentcase+self.popsize+1)]\n # Vary the population and generate new offspring\n offspring=self.GenOffspring(pop=self.population)\n\n # Evaluate the individuals with invalid fitness using multiprocessing Pool\n if self.ncores > 1:\n core_list=[]\n for key in offspring:\n core_list.append([offspring[key][0], caseids[case_idx]])\n case_idx+=1\n # initialize a pool\n p=Pool(self.ncores)\n fitness=p.map(self.gen_object, core_list)\n p.close(); p.join()\n\n [offspring[ind].append(fitness[ind]) for ind in range(len(offspring))]\n else:\n for ind in range(len(offspring)):\n fitness=self.fit(offspring[ind][0], caseids[case_idx])\n case_idx+=1\n offspring[ind].append(fitness)\n \n self.currentcase+=self.popsize\n # Select the next generation population \n self.population = copy.deepcopy(self.select(pop=offspring))\n \n\n csvdata=pd.read_csv('tune.csv')\n asc_data=csvdata.sort_values(by=['caseid'],ascending=True)\n des_data=csvdata.sort_values(by=['mean_reward'],ascending=False)\n des_data2=csvdata.sort_values(by=['max_reward'],ascending=False)\n asc_data.to_csv('tune.csv', index=False)\n\n mean = np.mean(des_data.iloc[:,4:5])\n totalmean=mean.tolist()[0]\n \n try:\n failed_cases=len([print ('failed') for item in self.population if isinstance(item, str)])\n except:\n failed_cases='NA'\n \n print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n print('Mean Rewards for all cases=', totalmean)\n print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n print ('All TUNE CASES ARE COMPLETED')\n print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n print('--debug: Check tunesummary.txt file for best hyperparameters found')\n print('--debug: Check tune.csv file for complete csv logger of all cases results')\n print('--debug: Check tunecases directory for case-by-case detailed results')\n \n with open ('tunesummary.txt', 'w') as fout:\n \n fout.write(self.logo)\n fout.write('*****************************************************\\n')\n fout.write('Summary for the TUNE case \\n')\n fout.write('*****************************************************\\n')\n fout.write('Number of cases evaluated: {} \\n'.format(self.ncases))\n fout.write('Number of failed cases: {} \\n'.format(failed_cases))\n fout.write('Parameter names: {} \\n'.format(self.param_names))\n fout.write('Parameter values: {} \\n '.format(self.param_dict))\n fout.write ('--------------------------------------------------------------------------------------\\n')\n \n if des_data.shape[0] < 20:\n top=des_data.shape[0]\n fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \\n'.format(top))\n fout.write(des_data.iloc[:top].to_string(index=False))\n else:\n top=20\n fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \\n'.format(top))\n fout.write(des_data.iloc[:top].to_string(index=False))\n fout.write ('\\n')\n fout.write 
('--------------------------------------------------------------------------------------\\n')\n if des_data2.shape[0] < 20:\n top=des_data2.shape[0]\n fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \\n'.format(top))\n fout.write(des_data2.iloc[:top].to_string(index=False))\n else:\n top=20\n fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \\n'.format(top))\n fout.write(des_data2.iloc[:top].to_string(index=False))"
] | [
[
"pandas.read_csv",
"numpy.floor",
"numpy.max",
"numpy.sqrt",
"numpy.mean"
]
] |
zhuriheng/faster-rcnn.pytorch | [
"7536b0f5eee254350fb4dce5c4a077ac6d29db16"
] | [
"test_net.py"
] | [
"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\n\nimport cv2\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\n\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"models\",\n type=str)\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--ls', dest='large_scale',\n help='whether use large imag scale',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n parser.add_argument('--input_dir', dest='input_dir',\n help='directory to save models',\n type=str)\n args = parser.parse_args()\n return args\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n np.random.seed(cfg.RNG_SEED)\n if args.dataset == 
\"pascal_voc\":\n args.imdb_name = \"voc_2007_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"pascal_voc_0712\":\n args.imdb_name = \"voc_2007_trainval+voc_2012_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n args.imdbval_name = \"coco_2014_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet\":\n args.imdb_name = \"imagenet_train\"\n args.imdbval_name = \"imagenet_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"vg\":\n args.imdb_name = \"vg_150-50-50_minitrain\"\n args.imdbval_name = \"vg_150-50-50_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n\n args.cfg_file = \"cfgs/{}/{}_ls.yml\".format(args.dataset, args.net) if args.large_scale else \"cfgs/{}/{}.yml\".format(\n args.dataset, args.net)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n cfg.TRAIN.USE_FLIPPED = False\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n input_dir = args.input_dir\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n\n print('load model successfully!')\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n fasterRCNN.cuda()\n\n start = time.time()\n max_per_image = 100\n\n vis = args.vis\n\n if vis:\n thresh = 0.05\n else:\n thresh = 0.0\n\n save_name = 'faster_rcnn_10'\n num_images = len(imdb.image_index)\n all_boxes = [[[] for _ in xrange(num_images)]\n for _ in xrange(imdb.num_classes)]\n\n output_dir = 
get_output_dir(imdb, save_name)\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \\\n imdb.num_classes, training=False, normalize = False)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,\n shuffle=False, num_workers=0,\n pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n det_file = os.path.join(output_dir, 'detections.pkl')\n\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))\n for i in range(num_images):\n\n data = next(data_iter)\n im_data.data.resize_(data[0].size()).copy_(data[0])\n im_info.data.resize_(data[1].size()).copy_(data[1])\n gt_boxes.data.resize_(data[2].size()).copy_(data[2])\n num_boxes.data.resize_(data[3].size()).copy_(data[3])\n\n det_tic = time.time()\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n _ = torch.from_numpy(np.tile(boxes, (1, scores.shape[1])))\n pred_boxes = _.cuda() if args.cuda > 0 else _\n\n pred_boxes /= data[1][0][2].item()\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im = cv2.imread(imdb.image_path_at(i))\n im2show = np.copy(im)\n for j in xrange(1, imdb.num_classes):\n inds = torch.nonzero(scores[:,j]>thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:,j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)\n all_boxes[j][i] = cls_dets.cpu().numpy()\n else:\n all_boxes[j][i] = empty_array\n\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1]\n for j in xrange(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in xrange(1, imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r' \\\n .format(i + 1, num_images, 
detect_time, nms_time))\n sys.stdout.flush()\n\n if vis:\n cv2.imwrite('result.png', im2show)\n pdb.set_trace()\n #cv2.imshow('test', im2show)\n #cv2.waitKey(0)\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n print('Evaluating detections')\n imdb.evaluate_detections(all_boxes, output_dir)\n\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"numpy.tile",
"torch.nonzero",
"torch.load",
"numpy.sort",
"torch.autograd.Variable",
"numpy.random.seed",
"numpy.copy",
"torch.cuda.is_available",
"numpy.array",
"numpy.where",
"torch.LongTensor",
"torch.sort"
]
] |
hirune924/kaggle-HuBMAP | [
"e4c2008378eb773db551cee52380bfccdf3a10fa"
] | [
"system/system.py"
] | [
"import pytorch_lightning as pl\nfrom loss.loss import get_loss\nfrom optimizer.optimizer import get_optimizer\nfrom scheduler.scheduler import get_scheduler\n\nimport torch\nimport numpy as np\nfrom pytorch_lightning.metrics import Accuracy\nimport segmentation_models_pytorch as smp\n\nfrom utils.utils import load_obj\nimport albumentations as A\nfrom utils.preprocessing import *\nimport shutil\n\n\n\nclass LitClassifier(pl.LightningModule):\n def __init__(self, hparams, model):\n super().__init__()\n self.save_hyperparameters(hparams)\n self.model = model\n self.criteria = get_loss(hparams.training.loss)\n #self.accuracy = Accuracy()\n self.dice = smp.utils.losses.DiceLoss(activation='sigmoid')\n\n def forward(self, x):\n # use forward for inference/predictions\n return self.model(x)\n\n def configure_optimizers(self):\n optimizer = get_optimizer(self.model.parameters(), self.hparams.training.optimizer)\n\n scheduler = get_scheduler(optimizer, self.hparams.training.scheduler)\n \n return [optimizer], [scheduler]\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n if self.hparams.dataset.mixup:\n num_batch = self.hparams.dataset.batch_size\n alpha = 0.2\n #rnd = torch.from_numpy(np.random.beta(alpha,alpha,int(num_batch/2))).type_as(x)\n #rnd = rnd.reshape(int(num_batch/2), 1, 1, 1)\n #x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)\n #y = y[:int(num_batch/2)]*rnd + y[int(num_batch/2):]*(1-rnd)\n rnd = torch.from_numpy(np.random.beta(alpha,alpha,1)).type_as(x)\n x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)\n y_hat = self.model(x)\n if self.hparams.dataset.mixup:\n loss = self.criteria(y_hat, y[:int(num_batch/2)])*rnd + self.criteria(y_hat, y[int(num_batch/2):])*(1-rnd)\n else:\n loss = self.criteria(y_hat, y)\n self.log('train_loss', loss, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.model(x)\n loss = self.criteria(y_hat, y)\n dice = 1-self.dice(y_hat, y)\n\n #self.log('val_loss', loss)\n #self.log('val_dice', dice)\n\n return {\n \"val_loss\": loss,\n \"val_dice\": dice\n }\n \n def validation_epoch_end(self, outputs):\n avg_val_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n avg_val_dice = torch.stack([x[\"val_dice\"] for x in outputs]).mean()\n\n self.log('val_loss', avg_val_loss)\n self.log('val_dice', avg_val_dice)\n #y = torch.cat([x[\"y\"] for x in outputs]).cpu()\n #y_hat = torch.cat([x[\"y_hat\"] for x in outputs]).cpu()\n\n #preds = np.argmax(y_hat, axis=1)\n\n #val_accuracy = self.accuracy(y, preds)\n\n #self.log('avg_val_loss', avg_val_loss)\n #self.log('val_acc', val_accuracy)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.model(x)\n loss = self.criteria(y_hat, y)\n self.log('test_loss', loss)\n\n \n"
] | [
[
"numpy.random.beta",
"torch.stack"
]
] |
han-kwang/coronatest-scandata | [
"98fd49f4fdcda10561bce41e769bbbb70ecfe94e"
] | [
"coronatest_analyze_csv.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Analyze CSV file into scores.\n\nCreated on Sat Feb 12 22:15:29 2022 // @hk_nien\n\"\"\"\nfrom pathlib import Path\nimport os\nimport re\nimport sys\nimport pandas as pd\nimport numpy as np\n\nPCODES = dict([\n # Regio Noord\n (1011, 'Amsterdam'),\n (1625, 'Hoorn|Zwaag'),\n (1811, 'Alkmaar'),\n (7471, 'Goor'),\n (7556, 'Hengelo'),\n (7903, 'Hoogeveen'),\n (7942, 'Meppel'),\n (8011, 'Zwolle'),\n (8232, 'Lelystad'),\n (8442, 'Heerenveen'),\n (8911, 'Leeuwarden'),\n (9291, 'Kollum'),\n (9501, 'Stadskanaal'),\n (9726, 'Groningen'),\n\n # Regio Midden\n (2406, 'Alphen a/d Rijn'),\n (2515, 'Den Haag'),\n (3013, 'Rotterdam'),\n (3511, 'Utrecht'),\n (3901, 'Veenendaal'),\n ((7137, 7131), 'Lichtenvoorde|Groenlo'),\n (7311, 'Apeldoorn'),\n\n # Regio Zuid\n (4325, 'Renesse'),\n (4462, 'Goes'),\n (4701, 'Roosendaal'),\n (5038, 'Tilburg'),\n (5401, 'Uden'),\n (5611, 'Eindhoven'),\n (5801, 'Oostrum'),\n (6101, 'Echt'),\n (6229, 'Maastricht'),\n (6541, 'Nijmegen'),\n ])\n\n\ndef get_bad_scan_times():\n \"\"\"Return list of Timestamps with bad scan times, from CSV data.\"\"\"\n df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')\n tstamps = pd.to_datetime(df['Timestamp']).to_list()\n return tstamps\n\ndef _mean_time(ts_list):\n \"\"\"Return mean timestamp value from list of timestamps.\"\"\"\n ts0 = ts_list[0]\n delta_sum = pd.Timedelta(0)\n for ts in ts_list:\n delta_sum += (ts -ts0)\n ts_mean = ts0 + delta_sum / len(ts_list)\n return ts_mean\n\n\ndef _delta_time_hhmm(hm):\n \"\"\"Convert 'hh:mm' string to TimeDelta.\"\"\"\n return pd.Timedelta(f'{hm}:00')\n\n\ndef _summary_to_scores(summary):\n \"\"\"Convert summary from _read_log to scores dict and effective timestamp.\n\n Parameters:\n\n - summary: dict with int(pc4) -> [(query_time, appt_time), ...]\n\n Return:\n\n - scores dict: int(pc4) -> score (int or float or '?')\n - timestamp: middle query timestamp of this run.\n \"\"\"\n\n # Convert to number codes.\n scores = {k: '?' 
for k in PCODES}\n multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)\n for pc in PCODES:\n if isinstance(pc, tuple):\n for pc1 in pc:\n multi_pcs[pc1] = pc\n\n qtms = []\n dhm = _delta_time_hhmm\n for pc4, vlist in summary.items():\n pc4 = int(pc4)\n if pc4 not in scores:\n if pc4 in multi_pcs:\n pc4_key = multi_pcs[pc4]\n else:\n print(f'{pc4} not in list...')\n continue\n else:\n pc4_key = pc4\n if len(vlist) == 0:\n scores[pc4_key] = 7\n continue\n qtm = _mean_time([v[0] for v in vlist]) # query time\n qtms.append(qtm)\n atm = min(v[1] for v in vlist) # earliest appointment time\n qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))\n thresholds = [\n (3, qtm_00 + dhm('23:59')),\n (4, qtm + dhm('24:00')),\n (5, qtm_00 + dhm('48:00')),\n (6, qtm + dhm('48:00')),\n (6.3, qtm_00 + dhm('72:00')),\n (6.7, qtm + dhm('72:00')),\n (7, atm)\n ]\n if qtm.hour < 9:\n thresholds.insert(0, (1, qtm_00 + dhm('13:00')))\n elif qtm.hour < 13:\n thresholds.insert(0, (1, qtm + dhm('4:00')))\n elif qtm.hour < 17:\n thresholds.insert(0, (1, qtm_00 + dhm('24:00')))\n thresholds.insert(1, (2, qtm + dhm('20:00')))\n else:\n thresholds.insert(0, (1, qtm_00 + dhm('24:00')))\n thresholds.insert(1, (2, qtm_00 + dhm('37:00')))\n\n for s, tm in thresholds:\n if atm < tm:\n scores[pc4_key] = s\n break\n if len(qtms) == 0:\n qtm_mid = pd.Timestamp(None)\n else:\n qtm_min = min(qtms)\n qtm_mid = qtm_min + (max(qtms) - qtm_min)/2\n return scores, qtm_mid\n\n\ndef _get_min_wait(summary):\n \"\"\"Return minimum and median wait Timedelta between scan time and appointment.\n\n summary is dict of pc4 -> list of timestamps\n No data -> 999 h.\n\n For the median, NaT is counted as infinite.\n \"\"\"\n wtimes = []\n for _, vlist in summary.items():\n wtimes_this = [atm - qtm for qtm, atm in vlist]\n wtimes.append(\n min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')\n )\n minwait = min(wtimes) if wtimes else 999\n medwait = pd.Timedelta(np.median(wtimes))\n return minwait, medwait\n\n\ndef load_csv(csv_fname):\n \"\"\"Return DataFrame and list of start times (+1).\"\"\"\n df = pd.read_csv(csv_fname, comment='#')\n df['req_pc4'] = df['req_pc4'].astype(int)\n\n for c in df.columns:\n if c.endswith('_time') or c.endswith('_date'):\n df[c] = pd.to_datetime(df[c])\n else:\n df.loc[df[c].isna(), c] = None\n\n # start_tms: list of scan start times (plus one extra at the end)\n start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']\n start_tms = [df.iloc[0]['scan_time']] + list(start_tms)\n start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]\n return df, start_tms\n\ndef load_multi_csvs(csv_fnames):\n \"\"\"Return DataFrame and list of start times (+1)\"\"\"\n dfs = []\n start_tms = []\n for f in csv_fnames:\n df, st = load_csv(f)\n dfs.append(df)\n start_tms.extend(st[:-1])\n df = pd.concat(dfs).reset_index()\n start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))\n return df, start_tms\n\n\ndef get_scan_scores(df, tm_range):\n \"\"\"Get scan scores as pc4 -> score dict.\n\n Parameters:\n\n - df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,\n opt0_time, opt0_loc_id, etc.\n - tm_range: (tm_start, tm_stop) timestamps.\n\n Return:\n\n - tstamp: timestamp of the scan (mid-point)\n - scores: dict of pc4->score\n - min_wait: Timedelta of minimum wait time from scan to appointment\n \"\"\"\n mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])\n df1 = df.loc[mask]\n summary = {}\n for pc4, city_re in PCODES.items():\n pc4_tup = (pc4,) if 
isinstance(pc4, int) else pc4\n options = []\n req_pc4 = None\n for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():\n req_pc4 = int(row['req_pc4'])\n for i in range(3):\n addr = row[f'opt{i}_short_addr']\n if addr and re.match(f'{city_re}$', addr[5:]):\n options.append((row['scan_time'], row[f'opt{i}_time']))\n if req_pc4 is not None:\n summary[req_pc4] = options\n scores, tstamp = _summary_to_scores(summary)\n if pd.isna(tstamp):\n tstamp = df1.iloc[len(df1)//2]['scan_time']\n minwait, medwait = _get_min_wait(summary)\n if medwait == 999:\n medwait = pd.Timedelta(None)\n return tstamp, scores, minwait, medwait\n\n\ndef get_scan_scores_df(df, tm_ranges, decimal_comma=True):\n \"\"\"Get scan scores as dataframe, from csv dataframe.\n\n Blacklisted scan times are dropped.\n\n Parameters:\n\n - df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,\n opt0_time, opt0_loc_id, etc.\n - tm_ranges: list of timestamps (+one at the end) with boundaries\n of timestamp ranges.\n - decimal_comma: True to have string values 6,3 rather than float 6.3.\n\n Return:\n\n - Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.\n \"\"\"\n n = len(tm_ranges)\n records = []\n index = []\n minwait_hs = []\n medwait_hs = []\n bad_stimes = get_bad_scan_times()\n for i in range(n-1):\n tm_ra = tm_ranges[i:i+2]\n is_ok = True\n for tm in bad_stimes:\n if tm_ra[0] <= tm < tm_ra[1]:\n is_ok = False\n break\n if not is_ok:\n print(f'Dropped scan at {tm_ra[0].strftime(\"%Y-%m-%d %H:%M\")}')\n continue\n tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)\n records.append(scores)\n index.append(tm)\n minwait_hs.append(minwait.total_seconds() / 3600)\n medwait_hs.append(medwait.total_seconds() / 3600)\n\n dates = [t.strftime('%Y-%m-%d') for t in index]\n times = [t.strftime('%H:%M') for t in index]\n sdf = pd.DataFrame.from_records(records)\n sdf.insert(0, 'Time', times)\n sdf.insert(0, 'Date', dates)\n sdf['min_wait_h'] = np.around(minwait_hs, 2)\n sdf['med_wait_h'] = np.around(medwait_hs, 2)\n sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999\n sdf.columns = [\n ('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)\n for c in sdf.columns\n ]\n if decimal_comma:\n for c in sdf.columns[2:]:\n sdf[c] = sdf[c].astype(str)\n sdf[c] = sdf[c].str.replace('.', ',', regex=False)\n sdf[c] = sdf[c].str.replace(',0$', '', regex=False)\n sdf[c] = sdf[c].str.replace('?', '', regex=False)\n\n return sdf\n\n\nif __name__ == '__main__':\n\n in_spyder = ('SPYDER_ARGS' in os.environ)\n csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))\n do_all = ('--all' in sys.argv)\n do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'\n if do_all:\n df, start_tms = load_multi_csvs(csv_fnames)\n sdf = get_scan_scores_df(df, start_tms).iloc[::-1]\n else:\n df, start_tms = load_csv(csv_fnames[-1])\n sdf = get_scan_scores_df(df, start_tms[-2:])\n print(sdf)\n if len(sdf) > 1:\n sdf.to_clipboard(index=False)\n print('Copied to clipboard including headers')\n elif len(sdf) == 1:\n sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)\n print('Copied to clipboard, scores only.')\n else:\n print('No output.')\n\n if not in_spyder:\n # Note: in Spyder, copy/paste will stall while input is blocked.\n input('Press Enter to quit and clear clipboard.')\n\n"
] | [
[
"pandas.read_csv",
"numpy.median",
"pandas.Timedelta",
"pandas.DataFrame.from_records",
"pandas.to_datetime",
"pandas.concat",
"numpy.around",
"pandas.Timestamp",
"pandas.isna"
]
] |
hardywu/vnpy | [
"81ab73dc57d12a3ff7c74c73665513b46fc0f668"
] | [
"vnpy/app/portfolio_strategy/backtesting.py"
] | [
"from collections import defaultdict\nfrom datetime import date, datetime, timedelta\nfrom typing import Dict, List, Set, Tuple\nfrom functools import lru_cache\nfrom copy import copy\nimport traceback\n\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom pandas import DataFrame\n\nfrom vnpy.trader.constant import Direction, Offset, Interval, Status\nfrom vnpy.trader.database import database_manager\nfrom vnpy.trader.object import OrderData, TradeData, BarData\nfrom vnpy.trader.utility import round_to, extract_vt_symbol\n\nfrom .template import StrategyTemplate\n\n\nINTERVAL_DELTA_MAP = {\n Interval.MINUTE: timedelta(minutes=1),\n Interval.HOUR: timedelta(hours=1),\n Interval.DAILY: timedelta(days=1),\n}\n\n\nclass BacktestingEngine:\n \"\"\"\"\"\"\n\n gateway_name = \"BACKTESTING\"\n\n def __init__(self):\n \"\"\"\"\"\"\n self.vt_symbols: List[str] = []\n self.start: datetime = None\n self.end: datetime = None\n\n self.rates: Dict[str, float] = 0\n self.slippages: Dict[str, float] = 0\n self.sizes: Dict[str, float] = 1\n self.priceticks: Dict[str, float] = 0\n\n self.capital: float = 1_000_000\n self.risk_free: float = 0.02\n\n self.strategy: StrategyTemplate = None\n self.bars: Dict[str, BarData] = {}\n self.datetime: datetime = None\n\n self.interval: Interval = None\n self.days: int = 0\n self.history_data: Dict[Tuple, BarData] = {}\n self.dts: Set[datetime] = set()\n\n self.limit_order_count = 0\n self.limit_orders = {}\n self.active_limit_orders = {}\n\n self.trade_count = 0\n self.trades = {}\n\n self.logs = []\n\n self.daily_results = {}\n self.daily_df = None\n\n def clear_data(self) -> None:\n \"\"\"\n Clear all data of last backtesting.\n \"\"\"\n self.strategy = None\n self.bars = {}\n self.datetime = None\n\n self.limit_order_count = 0\n self.limit_orders.clear()\n self.active_limit_orders.clear()\n\n self.trade_count = 0\n self.trades.clear()\n\n self.logs.clear()\n self.daily_results.clear()\n self.daily_df = None\n\n def set_parameters(\n self,\n vt_symbols: List[str],\n interval: Interval,\n start: datetime,\n rates: Dict[str, float],\n slippages: Dict[str, float],\n sizes: Dict[str, float],\n priceticks: Dict[str, float],\n capital: int = 0,\n end: datetime = None,\n risk_free: float = 0\n ) -> None:\n \"\"\"\"\"\"\n self.vt_symbols = vt_symbols\n self.interval = interval\n\n self.rates = rates\n self.slippages = slippages\n self.sizes = sizes\n self.priceticks = priceticks\n\n self.start = start\n self.end = end\n self.capital = capital\n self.risk_free = risk_free\n\n def add_strategy(self, strategy_class: type, setting: dict) -> None:\n \"\"\"\"\"\"\n self.strategy = strategy_class(\n self, strategy_class.__name__, copy(self.vt_symbols), setting\n )\n\n def load_data(self) -> None:\n \"\"\"\"\"\"\n self.output(\"开始加载历史数据\")\n\n if not self.end:\n self.end = datetime.now()\n\n if self.start >= self.end:\n self.output(\"起始日期必须小于结束日期\")\n return\n\n # Clear previously loaded history data\n self.history_data.clear()\n self.dts.clear()\n\n # Load 30 days of data each time and allow for progress update\n progress_delta = timedelta(days=30)\n total_delta = self.end - self.start\n interval_delta = INTERVAL_DELTA_MAP[self.interval]\n\n for vt_symbol in self.vt_symbols:\n start = self.start\n end = self.start + progress_delta\n progress = 0\n\n data_count = 0\n while start < self.end:\n end = min(end, self.end) # Make sure end time stays within set range\n\n data = load_bar_data(\n vt_symbol,\n self.interval,\n start,\n end\n 
)\n\n for bar in data:\n self.dts.add(bar.datetime)\n self.history_data[(bar.datetime, vt_symbol)] = bar\n data_count += 1\n\n progress += progress_delta / total_delta\n progress = min(progress, 1)\n progress_bar = \"#\" * int(progress * 10)\n self.output(f\"{vt_symbol}加载进度:{progress_bar} [{progress:.0%}]\")\n\n start = end + interval_delta\n end += (progress_delta + interval_delta)\n\n self.output(f\"{vt_symbol}历史数据加载完成,数据量:{data_count}\")\n\n self.output(\"所有历史数据加载完成\")\n\n def run_backtesting(self) -> None:\n \"\"\"\"\"\"\n self.strategy.on_init()\n\n # Generate sorted datetime list\n dts = list(self.dts)\n dts.sort()\n\n # Use the first [days] of history data for initializing strategy\n day_count = 0\n ix = 0\n\n for ix, dt in enumerate(dts):\n if self.datetime and dt.day != self.datetime.day:\n day_count += 1\n if day_count >= self.days:\n break\n\n try:\n self.new_bars(dt)\n except Exception:\n self.output(\"触发异常,回测终止\")\n self.output(traceback.format_exc())\n return\n\n self.strategy.inited = True\n self.output(\"策略初始化完成\")\n\n self.strategy.on_start()\n self.strategy.trading = True\n self.output(\"开始回放历史数据\")\n\n # Use the rest of history data for running backtesting\n for dt in dts[ix:]:\n try:\n self.new_bars(dt)\n except Exception:\n self.output(\"触发异常,回测终止\")\n self.output(traceback.format_exc())\n return\n\n self.output(\"历史数据回放结束\")\n\n def calculate_result(self) -> None:\n \"\"\"\"\"\"\n self.output(\"开始计算逐日盯市盈亏\")\n\n if not self.trades:\n self.output(\"成交记录为空,无法计算\")\n return\n\n # Add trade data into daily reuslt.\n for trade in self.trades.values():\n d = trade.datetime.date()\n daily_result = self.daily_results[d]\n daily_result.add_trade(trade)\n\n # Calculate daily result by iteration.\n pre_closes = {}\n start_poses = {}\n\n for daily_result in self.daily_results.values():\n daily_result.calculate_pnl(\n pre_closes,\n start_poses,\n self.sizes,\n self.rates,\n self.slippages,\n )\n\n pre_closes = daily_result.close_prices\n start_poses = daily_result.end_poses\n\n # Generate dataframe\n results = defaultdict(list)\n\n for daily_result in self.daily_results.values():\n fields = [\n \"date\", \"trade_count\", \"turnover\",\n \"commission\", \"slippage\", \"trading_pnl\",\n \"holding_pnl\", \"total_pnl\", \"net_pnl\"\n ]\n for key in fields:\n value = getattr(daily_result, key)\n results[key].append(value)\n\n self.daily_df = DataFrame.from_dict(results).set_index(\"date\")\n\n self.output(\"逐日盯市盈亏计算完成\")\n return self.daily_df\n\n def calculate_statistics(self, df: DataFrame = None, output=True) -> None:\n \"\"\"\"\"\"\n self.output(\"开始计算策略统计指标\")\n\n # Check DataFrame input exterior\n if df is None:\n df = self.daily_df\n\n # Check for init DataFrame\n if df is None:\n # Set all statistics to 0 if no trade.\n start_date = \"\"\n end_date = \"\"\n total_days = 0\n profit_days = 0\n loss_days = 0\n end_balance = 0\n max_drawdown = 0\n max_ddpercent = 0\n max_drawdown_duration = 0\n total_net_pnl = 0\n daily_net_pnl = 0\n total_commission = 0\n daily_commission = 0\n total_slippage = 0\n daily_slippage = 0\n total_turnover = 0\n daily_turnover = 0\n total_trade_count = 0\n daily_trade_count = 0\n total_return = 0\n annual_return = 0\n daily_return = 0\n return_std = 0\n sharpe_ratio = 0\n return_drawdown_ratio = 0\n else:\n # Calculate balance related time series data\n df[\"balance\"] = df[\"net_pnl\"].cumsum() + self.capital\n df[\"return\"] = np.log(df[\"balance\"] / df[\"balance\"].shift(1)).fillna(0)\n df[\"highlevel\"] = (\n df[\"balance\"].rolling(\n 
min_periods=1, window=len(df), center=False).max()\n )\n df[\"drawdown\"] = df[\"balance\"] - df[\"highlevel\"]\n df[\"ddpercent\"] = df[\"drawdown\"] / df[\"highlevel\"] * 100\n\n # Calculate statistics value\n start_date = df.index[0]\n end_date = df.index[-1]\n\n total_days = len(df)\n profit_days = len(df[df[\"net_pnl\"] > 0])\n loss_days = len(df[df[\"net_pnl\"] < 0])\n\n end_balance = df[\"balance\"].iloc[-1]\n max_drawdown = df[\"drawdown\"].min()\n max_ddpercent = df[\"ddpercent\"].min()\n max_drawdown_end = df[\"drawdown\"].idxmin()\n\n if isinstance(max_drawdown_end, date):\n max_drawdown_start = df[\"balance\"][:max_drawdown_end].idxmax()\n max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days\n else:\n max_drawdown_duration = 0\n\n total_net_pnl = df[\"net_pnl\"].sum()\n daily_net_pnl = total_net_pnl / total_days\n\n total_commission = df[\"commission\"].sum()\n daily_commission = total_commission / total_days\n\n total_slippage = df[\"slippage\"].sum()\n daily_slippage = total_slippage / total_days\n\n total_turnover = df[\"turnover\"].sum()\n daily_turnover = total_turnover / total_days\n\n total_trade_count = df[\"trade_count\"].sum()\n daily_trade_count = total_trade_count / total_days\n\n total_return = (end_balance / self.capital - 1) * 100\n annual_return = total_return / total_days * 240\n daily_return = df[\"return\"].mean() * 100\n return_std = df[\"return\"].std() * 100\n\n if return_std:\n daily_risk_free = self.risk_free / np.sqrt(240)\n sharpe_ratio = (daily_return - daily_risk_free) / return_std * np.sqrt(240)\n else:\n sharpe_ratio = 0\n\n return_drawdown_ratio = -total_net_pnl / max_drawdown\n\n # Output\n if output:\n self.output(\"-\" * 30)\n self.output(f\"首个交易日:\\t{start_date}\")\n self.output(f\"最后交易日:\\t{end_date}\")\n\n self.output(f\"总交易日:\\t{total_days}\")\n self.output(f\"盈利交易日:\\t{profit_days}\")\n self.output(f\"亏损交易日:\\t{loss_days}\")\n\n self.output(f\"起始资金:\\t{self.capital:,.2f}\")\n self.output(f\"结束资金:\\t{end_balance:,.2f}\")\n\n self.output(f\"总收益率:\\t{total_return:,.2f}%\")\n self.output(f\"年化收益:\\t{annual_return:,.2f}%\")\n self.output(f\"最大回撤: \\t{max_drawdown:,.2f}\")\n self.output(f\"百分比最大回撤: {max_ddpercent:,.2f}%\")\n self.output(f\"最长回撤天数: \\t{max_drawdown_duration}\")\n\n self.output(f\"总盈亏:\\t{total_net_pnl:,.2f}\")\n self.output(f\"总手续费:\\t{total_commission:,.2f}\")\n self.output(f\"总滑点:\\t{total_slippage:,.2f}\")\n self.output(f\"总成交金额:\\t{total_turnover:,.2f}\")\n self.output(f\"总成交笔数:\\t{total_trade_count}\")\n\n self.output(f\"日均盈亏:\\t{daily_net_pnl:,.2f}\")\n self.output(f\"日均手续费:\\t{daily_commission:,.2f}\")\n self.output(f\"日均滑点:\\t{daily_slippage:,.2f}\")\n self.output(f\"日均成交金额:\\t{daily_turnover:,.2f}\")\n self.output(f\"日均成交笔数:\\t{daily_trade_count}\")\n\n self.output(f\"日均收益率:\\t{daily_return:,.2f}%\")\n self.output(f\"收益标准差:\\t{return_std:,.2f}%\")\n self.output(f\"Sharpe Ratio:\\t{sharpe_ratio:,.2f}\")\n self.output(f\"收益回撤比:\\t{return_drawdown_ratio:,.2f}\")\n\n statistics = {\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"total_days\": total_days,\n \"profit_days\": profit_days,\n \"loss_days\": loss_days,\n \"capital\": self.capital,\n \"end_balance\": end_balance,\n \"max_drawdown\": max_drawdown,\n \"max_ddpercent\": max_ddpercent,\n \"max_drawdown_duration\": max_drawdown_duration,\n \"total_net_pnl\": total_net_pnl,\n \"daily_net_pnl\": daily_net_pnl,\n \"total_commission\": total_commission,\n \"daily_commission\": daily_commission,\n \"total_slippage\": total_slippage,\n 
\"daily_slippage\": daily_slippage,\n \"total_turnover\": total_turnover,\n \"daily_turnover\": daily_turnover,\n \"total_trade_count\": total_trade_count,\n \"daily_trade_count\": daily_trade_count,\n \"total_return\": total_return,\n \"annual_return\": annual_return,\n \"daily_return\": daily_return,\n \"return_std\": return_std,\n \"sharpe_ratio\": sharpe_ratio,\n \"return_drawdown_ratio\": return_drawdown_ratio,\n }\n\n # Filter potential error infinite value\n for key, value in statistics.items():\n if value in (np.inf, -np.inf):\n value = 0\n statistics[key] = np.nan_to_num(value)\n\n self.output(\"策略统计指标计算完成\")\n return statistics\n\n def show_chart(self, df: DataFrame = None) -> None:\n \"\"\"\"\"\"\n # Check DataFrame input exterior\n if df is None:\n df = self.daily_df\n\n # Check for init DataFrame\n if df is None:\n return\n\n fig = make_subplots(\n rows=4,\n cols=1,\n subplot_titles=[\"Balance\", \"Drawdown\", \"Daily Pnl\", \"Pnl Distribution\"],\n vertical_spacing=0.06\n )\n\n balance_line = go.Scatter(\n x=df.index,\n y=df[\"balance\"],\n mode=\"lines\",\n name=\"Balance\"\n )\n drawdown_scatter = go.Scatter(\n x=df.index,\n y=df[\"drawdown\"],\n fillcolor=\"red\",\n fill='tozeroy',\n mode=\"lines\",\n name=\"Drawdown\"\n )\n pnl_bar = go.Bar(y=df[\"net_pnl\"], name=\"Daily Pnl\")\n pnl_histogram = go.Histogram(x=df[\"net_pnl\"], nbinsx=100, name=\"Days\")\n\n fig.add_trace(balance_line, row=1, col=1)\n fig.add_trace(drawdown_scatter, row=2, col=1)\n fig.add_trace(pnl_bar, row=3, col=1)\n fig.add_trace(pnl_histogram, row=4, col=1)\n\n fig.update_layout(height=1000, width=1000)\n fig.show()\n\n def update_daily_close(self, bars: Dict[str, BarData], dt: datetime) -> None:\n \"\"\"\"\"\"\n d = dt.date()\n\n close_prices = {}\n for bar in bars.values():\n close_prices[bar.vt_symbol] = bar.close_price\n\n daily_result = self.daily_results.get(d, None)\n\n if daily_result:\n daily_result.update_close_prices(close_prices)\n else:\n self.daily_results[d] = PortfolioDailyResult(d, close_prices)\n\n def new_bars(self, dt: datetime) -> None:\n \"\"\"\"\"\"\n self.datetime = dt\n\n bars: Dict[str, BarData] = {}\n for vt_symbol in self.vt_symbols:\n bar = self.history_data.get((dt, vt_symbol), None)\n\n # If bar data of vt_symbol at dt exists\n if bar:\n # Update bar data for crossing order\n self.bars[vt_symbol] = bar\n\n # Put bar into dict for strategy.on_bars update\n bars[vt_symbol] = bar\n # Otherwise, use previous close to backfill\n elif vt_symbol in self.bars:\n old_bar = self.bars[vt_symbol]\n\n bar = BarData(\n symbol=old_bar.symbol,\n exchange=old_bar.exchange,\n datetime=dt,\n open_price=old_bar.close_price,\n high_price=old_bar.close_price,\n low_price=old_bar.close_price,\n close_price=old_bar.close_price,\n gateway_name=old_bar.gateway_name\n )\n self.bars[vt_symbol] = bar\n\n self.cross_limit_order()\n self.strategy.on_bars(bars)\n\n self.update_daily_close(self.bars, dt)\n\n def cross_limit_order(self) -> None:\n \"\"\"\n Cross limit order with last bar/tick data.\n \"\"\"\n for order in list(self.active_limit_orders.values()):\n bar = self.bars[order.vt_symbol]\n\n long_cross_price = bar.low_price\n short_cross_price = bar.high_price\n long_best_price = bar.open_price\n short_best_price = bar.open_price\n\n # Push order update with status \"not traded\" (pending).\n if order.status == Status.SUBMITTING:\n order.status = Status.NOTTRADED\n self.strategy.update_order(order)\n\n # Check whether limit orders can be filled.\n long_cross = (\n order.direction == 
Direction.LONG\n and order.price >= long_cross_price\n and long_cross_price > 0\n )\n\n short_cross = (\n order.direction == Direction.SHORT\n and order.price <= short_cross_price\n and short_cross_price > 0\n )\n\n if not long_cross and not short_cross:\n continue\n\n # Push order update with status \"all traded\" (filled).\n order.traded = order.volume\n order.status = Status.ALLTRADED\n self.strategy.update_order(order)\n\n self.active_limit_orders.pop(order.vt_orderid)\n\n # Push trade update\n self.trade_count += 1\n\n if long_cross:\n trade_price = min(order.price, long_best_price)\n else:\n trade_price = max(order.price, short_best_price)\n\n trade = TradeData(\n symbol=order.symbol,\n exchange=order.exchange,\n orderid=order.orderid,\n tradeid=str(self.trade_count),\n direction=order.direction,\n offset=order.offset,\n price=trade_price,\n volume=order.volume,\n datetime=self.datetime,\n gateway_name=self.gateway_name,\n )\n\n self.strategy.update_trade(trade)\n self.trades[trade.vt_tradeid] = trade\n\n def load_bars(\n self,\n strategy: StrategyTemplate,\n days: int,\n interval: Interval\n ) -> None:\n \"\"\"\"\"\"\n self.days = days\n\n def send_order(\n self,\n strategy: StrategyTemplate,\n vt_symbol: str,\n direction: Direction,\n offset: Offset,\n price: float,\n volume: float,\n lock: bool,\n net: bool\n ) -> List[str]:\n \"\"\"\"\"\"\n price = round_to(price, self.priceticks[vt_symbol])\n symbol, exchange = extract_vt_symbol(vt_symbol)\n\n self.limit_order_count += 1\n\n order = OrderData(\n symbol=symbol,\n exchange=exchange,\n orderid=str(self.limit_order_count),\n direction=direction,\n offset=offset,\n price=price,\n volume=volume,\n status=Status.SUBMITTING,\n datetime=self.datetime,\n gateway_name=self.gateway_name,\n )\n\n self.active_limit_orders[order.vt_orderid] = order\n self.limit_orders[order.vt_orderid] = order\n\n return [order.vt_orderid]\n\n def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str) -> None:\n \"\"\"\n Cancel order by vt_orderid.\n \"\"\"\n if vt_orderid not in self.active_limit_orders:\n return\n order = self.active_limit_orders.pop(vt_orderid)\n\n order.status = Status.CANCELLED\n self.strategy.update_order(order)\n\n def write_log(self, msg: str, strategy: StrategyTemplate = None) -> None:\n \"\"\"\n Write log message.\n \"\"\"\n msg = f\"{self.datetime}\\t{msg}\"\n self.logs.append(msg)\n\n def send_email(self, msg: str, strategy: StrategyTemplate = None) -> None:\n \"\"\"\n Send email to default receiver.\n \"\"\"\n pass\n\n def sync_strategy_data(self, strategy: StrategyTemplate) -> None:\n \"\"\"\n Sync strategy data into json file.\n \"\"\"\n pass\n\n def put_strategy_event(self, strategy: StrategyTemplate) -> None:\n \"\"\"\n Put an event to update strategy status.\n \"\"\"\n pass\n\n def output(self, msg) -> None:\n \"\"\"\n Output message of backtesting engine.\n \"\"\"\n print(f\"{datetime.now()}\\t{msg}\")\n\n def get_all_trades(self) -> List[TradeData]:\n \"\"\"\n Return all trade data of current backtesting result.\n \"\"\"\n return list(self.trades.values())\n\n def get_all_orders(self) -> List[OrderData]:\n \"\"\"\n Return all limit order data of current backtesting result.\n \"\"\"\n return list(self.limit_orders.values())\n\n def get_all_daily_results(self) -> List[\"PortfolioDailyResult\"]:\n \"\"\"\n Return all daily result data.\n \"\"\"\n return list(self.daily_results.values())\n\n\nclass ContractDailyResult:\n \"\"\"\"\"\"\n\n def __init__(self, result_date: date, close_price: float):\n \"\"\"\"\"\"\n 
self.date: date = result_date\n self.close_price: float = close_price\n self.pre_close: float = 0\n\n self.trades: List[TradeData] = []\n self.trade_count: int = 0\n\n self.start_pos: float = 0\n self.end_pos: float = 0\n\n self.turnover: float = 0\n self.commission: float = 0\n self.slippage: float = 0\n\n self.trading_pnl: float = 0\n self.holding_pnl: float = 0\n self.total_pnl: float = 0\n self.net_pnl: float = 0\n\n def add_trade(self, trade: TradeData) -> None:\n \"\"\"\"\"\"\n self.trades.append(trade)\n\n def calculate_pnl(\n self,\n pre_close: float,\n start_pos: float,\n size: int,\n rate: float,\n slippage: float\n ) -> None:\n \"\"\"\"\"\"\n # If no pre_close provided on the first day,\n # use value 1 to avoid zero division error\n if pre_close:\n self.pre_close = pre_close\n else:\n self.pre_close = 1\n\n # Holding pnl is the pnl from holding position at day start\n self.start_pos = start_pos\n self.end_pos = start_pos\n\n self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size\n\n # Trading pnl is the pnl from new trade during the day\n self.trade_count = len(self.trades)\n\n for trade in self.trades:\n if trade.direction == Direction.LONG:\n pos_change = trade.volume\n else:\n pos_change = -trade.volume\n\n self.end_pos += pos_change\n\n turnover = trade.volume * size * trade.price\n\n self.trading_pnl += pos_change * (self.close_price - trade.price) * size\n self.slippage += trade.volume * size * slippage\n self.turnover += turnover\n self.commission += turnover * rate\n\n # Net pnl takes account of commission and slippage cost\n self.total_pnl = self.trading_pnl + self.holding_pnl\n self.net_pnl = self.total_pnl - self.commission - self.slippage\n\n def update_close_price(self, close_price: float) -> None:\n \"\"\"\"\"\"\n self.close_price = close_price\n\n\nclass PortfolioDailyResult:\n \"\"\"\"\"\"\n\n def __init__(self, result_date: date, close_prices: Dict[str, float]):\n \"\"\"\"\"\"\n self.date: date = result_date\n self.close_prices: Dict[str, float] = close_prices\n self.pre_closes: Dict[str, float] = {}\n self.start_poses: Dict[str, float] = {}\n self.end_poses: Dict[str, float] = {}\n\n self.contract_results: Dict[str, ContractDailyResult] = {}\n\n for vt_symbol, close_price in close_prices.items():\n self.contract_results[vt_symbol] = ContractDailyResult(result_date, close_price)\n\n self.trade_count: int = 0\n self.turnover: float = 0\n self.commission: float = 0\n self.slippage: float = 0\n self.trading_pnl: float = 0\n self.holding_pnl: float = 0\n self.total_pnl: float = 0\n self.net_pnl: float = 0\n\n def add_trade(self, trade: TradeData) -> None:\n \"\"\"\"\"\"\n contract_result = self.contract_results[trade.vt_symbol]\n contract_result.add_trade(trade)\n\n def calculate_pnl(\n self,\n pre_closes: Dict[str, float],\n start_poses: Dict[str, float],\n sizes: Dict[str, float],\n rates: Dict[str, float],\n slippages: Dict[str, float],\n ) -> None:\n \"\"\"\"\"\"\n self.pre_closes = pre_closes\n\n for vt_symbol, contract_result in self.contract_results.items():\n contract_result.calculate_pnl(\n pre_closes.get(vt_symbol, 0),\n start_poses.get(vt_symbol, 0),\n sizes[vt_symbol],\n rates[vt_symbol],\n slippages[vt_symbol]\n )\n\n self.trade_count += contract_result.trade_count\n self.turnover += contract_result.turnover\n self.commission += contract_result.commission\n self.slippage += contract_result.slippage\n self.trading_pnl += contract_result.trading_pnl\n self.holding_pnl += contract_result.holding_pnl\n self.total_pnl += 
contract_result.total_pnl\n self.net_pnl += contract_result.net_pnl\n\n self.end_poses[vt_symbol] = contract_result.end_pos\n\n def update_close_prices(self, close_prices: Dict[str, float]) -> None:\n \"\"\"\"\"\"\n self.close_prices = close_prices\n\n for vt_symbol, close_price in close_prices.items():\n contract_result = self.contract_results.get(vt_symbol, None)\n if contract_result:\n contract_result.update_close_price(close_price)\n\n\n@lru_cache(maxsize=999)\ndef load_bar_data(\n vt_symbol: str,\n interval: Interval,\n start: datetime,\n end: datetime\n):\n \"\"\"\"\"\"\n symbol, exchange = extract_vt_symbol(vt_symbol)\n\n return database_manager.load_bar_data(\n symbol, exchange, interval, start, end\n )\n"
] | [
[
"numpy.sqrt",
"numpy.nan_to_num",
"pandas.DataFrame.from_dict"
]
] |
rohit-konda/markovGames | [
"d6dd1b8a11f1c95658a468f9e471aecfcf0e6839"
] | [
"markovGames/learning/bruteSearch.py"
] | [
"import numpy as np\nfrom itertools import product\nfrom markovGames.gameDefs.mdpDefs import Policy\n\n\ndef getAllDetPol(numStates, numActions):\n detProbs = [np.array([1 if j == i else 0 for j in range(numActions)]) for i in range(numActions)]\n return product(detProbs, repeat=numStates)\n\n\ndef getPolList(states, acSet):\n # list of possible deterministic policies\n numStates = len(states)\n numActions = len(acSet)\n detPol = getAllDetPol(numStates, numActions)\n return [Policy(states, pol, acSet) for pol in detPol]\n\n\ndef prodPolList(states, listActions):\n # get policies for each action Set\n polList = [getPolList(states, ac) for ac in listActions]\n return polList\n\n\ndef getPayoff(utilMap, listAcSet):\n # utilMap: maps list of agent policies to real numbers,\n # allPolicyList: list of agent i (list of possible policies)\n def utilInd(index):\n jointAc = [listAcSet[j][ind] for j, ind in enumerate(index)]\n val = utilMap(jointAc)\n return val\n\n numPL = [len(pL) for pL in listAcSet]\n payoff = np.zeros(numPL)\n for ind in product(*[range(nI) for nI in numPL]):\n payoff[ind] = utilInd(ind)\n return payoff\n\n\ndef getArgOpt(tensor):\n return np.unravel_index(np.argmax(tensor), tensor.shape)\n\n\ndef bruteFindNash(payoffList):\n TOLERANCE = 1e-7\n cpnes = list(np.argwhere(payoffList[0] > np.amax(payoffList[0], 0) - TOLERANCE))\n cpnes = [tuple(cpne) for cpne in cpnes]\n N = len(payoffList)\n\n for i in range(1, N):\n pMat = payoffList[i]\n for cpne in cpnes[:]:\n ind = cpne[:i] + (slice(None),) + cpne[i + 1:]\n if pMat[cpne] < np.max(pMat[ind]) - TOLERANCE:\n cpnes.pop(cpnes.index(cpne))\n return cpnes\n\n\ndef getEfficiency(cpnes, welfareMat):\n # welfareMat - matrix form of welfare\n pneWelf = [welfareMat[cpne] for cpne in cpnes]\n opt = np.max(welfareMat)\n priceRatios = [float(pne) / opt for pne in pneWelf]\n return priceRatios\n\n\ndef getPoA(cpnes, welfareMat):\n return min(getEfficiency(cpnes, welfareMat))\n"
] | [
[
"numpy.amax",
"numpy.max",
"numpy.argmax",
"numpy.zeros"
]
] |
M155K4R4/Tensorflow | [
"e5e03ef3148303b3dfed89a1492dedf92b45be25",
"e5e03ef3148303b3dfed89a1492dedf92b45be25"
] | [
"tensorflow/contrib/rnn/python/ops/lstm_ops.py",
"tensorflow/contrib/bayesflow/python/ops/hmc_impl.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"LSTM Block Cell ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nfrom tensorflow.contrib.rnn.ops import gen_lstm_ops\nfrom tensorflow.contrib.util import loader\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.platform import resource_loader\n\n_lstm_ops_so = loader.load_op_library(\n resource_loader.get_path_to_datafile(\"_lstm_ops.so\"))\n\nLayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name\n\n\n# pylint: disable=invalid-name\ndef _lstm_block_cell(x,\n cs_prev,\n h_prev,\n w,\n b,\n wci=None,\n wcf=None,\n wco=None,\n forget_bias=None,\n cell_clip=None,\n use_peephole=None,\n name=None):\n r\"\"\"Computes the LSTM cell forward propagation for 1 time step.\n\n This implementation uses 1 weight matrix and 1 bias vector, and there's an\n optional peephole connection.\n\n This kernel op implements the following mathematical equations:\n\n ```python\n xh = [x, h_prev]\n [i, ci, f, o] = xh * w + b\n f = f + forget_bias\n\n if not use_peephole:\n wci = wcf = wco = 0\n\n i = sigmoid(cs_prev * wci + i)\n f = sigmoid(cs_prev * wcf + f)\n ci = tanh(ci)\n\n cs = ci .* i + cs_prev .* f\n cs = clip(cs, cell_clip)\n\n o = sigmoid(cs * wco + o)\n co = tanh(cs)\n h = co .* o\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`.\n The input to the LSTM cell, shape (batch_size, num_inputs).\n cs_prev: A `Tensor`. Must have the same type as `x`.\n Value of the cell state at previous time step.\n h_prev: A `Tensor`. Must have the same type as `x`.\n Output of the previous cell at previous time step.\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\n wci: A `Tensor`. Must have the same type as `x`.\n The weight matrix for input gate peephole connection.\n wcf: A `Tensor`. Must have the same type as `x`.\n The weight matrix for forget gate peephole connection.\n wco: A `Tensor`. Must have the same type as `x`.\n The weight matrix for output gate peephole connection.\n forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.\n cell_clip: An optional `float`. Defaults to `-1` (no clipping).\n Value to clip the 'cs' value to. Disable by setting to negative value.\n use_peephole: An optional `bool`. 
Defaults to `False`.\n Whether to use peephole weights.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\n i: A `Tensor`. Has the same type as `x`. The input gate.\n cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.\n f: A `Tensor`. Has the same type as `x`. The forget gate.\n o: A `Tensor`. Has the same type as `x`. The output gate.\n ci: A `Tensor`. Has the same type as `x`. The cell input.\n co: A `Tensor`. Has the same type as `x`. The cell after the tanh.\n h: A `Tensor`. Has the same type as `x`. The output h vector.\n\n Raises:\n ValueError: If cell_size is None.\n \"\"\"\n if wci is None:\n cell_size = cs_prev.get_shape().with_rank(2)[1].value\n if cell_size is None:\n raise ValueError(\"cell_size from `cs_prev` should not be None.\")\n wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])\n wcf = wci\n wco = wci\n\n # pylint: disable=protected-access\n return gen_lstm_ops.lstm_block_cell(\n x=x,\n cs_prev=cs_prev,\n h_prev=h_prev,\n w=w,\n wci=wci,\n wcf=wcf,\n wco=wco,\n b=b,\n forget_bias=forget_bias,\n cell_clip=cell_clip if cell_clip is not None else -1,\n use_peephole=use_peephole,\n name=name)\n # pylint: enable=protected-access\n\n\ndef _block_lstm(seq_len_max,\n x,\n w,\n b,\n cs_prev=None,\n h_prev=None,\n wci=None,\n wcf=None,\n wco=None,\n forget_bias=None,\n cell_clip=None,\n use_peephole=None,\n name=None):\n r\"\"\"TODO(williamchan): add doc.\n\n Args:\n seq_len_max: A `Tensor` of type `int64`.\n x: A list of at least 1 `Tensor` objects of the same type in: `float32`.\n w: A `Tensor`. Must have the same type as `x`.\n b: A `Tensor`. Must have the same type as `x`.\n cs_prev: A `Tensor`. Must have the same type as `x`.\n h_prev: A `Tensor`. Must have the same type as `x`.\n wci: A `Tensor`. Must have the same type as `x`.\n wcf: A `Tensor`. Must have the same type as `x`.\n wco: A `Tensor`. Must have the same type as `x`.\n forget_bias: An optional `float`. Defaults to `1`.\n cell_clip: An optional `float`. Defaults to `-1` (no clipping).\n use_peephole: An optional `bool`. 
Defaults to `False`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\n i: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n cs: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n f: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n o: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n ci: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n co: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n h: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n\n Raises:\n ValueError: If `b` does not have a valid shape.\n \"\"\"\n batch_size = x[0].get_shape().with_rank(2)[0].value\n cell_size4 = b.get_shape().with_rank(1)[0].value\n if cell_size4 is None:\n raise ValueError(\"`b` shape must not be None.\")\n cell_size = cell_size4 / 4\n zero_state = None\n if cs_prev is None or h_prev is None:\n zero_state = array_ops.constant(\n 0, dtype=dtypes.float32, shape=[batch_size, cell_size])\n if cs_prev is None:\n cs_prev = zero_state\n if h_prev is None:\n h_prev = zero_state\n if wci is None:\n wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])\n wcf = wci\n wco = wci\n\n # pylint: disable=protected-access\n i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(\n seq_len_max=seq_len_max,\n x=array_ops.stack(x),\n cs_prev=cs_prev,\n h_prev=h_prev,\n w=w,\n wci=wci,\n wcf=wcf,\n wco=wco,\n b=b,\n forget_bias=forget_bias,\n cell_clip=cell_clip if cell_clip is not None else -1,\n name=name,\n use_peephole=use_peephole)\n\n return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(\n f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(\n co), array_ops.unstack(h)\n # pylint: enable=protected-access\n # pylint: enable=invalid-name\n\n\n_lstm_block_cell_grad_outputs = [\"cs_prev_grad\", \"dicfo\"]\n\n\n@ops.RegisterGradient(\"LSTMBlockCell\")\ndef _LSTMBlockCellGrad(op, *grad):\n \"\"\"Gradient for LSTMBlockCell.\"\"\"\n (x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs\n (i, cs, f, o, ci, co, _) = op.outputs\n (_, cs_grad, _, _, _, _, h_grad) = grad\n\n batch_size = x.get_shape().with_rank(2)[0].value\n if batch_size is None:\n batch_size = -1\n input_size = x.get_shape().with_rank(2)[1].value\n if input_size is None:\n raise ValueError(\"input_size from `x` should not be None.\")\n cell_size = cs_prev.get_shape().with_rank(2)[1].value\n if cell_size is None:\n raise ValueError(\"cell_size from `cs_prev` should not be None.\")\n\n (cs_prev_grad, dicfo, wci_grad, wcf_grad,\n wco_grad) = gen_lstm_ops.lstm_block_cell_grad(\n x,\n cs_prev,\n h_prev,\n w,\n wci,\n wcf,\n wco,\n b,\n i,\n cs,\n f,\n o,\n ci,\n co,\n cs_grad,\n h_grad,\n use_peephole=op.get_attr(\"use_peephole\"))\n\n # Backprop from dicfo to xh.\n xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)\n\n x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))\n x_grad.get_shape().merge_with(x.get_shape())\n\n h_prev_grad = array_ops.slice(xh_grad, (0, input_size),\n (batch_size, cell_size))\n h_prev_grad.get_shape().merge_with(h_prev.get_shape())\n\n # Backprop from dicfo to w.\n xh = array_ops.concat([x, h_prev], 1)\n w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)\n 
w_grad.get_shape().merge_with(w.get_shape())\n\n # Backprop from dicfo to b.\n b_grad = nn_ops.bias_add_grad(dicfo)\n b_grad.get_shape().merge_with(b.get_shape())\n\n return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,\n wco_grad, b_grad)\n\n\n@ops.RegisterGradient(\"BlockLSTM\")\ndef _BlockLSTMGrad(op, *grad):\n \"\"\"Gradient for BlockLSTM.\"\"\"\n seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs\n i, cs, f, o, ci, co, h = op.outputs\n\n cs_grad = grad[1]\n h_grad = grad[6]\n\n (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,\n b_grad) = gen_lstm_ops.block_lstm_grad(\n seq_len_max,\n x,\n cs_prev,\n h_prev,\n w,\n wci,\n wcf,\n wco,\n b,\n i,\n cs,\n f,\n o,\n ci,\n co,\n h,\n cs_grad,\n h_grad,\n use_peephole=op.get_attr(\"use_peephole\"))\n\n return [\n None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,\n wco_grad, b_grad\n ]\n\n\nclass LSTMBlockCell(LayerRNNCell):\n \"\"\"Basic LSTM recurrent network cell.\n\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n\n We add `forget_bias` (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much\n faster. The weight and bias matrices should be compatible as long as the\n variable scope matches.\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n cell_clip=None,\n use_peephole=False,\n reuse=None,\n name=\"lstm_cell\"):\n \"\"\"Initialize the basic LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n cell_clip: An optional `float`. Defaults to `-1` (no clipping).\n use_peephole: Whether to use peephole connections or not.\n reuse: (optional) boolean describing whether to reuse variables in an\n existing scope. If not `True`, and the existing scope already has the\n given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases. 
By default this is \"lstm_cell\", for variable-name compatibility\n with `tf.nn.rnn_cell.LSTMCell`.\n\n When restoring from CudnnLSTM-trained checkpoints, must use\n CudnnCompatibleLSTMBlockCell instead.\n \"\"\"\n super(LSTMBlockCell, self).__init__(_reuse=reuse, name=name)\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._use_peephole = use_peephole\n self._cell_clip = cell_clip if cell_clip is not None else -1\n self._names = {\n \"W\": \"kernel\",\n \"b\": \"bias\",\n \"wci\": \"w_i_diag\",\n \"wcf\": \"w_f_diag\",\n \"wco\": \"w_o_diag\",\n \"scope\": \"lstm_cell\"\n }\n # Inputs must be 2-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n @property\n def state_size(self):\n return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def build(self, inputs_shape):\n if not inputs_shape[1].value:\n raise ValueError(\n \"Expecting inputs_shape[1] to be set: %s\" % str(inputs_shape))\n input_size = inputs_shape[1].value\n self._kernel = self.add_variable(\n self._names[\"W\"], [input_size + self._num_units, self._num_units * 4])\n self._bias = self.add_variable(\n self._names[\"b\"], [self._num_units * 4],\n initializer=init_ops.constant_initializer(0.0))\n if self._use_peephole:\n self._w_i_diag = self.add_variable(self._names[\"wci\"], [self._num_units])\n self._w_f_diag = self.add_variable(self._names[\"wcf\"], [self._num_units])\n self._w_o_diag = self.add_variable(self._names[\"wco\"], [self._num_units])\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Long short-term memory cell (LSTM).\"\"\"\n if len(state) != 2:\n raise ValueError(\"Expecting state to be a tuple with length 2.\")\n\n if self._use_peephole:\n wci = self._w_i_diag\n wcf = self._w_f_diag\n wco = self._w_o_diag\n else:\n wci = wcf = wco = array_ops.zeros([self._num_units])\n\n (cs_prev, h_prev) = state\n (_, cs, _, _, _, _, h) = _lstm_block_cell(\n inputs,\n cs_prev,\n h_prev,\n self._kernel,\n self._bias,\n wci=wci,\n wcf=wcf,\n wco=wco,\n forget_bias=self._forget_bias,\n cell_clip=self._cell_clip,\n use_peephole=self._use_peephole)\n\n new_state = rnn_cell_impl.LSTMStateTuple(cs, h)\n return h, new_state\n\n\nclass LSTMBlockWrapper(base_layer.Layer):\n \"\"\"This is a helper class that provides housekeeping for LSTM cells.\n\n This may be useful for alternative LSTM and similar type of cells.\n The subclasses must implement `_call_cell` method and `num_units` property.\n \"\"\"\n\n @abc.abstractproperty\n def num_units(self):\n \"\"\"Number of units in this cell (output dimension).\"\"\"\n pass\n\n @abc.abstractmethod\n def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,\n sequence_length):\n \"\"\"Run this LSTM on inputs, starting from the given state.\n\n This method must be implemented by subclasses and does the actual work\n of calling the cell.\n\n Args:\n inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`\n initial_cell_state: initial value for cell state, shape `[batch_size,\n self._num_units]`\n initial_output: initial value of cell output, shape `[batch_size,\n self._num_units]`\n dtype: The data type for the initial state and expected output.\n sequence_length: Specifies the length of each sequence in inputs. 
An int32\n or int64 vector (tensor) size [batch_size], values in [0, time_len) or\n None.\n\n Returns:\n A pair containing:\n\n - State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`\n - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`\n \"\"\"\n pass\n\n def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):\n \"\"\"Run this LSTM on inputs, starting from the given state.\n\n Args:\n inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`\n or a list of `time_len` tensors of shape `[batch_size, input_size]`.\n initial_state: a tuple `(initial_cell_state, initial_output)` with tensors\n of shape `[batch_size, self._num_units]`. If this is not provided, the\n cell is expected to create a zero initial state of type `dtype`.\n dtype: The data type for the initial state and expected output. Required\n if `initial_state` is not provided or RNN state has a heterogeneous\n dtype.\n sequence_length: Specifies the length of each sequence in inputs. An\n `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,\n time_len).`\n Defaults to `time_len` for each element.\n\n Returns:\n A pair containing:\n\n - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`\n or a list of time_len tensors of shape `[batch_size, output_size]`,\n to match the type of the `inputs`.\n - Final state: a tuple `(cell_state, output)` matching `initial_state`.\n\n Raises:\n ValueError: in case of shape mismatches\n \"\"\"\n is_list = isinstance(inputs, list)\n if is_list:\n inputs = array_ops.stack(inputs)\n inputs_shape = inputs.get_shape().with_rank(3)\n if not inputs_shape[2]:\n raise ValueError(\"Expecting inputs_shape[2] to be set: %s\" % inputs_shape)\n batch_size = inputs_shape[1].value\n if batch_size is None:\n batch_size = array_ops.shape(inputs)[1]\n time_len = inputs_shape[0].value\n if time_len is None:\n time_len = array_ops.shape(inputs)[0]\n\n # Provide default values for initial_state and dtype\n if initial_state is None:\n if dtype is None:\n raise ValueError(\"Either initial_state or dtype needs to be specified\")\n z = array_ops.zeros(\n array_ops.stack([batch_size, self.num_units]), dtype=dtype)\n initial_state = z, z\n else:\n if len(initial_state) != 2:\n raise ValueError(\n \"Expecting initial_state to be a tuple with length 2 or None\")\n if dtype is None:\n dtype = initial_state[0].dtype\n\n # create the actual cell\n if sequence_length is not None:\n sequence_length = ops.convert_to_tensor(sequence_length)\n initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence\n cell_states, outputs = self._call_cell(\n inputs, initial_cell_state, initial_output, dtype, sequence_length)\n\n if sequence_length is not None:\n # Mask out the part beyond sequence_length\n mask = array_ops.transpose(\n array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),\n [1, 0])\n mask = array_ops.tile(\n array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])\n outputs *= mask\n # Prepend initial states to cell_states and outputs for indexing to work\n # correctly,since we want to access the last valid state at\n # sequence_length - 1, which can even be -1, corresponding to the\n # initial state.\n mod_cell_states = array_ops.concat(\n [array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)\n mod_outputs = array_ops.concat(\n [array_ops.expand_dims(initial_output, [0]), outputs], 0)\n final_cell_state = self._gather_states(mod_cell_states, sequence_length,\n 
batch_size)\n final_output = self._gather_states(mod_outputs, sequence_length,\n batch_size)\n else:\n # No sequence_lengths used: final state is the last state\n final_cell_state = cell_states[-1]\n final_output = outputs[-1]\n\n if is_list:\n # Input was a list, so return a list\n outputs = array_ops.unstack(outputs)\n\n final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)\n return outputs, final_state\n\n def _gather_states(self, data, indices, batch_size):\n \"\"\"Produce `out`, s.t. out(i, j) = data(indices(i), i, j).\"\"\"\n mod_indices = indices * batch_size + math_ops.range(batch_size)\n return array_ops.gather(\n array_ops.reshape(data, [-1, self.num_units]), mod_indices)\n\n\nclass LSTMBlockFusedCell(LSTMBlockWrapper):\n \"\"\"FusedRNNCell implementation of LSTM.\n\n This is an extremely efficient LSTM implementation, that uses a single TF op\n for the entire LSTM. It should be both faster and more memory-efficient than\n LSTMBlockCell defined above.\n\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n\n We add forget_bias (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n The variable naming is consistent with `rnn_cell_impl.LSTMCell`.\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n cell_clip=None,\n use_peephole=False,\n reuse=None,\n name=\"lstm_fused_cell\"):\n \"\"\"Initialize the LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n cell_clip: clip the cell to this value. Default is no cell clipping.\n use_peephole: Whether to use peephole connections or not.\n reuse: (optional) boolean describing whether to reuse variables in an\n existing scope. If not `True`, and the existing scope already has the\n given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases. 
By default this is \"lstm_cell\", for variable-name compatibility\n with `tf.nn.rnn_cell.LSTMCell`.\n \"\"\"\n super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._cell_clip = cell_clip if cell_clip is not None else -1\n self._use_peephole = use_peephole\n\n # Inputs must be 3-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=3)\n\n @property\n def num_units(self):\n \"\"\"Number of units in this cell (output dimension).\"\"\"\n return self._num_units\n\n def build(self, input_shape):\n input_size = input_shape[2].value\n self._kernel = self.add_variable(\n \"kernel\", [input_size + self._num_units, self._num_units * 4])\n self._bias = self.add_variable(\n \"bias\", [self._num_units * 4],\n initializer=init_ops.constant_initializer(0.0))\n if self._use_peephole:\n self._w_i_diag = self.add_variable(\"w_i_diag\", [self._num_units])\n self._w_f_diag = self.add_variable(\"w_f_diag\", [self._num_units])\n self._w_o_diag = self.add_variable(\"w_o_diag\", [self._num_units])\n\n self.built = True\n\n def _call_cell(self,\n inputs,\n initial_cell_state=None,\n initial_output=None,\n dtype=None,\n sequence_length=None):\n \"\"\"Run this LSTM on inputs, starting from the given state.\n\n Args:\n inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`\n initial_cell_state: initial value for cell state, shape `[batch_size,\n self._num_units]`\n initial_output: initial value of cell output, shape `[batch_size,\n self._num_units]`\n dtype: The data type for the initial state and expected output.\n sequence_length: Specifies the length of each sequence in inputs. An\n `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,\n time_len)` or None.\n\n Returns:\n A pair containing:\n\n - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,\n output_size]`\n - Output (h): A `3-D` tensor of shape `[time_len, batch_size,\n output_size]`\n \"\"\"\n\n inputs_shape = inputs.get_shape().with_rank(3)\n time_len = inputs_shape[0].value\n if time_len is None:\n time_len = array_ops.shape(inputs)[0]\n\n if self._use_peephole:\n wci = self._w_i_diag\n wco = self._w_o_diag\n wcf = self._w_f_diag\n else:\n wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)\n\n if sequence_length is None:\n max_seq_len = math_ops.to_int64(time_len)\n else:\n max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))\n\n _, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(\n seq_len_max=max_seq_len,\n x=inputs,\n cs_prev=initial_cell_state,\n h_prev=initial_output,\n w=self._kernel,\n wci=wci,\n wcf=wcf,\n wco=wco,\n b=self._bias,\n forget_bias=self._forget_bias,\n cell_clip=self._cell_clip,\n use_peephole=self._use_peephole)\n return cs, h\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Hamiltonian Monte Carlo, a gradient-based MCMC algorithm.\n\n@@sample_chain\n@@sample_annealed_importance_chain\n@@kernel\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gradients_impl as gradients_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import util as distributions_util\n\n__all__ = [\n \"sample_chain\",\n \"sample_annealed_importance_chain\",\n \"kernel\",\n]\n\n\nKernelResults = collections.namedtuple(\n \"KernelResults\",\n [\n \"acceptance_probs\",\n \"current_grads_target_log_prob\", # \"Current result\" means \"accepted\".\n \"current_target_log_prob\", # \"Current result\" means \"accepted\".\n \"energy_change\",\n \"is_accepted\",\n \"proposed_grads_target_log_prob\",\n \"proposed_state\",\n \"proposed_target_log_prob\",\n \"random_positive\",\n ])\n\n\ndef _make_dummy_kernel_results(\n dummy_state,\n dummy_target_log_prob,\n dummy_grads_target_log_prob):\n return KernelResults(\n acceptance_probs=dummy_target_log_prob,\n current_grads_target_log_prob=dummy_grads_target_log_prob,\n current_target_log_prob=dummy_target_log_prob,\n energy_change=dummy_target_log_prob,\n is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool),\n proposed_grads_target_log_prob=dummy_grads_target_log_prob,\n proposed_state=dummy_state,\n proposed_target_log_prob=dummy_target_log_prob,\n random_positive=dummy_target_log_prob,\n )\n\n\ndef sample_chain(\n num_results,\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n num_burnin_steps=0,\n num_steps_between_results=0,\n seed=None,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.\n\n Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm\n that takes a series of gradient-informed steps to produce a Metropolis\n proposal. This function samples from an HMC Markov chain at `current_state`\n and whose stationary distribution has log-unnormalized-density\n `target_log_prob_fn()`.\n\n This function samples from multiple chains in parallel. It assumes that the\n the leftmost dimensions of (each) `current_state` (part) index an independent\n chain. The function `target_log_prob_fn()` sums log-probabilities across\n event dimensions (i.e., current state (part) rightmost dimensions). 
Each\n element of the output of `target_log_prob_fn()` represents the (possibly\n unnormalized) log-probability of the joint distribution over (all) the current\n state (parts).\n\n The `current_state` can be represented as a single `Tensor` or a `list` of\n `Tensors` which collectively represent the current state. When specifying a\n `list`, one must also specify a list of `step_size`s.\n\n Note: `target_log_prob_fn` is called exactly twice.\n\n Only one out of every `num_steps_between_samples + 1` steps is included in the\n returned results. This \"thinning\" comes at a cost of reduced statistical\n power, while reducing memory requirements and autocorrelation. For more\n discussion see [1].\n\n [1]: \"Statistically efficient thinning of a Markov chain sampler.\"\n Art B. Owen. April 2017.\n http://statweb.stanford.edu/~owen/reports/bestthinning.pdf\n\n #### Examples:\n\n ##### Sample from a diagonal-variance Gaussian.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_likelihood(true_variances):\n return tfd.MultivariateNormalDiag(\n scale_diag=tf.sqrt(true_variances))\n\n dims = 10\n dtype = np.float32\n true_variances = tf.linspace(dtype(1), dtype(3), dims)\n likelihood = make_likelihood(true_variances)\n\n states, kernel_results = hmc.sample_chain(\n num_results=1000,\n target_log_prob_fn=likelihood.log_prob,\n current_state=tf.zeros(dims),\n step_size=0.5,\n num_leapfrog_steps=2,\n num_burnin_steps=500)\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(states, axis=0)\n sample_var = tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0)\n ```\n\n ##### Sampling from factor-analysis posteriors with known factors.\n\n I.e.,\n\n ```none\n for i=1..n:\n w[i] ~ Normal(0, eye(d)) # prior\n x[i] ~ Normal(loc=matmul(w[i], F)) # likelihood\n ```\n\n where `F` denotes factors.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, factors):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, factors, axes=[[0], [-1]]))\n\n # Setup data.\n num_weights = 10\n num_factors = 4\n num_chains = 100\n dtype = np.float32\n\n prior = make_prior(num_weights, dtype)\n weights = prior.sample(num_chains)\n factors = np.random.randn(num_factors, num_weights).astype(dtype)\n x = make_likelihood(weights, factors).sample(num_chains)\n\n def target_log_prob(w):\n # Target joint is: `f(w) = p(w, x | factors)`.\n return prior.log_prob(w) + make_likelihood(w, factors).log_prob(x)\n\n # Get `num_results` samples from `num_chains` independent chains.\n chains_states, kernels_results = hmc.sample_chain(\n num_results=1000,\n target_log_prob_fn=target_log_prob,\n current_state=tf.zeros([num_chains, dims], dtype),\n step_size=0.1,\n num_leapfrog_steps=2,\n num_burnin_steps=500)\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])\n sample_var = tf.reduce_mean(\n tf.squared_difference(chains_states, sample_mean),\n axis=[0, 1])\n ```\n\n Args:\n num_results: Integer number of Markov chain draws.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). 
The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n num_burnin_steps: Integer number of chain steps to take before starting to\n collect results.\n Default value: 0 (i.e., no burn-in).\n num_steps_between_results: Integer number of chain steps between collecting\n a result. Only one out of every `num_steps_between_samples + 1` steps is\n included in the returned results. This \"thinning\" comes at a cost of\n reduced statistical power, while reducing memory requirements and\n autocorrelation. For more discussion see [1].\n Default value: 0 (i.e., no subsampling).\n seed: Python integer to seed the random number generator.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`. The only reason to specify\n this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `target_log_prob` at the `current_state` and wrt\n the `current_state`. Must have same shape as `current_state`. The only\n reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_sample_chain\").\n\n Returns:\n accepted_states: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. 
Has same shape as\n input `current_state` but with a prepended `num_results`-size dimension.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n \"\"\"\n with ops.name_scope(\n name, \"hmc_sample_chain\",\n [num_results, current_state, step_size, num_leapfrog_steps,\n num_burnin_steps, num_steps_between_results, seed,\n current_target_log_prob, current_grads_target_log_prob]):\n with ops.name_scope(\"initialize\"):\n [\n current_state,\n step_size,\n current_target_log_prob,\n current_grads_target_log_prob,\n ] = _prepare_args(\n target_log_prob_fn,\n current_state,\n step_size,\n current_target_log_prob,\n current_grads_target_log_prob)\n num_results = ops.convert_to_tensor(\n num_results,\n dtype=dtypes.int32,\n name=\"num_results\")\n num_leapfrog_steps = ops.convert_to_tensor(\n num_leapfrog_steps,\n dtype=dtypes.int32,\n name=\"num_leapfrog_steps\")\n num_burnin_steps = ops.convert_to_tensor(\n num_burnin_steps,\n dtype=dtypes.int32,\n name=\"num_burnin_steps\")\n num_steps_between_results = ops.convert_to_tensor(\n num_steps_between_results,\n dtype=dtypes.int32,\n name=\"num_steps_between_results\")\n\n def _run_chain(num_steps, current_state, kernel_results):\n \"\"\"Runs the chain(s) for `num_steps`.\"\"\"\n def _loop_body(iter_, current_state, kernel_results):\n return [iter_ + 1] + list(kernel(\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed,\n kernel_results.current_target_log_prob,\n kernel_results.current_grads_target_log_prob))\n while_loop_kwargs = dict(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0),\n current_state,\n kernel_results,\n ],\n )\n if seed is not None:\n while_loop_kwargs[\"parallel_iterations\"] = 1\n return control_flow_ops.while_loop(\n **while_loop_kwargs)[1:] # Lop-off \"iter_\".\n\n def _scan_body(args_list, iter_):\n \"\"\"Closure which implements `tf.scan` body.\"\"\"\n current_state, kernel_results = args_list\n return _run_chain(\n 1 + array_ops.where(math_ops.equal(iter_, 0),\n num_burnin_steps,\n num_steps_between_results),\n current_state,\n kernel_results)\n\n scan_kwargs = dict(\n fn=_scan_body,\n elems=math_ops.range(num_results), # iter_: used to choose burnin.\n initializer=[\n current_state,\n _make_dummy_kernel_results(\n current_state,\n current_target_log_prob,\n current_grads_target_log_prob),\n ])\n if seed is not None:\n scan_kwargs[\"parallel_iterations\"] = 1\n return functional_ops.scan(**scan_kwargs)\n\n\ndef sample_annealed_importance_chain(\n proposal_log_prob_fn,\n num_steps,\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed=None,\n name=None):\n \"\"\"Runs annealed importance sampling (AIS) to estimate normalizing constants.\n\n This function uses Hamiltonian Monte Carlo to sample from a series of\n distributions that slowly interpolates between an initial \"proposal\"\n distribution:\n\n `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`\n\n and the target distribution:\n\n `exp(target_log_prob_fn(x) - target_log_normalizer)`,\n\n accumulating importance weights along the way. 
The product of these\n importance weights gives an unbiased estimate of the ratio of the\n normalizing constants of the initial distribution and the target\n distribution:\n\n `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.\n\n Note: `proposal_log_prob_fn` and `target_log_prob_fn` are called exactly three\n times (although this may be reduced to two times, in the future).\n\n #### Examples:\n\n ##### Estimate the normalizing constant of a log-gamma distribution.\n\n ```python\n tfd = tf.contrib.distributions\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 20\n dtype = np.float32\n\n proposal = tfd.MultivatiateNormalDiag(\n loc=tf.zeros([dims], dtype=dtype))\n\n target = tfd.TransformedDistribution(\n distribution=tfd.Gamma(concentration=dtype(2),\n rate=dtype(3)),\n bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()),\n event_shape=[dims])\n\n chains_state, ais_weights, kernels_results = (\n hmc.sample_annealed_importance_chain(\n proposal_log_prob_fn=proposal.log_prob,\n num_steps=1000,\n target_log_prob_fn=target.log_prob,\n step_size=0.2,\n current_state=proposal.sample(num_chains),\n num_leapfrog_steps=2))\n\n log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)\n ```\n\n ##### Estimate marginal likelihood of a Bayesian regression model.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, x):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, x, axes=[[0], [-1]]))\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 10\n dtype = np.float32\n\n # Make training data.\n x = np.random.randn(num_chains, dims).astype(dtype)\n true_weights = np.random.randn(dims).astype(dtype)\n y = np.dot(x, true_weights) + np.random.randn(num_chains)\n\n # Setup model.\n prior = make_prior(dims, dtype)\n def target_log_prob_fn(weights):\n return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)\n\n proposal = tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n weight_samples, ais_weights, kernel_results = (\n hmc.sample_annealed_importance_chain(\n num_steps=1000,\n proposal_log_prob_fn=proposal.log_prob,\n target_log_prob_fn=target_log_prob_fn\n current_state=tf.zeros([num_chains, dims], dtype),\n step_size=0.1,\n num_leapfrog_steps=2))\n log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n ```\n\n Args:\n proposal_log_prob_fn: Python callable that returns the log density of the\n initial distribution.\n num_steps: Integer number of Markov chain updates to run. More\n iterations means more expense, but smoother annealing between q\n and p, which in turn means exponentially lower variance for the\n normalizing constant estimator.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. 
Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n seed: Python integer to seed the random number generator.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_sample_annealed_importance_chain\").\n\n Returns:\n accepted_state: `Tensor` or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at the final iteration. Has same shape as\n input `current_state`.\n ais_weights: Tensor with the estimated weight(s). Has shape matching\n `target_log_prob_fn(current_state)`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n \"\"\"\n def make_convex_combined_log_prob_fn(iter_):\n def _fn(*args):\n p = proposal_log_prob_fn(*args)\n t = target_log_prob_fn(*args)\n dtype = p.dtype.base_dtype\n beta = (math_ops.cast(iter_ + 1, dtype)\n / math_ops.cast(num_steps, dtype))\n return (1. - beta) * p + beta * t\n return _fn\n\n with ops.name_scope(\n name, \"hmc_sample_annealed_importance_chain\",\n [num_steps, current_state, step_size, num_leapfrog_steps, seed]):\n with ops.name_scope(\"initialize\"):\n [\n current_state,\n step_size,\n current_log_prob,\n current_grads_log_prob,\n ] = _prepare_args(\n make_convex_combined_log_prob_fn(iter_=0),\n current_state,\n step_size,\n description=\"convex_combined_log_prob\")\n num_steps = ops.convert_to_tensor(\n num_steps,\n dtype=dtypes.int32,\n name=\"num_steps\")\n num_leapfrog_steps = ops.convert_to_tensor(\n num_leapfrog_steps,\n dtype=dtypes.int32,\n name=\"num_leapfrog_steps\")\n def _loop_body(iter_, ais_weights, current_state, kernel_results):\n \"\"\"Closure which implements `tf.while_loop` body.\"\"\"\n current_state_parts = (list(current_state)\n if _is_list_like(current_state)\n else [current_state])\n # TODO(b/72994218): Consider refactoring things to avoid this unecessary\n # call.\n ais_weights += ((target_log_prob_fn(*current_state_parts)\n - proposal_log_prob_fn(*current_state_parts))\n / math_ops.cast(num_steps, ais_weights.dtype))\n return [iter_ + 1, ais_weights] + list(kernel(\n make_convex_combined_log_prob_fn(iter_),\n current_state,\n step_size,\n num_leapfrog_steps,\n seed,\n kernel_results.current_target_log_prob,\n kernel_results.current_grads_target_log_prob))\n\n while_loop_kwargs = dict(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0), # iter_\n array_ops.zeros_like(current_log_prob), # ais_weights\n current_state,\n _make_dummy_kernel_results(current_state,\n current_log_prob,\n current_grads_log_prob),\n ])\n if seed is not None:\n while_loop_kwargs[\"parallel_iterations\"] = 1\n\n [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop(\n **while_loop_kwargs)[1:] # Lop-off \"iter_\".\n\n return [current_state, ais_weights, kernel_results]\n\n\ndef kernel(target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed=None,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Runs one iteration of Hamiltonian Monte Carlo.\n\n Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)\n algorithm that takes a 
series of gradient-informed steps to produce\n a Metropolis proposal. This function applies one step of HMC to\n randomly update the variable `x`.\n\n This function can update multiple chains in parallel. It assumes that all\n leftmost dimensions of `current_state` index independent chain states (and are\n therefore updated independently). The output of `target_log_prob_fn()` should\n sum log-probabilities across all event dimensions. Slices along the rightmost\n dimensions may have different target distributions; for example,\n `current_state[0, :]` could have a different target distribution from\n `current_state[1, :]`. This is up to `target_log_prob_fn()`. (The number of\n independent chains is `tf.size(target_log_prob_fn(*current_state))`.)\n\n #### Examples:\n\n ##### Simple chain with warm-up.\n\n ```python\n tfd = tf.contrib.distributions\n\n # Tuning acceptance rates:\n dtype = np.float32\n target_accept_rate = 0.631\n num_warmup_iter = 500\n num_chain_iter = 500\n\n x = tf.get_variable(name=\"x\", initializer=dtype(1))\n step_size = tf.get_variable(name=\"step_size\", initializer=dtype(1))\n\n target = tfd.Normal(loc=dtype(0), scale=dtype(1))\n\n new_x, other_results = hmc.kernel(\n target_log_prob_fn=target.log_prob,\n current_state=x,\n step_size=step_size,\n num_leapfrog_steps=3)[:4]\n\n x_update = x.assign(new_x)\n\n step_size_update = step_size.assign_add(\n step_size * tf.where(\n other_results.acceptance_probs > target_accept_rate,\n 0.01, -0.01))\n\n warmup = tf.group([x_update, step_size_update])\n\n tf.global_variables_initializer().run()\n\n sess.graph.finalize() # No more graph building.\n\n # Warm up the sampler and adapt the step size\n for _ in xrange(num_warmup_iter):\n sess.run(warmup)\n\n # Collect samples without adapting step size\n samples = np.zeros([num_chain_iter])\n for i in xrange(num_chain_iter):\n _, x_, target_log_prob_, grad_ = sess.run([\n x_update,\n x,\n other_results.target_log_prob,\n other_results.grads_target_log_prob])\n samples[i] = x_\n\n print(samples.mean(), samples.std())\n ```\n\n ##### Sample from more complicated posterior.\n\n I.e.,\n\n ```none\n W ~ MVN(loc=0, scale=sigma * eye(dims))\n for i=1...num_samples:\n X[i] ~ MVN(loc=0, scale=eye(dims))\n eps[i] ~ Normal(loc=0, scale=1)\n Y[i] = X[i].T * W + eps[i]\n ```\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_training_data(num_samples, dims, sigma):\n dt = np.asarray(sigma).dtype\n zeros = tf.zeros(dims, dtype=dt)\n x = tfd.MultivariateNormalDiag(\n loc=zeros).sample(num_samples, seed=1)\n w = tfd.MultivariateNormalDiag(\n loc=zeros,\n scale_identity_multiplier=sigma).sample(seed=2)\n noise = tfd.Normal(\n loc=dt(0),\n scale=dt(1)).sample(num_samples, seed=3)\n y = tf.tensordot(x, w, axes=[[1], [0]]) + noise\n return y, x, w\n\n def make_prior(sigma, dims):\n # p(w | sigma)\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros([dims], dtype=sigma.dtype),\n scale_identity_multiplier=sigma)\n\n def make_likelihood(x, w):\n # p(y | x, w)\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(x, w, axes=[[1], [0]]))\n\n # Setup assumptions.\n dtype = np.float32\n num_samples = 150\n dims = 10\n num_iters = int(5e3)\n\n true_sigma = dtype(0.5)\n y, x, true_weights = make_training_data(num_samples, dims, true_sigma)\n\n # Estimate of `log(true_sigma)`.\n log_sigma = tf.get_variable(name=\"log_sigma\", initializer=dtype(0))\n sigma = tf.exp(log_sigma)\n\n # State of the Markov chain.\n weights = tf.get_variable(\n name=\"weights\",\n 
initializer=np.random.randn(dims).astype(dtype))\n\n prior = make_prior(sigma, dims)\n\n def joint_log_prob_fn(w):\n # f(w) = log p(w, y | x)\n return prior.log_prob(w) + make_likelihood(x, w).log_prob(y)\n\n weights_update = weights.assign(\n hmc.kernel(target_log_prob_fn=joint_log_prob,\n current_state=weights,\n step_size=0.1,\n num_leapfrog_steps=5)[0])\n\n with tf.control_dependencies([weights_update]):\n loss = -prior.log_prob(weights)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n log_sigma_update = optimizer.minimize(loss, var_list=[log_sigma])\n\n sess.graph.finalize() # No more graph building.\n\n tf.global_variables_initializer().run()\n\n sigma_history = np.zeros(num_iters, dtype)\n weights_history = np.zeros([num_iters, dims], dtype)\n\n for i in xrange(num_iters):\n _, sigma_, weights_, _ = sess.run([log_sigma_update, sigma, weights])\n weights_history[i, :] = weights_\n sigma_history[i] = sigma_\n\n true_weights_ = sess.run(true_weights)\n\n # Should converge to something close to true_sigma.\n plt.plot(sigma_history);\n plt.ylabel(\"sigma\");\n plt.xlabel(\"iteration\");\n ```\n\n Args:\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n seed: Python integer to seed the random number generator.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`. The only reason to\n specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `current_target_log_prob` at the `current_state`\n and wrt the `current_state`. Must have same shape as `current_state`. The\n only reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_kernel\").\n\n Returns:\n accepted_state: Tensor or Python list of `Tensor`s representing the state(s)\n of the Markov chain(s) at each result step. 
Has same shape as\n `current_state`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n\n Raises:\n ValueError: if there isn't one `step_size` or a list with same length as\n `current_state`.\n \"\"\"\n with ops.name_scope(\n name, \"hmc_kernel\",\n [current_state, step_size, num_leapfrog_steps, seed,\n current_target_log_prob, current_grads_target_log_prob]):\n with ops.name_scope(\"initialize\"):\n [current_state_parts, step_sizes, current_target_log_prob,\n current_grads_target_log_prob] = _prepare_args(\n target_log_prob_fn, current_state, step_size,\n current_target_log_prob, current_grads_target_log_prob,\n maybe_expand=True)\n independent_chain_ndims = distributions_util.prefer_static_rank(\n current_target_log_prob)\n current_momentums = []\n for s in current_state_parts:\n current_momentums.append(random_ops.random_normal(\n shape=array_ops.shape(s),\n dtype=s.dtype.base_dtype,\n seed=seed))\n seed = distributions_util.gen_new_seed(\n seed, salt=\"hmc_kernel_momentums\")\n\n num_leapfrog_steps = ops.convert_to_tensor(\n num_leapfrog_steps,\n dtype=dtypes.int32,\n name=\"num_leapfrog_steps\")\n [\n proposed_momentums,\n proposed_state_parts,\n proposed_target_log_prob,\n proposed_grads_target_log_prob,\n ] = _leapfrog_integrator(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n num_leapfrog_steps,\n current_target_log_prob,\n current_grads_target_log_prob)\n\n energy_change = _compute_energy_change(current_target_log_prob,\n current_momentums,\n proposed_target_log_prob,\n proposed_momentums,\n independent_chain_ndims)\n\n # u < exp(min(-energy, 0)), where u~Uniform[0,1)\n # ==> -log(u) >= max(e, 0)\n # ==> -log(u) >= e\n # (Perhaps surprisingly, we don't have a better way to obtain a random\n # uniform from positive reals, i.e., `tf.random_uniform(minval=0,\n # maxval=np.inf)` won't work.)\n random_uniform = random_ops.random_uniform(\n shape=array_ops.shape(energy_change),\n dtype=energy_change.dtype,\n seed=seed)\n random_positive = -math_ops.log(random_uniform)\n is_accepted = random_positive >= energy_change\n\n accepted_target_log_prob = array_ops.where(is_accepted,\n proposed_target_log_prob,\n current_target_log_prob)\n\n accepted_state_parts = [_choose(is_accepted,\n proposed_state_part,\n current_state_part,\n independent_chain_ndims)\n for current_state_part, proposed_state_part\n in zip(current_state_parts, proposed_state_parts)]\n\n accepted_grads_target_log_prob = [\n _choose(is_accepted,\n proposed_grad,\n grad,\n independent_chain_ndims)\n for proposed_grad, grad\n in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)]\n\n maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0]\n return [\n maybe_flatten(accepted_state_parts),\n KernelResults(\n acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)),\n current_grads_target_log_prob=accepted_grads_target_log_prob,\n current_target_log_prob=accepted_target_log_prob,\n energy_change=energy_change,\n is_accepted=is_accepted,\n proposed_grads_target_log_prob=proposed_grads_target_log_prob,\n proposed_state=maybe_flatten(proposed_state_parts),\n proposed_target_log_prob=proposed_target_log_prob,\n random_positive=random_positive,\n ),\n ]\n\n\ndef _leapfrog_integrator(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n num_leapfrog_steps,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Applies `num_leapfrog_steps` of the leapfrog 
integrator.\n\n Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.\n\n #### Examples:\n\n ##### Simple quadratic potential.\n\n ```python\n tfd = tf.contrib.distributions\n\n dims = 10\n num_iter = int(1e3)\n dtype = np.float32\n\n position = tf.placeholder(np.float32)\n momentum = tf.placeholder(np.float32)\n\n [\n new_momentums,\n new_positions,\n ] = hmc._leapfrog_integrator(\n current_momentums=[momentum],\n target_log_prob_fn=tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype)).log_prob,\n current_state_parts=[position],\n step_sizes=0.1,\n num_leapfrog_steps=3)[:2]\n\n sess.graph.finalize() # No more graph building.\n\n momentum_ = np.random.randn(dims).astype(dtype)\n position_ = np.random.randn(dims).astype(dtype)\n\n positions = np.zeros([num_iter, dims], dtype)\n for i in xrange(num_iter):\n position_, momentum_ = sess.run(\n [new_momentums[0], new_position[0]],\n feed_dict={position: position_, momentum: momentum_})\n positions[i] = position_\n\n plt.plot(positions[:, 0]); # Sinusoidal.\n ```\n\n Args:\n current_momentums: Tensor containing the value(s) of the momentum\n variable(s) to update.\n target_log_prob_fn: Python callable which takes an argument like\n `*current_state_parts` and returns its (possibly unnormalized) log-density\n under the target distribution.\n current_state_parts: Python `list` of `Tensor`s representing the current\n state(s) of the Markov chain(s). The first `independent_chain_ndims` of\n the `Tensor`(s) index different chains.\n step_sizes: Python `list` of `Tensor`s representing the step size for the\n leapfrog integrator. Must broadcast with the shape of\n `current_state_parts`. Larger step sizes lead to faster progress, but\n too-large step sizes make rejection exponentially more likely. When\n possible, it's often helpful to match per-variable step sizes to the\n standard deviations of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn(*current_state_parts)`. The only reason to specify\n this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `target_log_prob_fn(*current_state_parts`) wrt\n `current_state_parts`. Must have same shape as `current_state_parts`. The\n only reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_leapfrog_integrator\").\n\n Returns:\n proposed_momentums: Updated value of the momentum.\n proposed_state_parts: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. 
Has same shape as\n input `current_state_parts`.\n proposed_target_log_prob: `Tensor` representing the value of\n `target_log_prob_fn` at `accepted_state`.\n proposed_grads_target_log_prob: Gradient of `proposed_target_log_prob` wrt\n `accepted_state`.\n\n Raises:\n ValueError: if `len(momentums) != len(state_parts)`.\n ValueError: if `len(state_parts) != len(step_sizes)`.\n ValueError: if `len(state_parts) != len(grads_target_log_prob)`.\n TypeError: if `not target_log_prob.dtype.is_floating`.\n \"\"\"\n def _loop_body(step,\n current_momentums,\n current_state_parts,\n ignore_current_target_log_prob, # pylint: disable=unused-argument\n current_grads_target_log_prob):\n return [step + 1] + list(_leapfrog_step(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n current_grads_target_log_prob))\n\n with ops.name_scope(\n name, \"hmc_leapfrog_integrator\",\n [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,\n current_target_log_prob, current_grads_target_log_prob]):\n if len(current_momentums) != len(current_state_parts):\n raise ValueError(\"`momentums` must be in one-to-one correspondence \"\n \"with `state_parts`\")\n num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps,\n name=\"num_leapfrog_steps\")\n current_target_log_prob, current_grads_target_log_prob = (\n _maybe_call_fn_and_grads(\n target_log_prob_fn,\n current_state_parts,\n current_target_log_prob,\n current_grads_target_log_prob))\n return control_flow_ops.while_loop(\n cond=lambda iter_, *args: iter_ < num_leapfrog_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0), # iter_\n current_momentums,\n current_state_parts,\n current_target_log_prob,\n current_grads_target_log_prob,\n ],\n back_prop=False)[1:] # Lop-off \"iter_\".\n\n\ndef _leapfrog_step(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n current_grads_target_log_prob,\n name=None):\n \"\"\"Applies one step of the leapfrog integrator.\"\"\"\n with ops.name_scope(\n name, \"_leapfrog_step\",\n [current_momentums, current_state_parts, step_sizes,\n current_grads_target_log_prob]):\n proposed_momentums = [m + 0.5 * ss * g for m, ss, g\n in zip(current_momentums,\n step_sizes,\n current_grads_target_log_prob)]\n proposed_state_parts = [x + ss * m for x, ss, m\n in zip(current_state_parts,\n step_sizes,\n proposed_momentums)]\n proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts)\n if not proposed_target_log_prob.dtype.is_floating:\n raise TypeError(\"`target_log_prob_fn` must produce a `Tensor` \"\n \"with `float` `dtype`.\")\n proposed_grads_target_log_prob = gradients_ops.gradients(\n proposed_target_log_prob, proposed_state_parts)\n if any(g is None for g in proposed_grads_target_log_prob):\n raise ValueError(\n \"Encountered `None` gradient. 
Does your target `target_log_prob_fn` \"\n \"access all `tf.Variable`s via `tf.get_variable`?\\n\"\n \" current_state_parts: {}\\n\"\n \" proposed_state_parts: {}\\n\"\n \" proposed_grads_target_log_prob: {}\".format(\n current_state_parts,\n proposed_state_parts,\n proposed_grads_target_log_prob))\n proposed_momentums = [m + 0.5 * ss * g for m, ss, g\n in zip(proposed_momentums,\n step_sizes,\n proposed_grads_target_log_prob)]\n return [\n proposed_momentums,\n proposed_state_parts,\n proposed_target_log_prob,\n proposed_grads_target_log_prob,\n ]\n\n\ndef _compute_energy_change(current_target_log_prob,\n current_momentums,\n proposed_target_log_prob,\n proposed_momentums,\n independent_chain_ndims,\n name=None):\n \"\"\"Helper to `kernel` which computes the energy change.\"\"\"\n with ops.name_scope(\n name, \"compute_energy_change\",\n ([current_target_log_prob, proposed_target_log_prob,\n independent_chain_ndims] +\n current_momentums + proposed_momentums)):\n # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy\n # since they're a mouthful and lets us inline more.\n lk0, lk1 = [], []\n for current_momentum, proposed_momentum in zip(current_momentums,\n proposed_momentums):\n axis = math_ops.range(independent_chain_ndims,\n array_ops.rank(current_momentum))\n lk0.append(_log_sum_sq(current_momentum, axis))\n lk1.append(_log_sum_sq(proposed_momentum, axis))\n\n lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),\n axis=-1)\n lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),\n axis=-1)\n lp0 = -current_target_log_prob # log_potential\n lp1 = -proposed_target_log_prob # proposed_log_potential\n x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],\n axis=-1)\n\n # The sum is NaN if any element is NaN or we see both +Inf and -Inf.\n # Thus we will replace such rows with infinite energy change which implies\n # rejection. 
Recall that float-comparisons with NaN are always False.\n is_sum_determinate = (\n math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &\n math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))\n is_sum_determinate = array_ops.tile(\n is_sum_determinate[..., array_ops.newaxis],\n multiples=array_ops.concat([\n array_ops.ones(array_ops.rank(is_sum_determinate),\n dtype=dtypes.int32),\n [4],\n ], axis=0))\n x = array_ops.where(is_sum_determinate,\n x,\n array_ops.fill(array_ops.shape(x),\n value=x.dtype.as_numpy_dtype(np.inf)))\n\n return math_ops.reduce_sum(x, axis=-1)\n\n\ndef _choose(is_accepted,\n accepted,\n rejected,\n independent_chain_ndims,\n name=None):\n \"\"\"Helper to `kernel` which expand_dims `is_accepted` to apply tf.where.\"\"\"\n def _expand_is_accepted_like(x):\n with ops.name_scope(\"_choose\"):\n expand_shape = array_ops.concat([\n array_ops.shape(is_accepted),\n array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)],\n dtype=dtypes.int32),\n ], axis=0)\n multiples = array_ops.concat([\n array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32),\n array_ops.shape(x)[independent_chain_ndims:],\n ], axis=0)\n m = array_ops.tile(array_ops.reshape(is_accepted, expand_shape),\n multiples)\n m.set_shape(x.shape)\n return m\n with ops.name_scope(name, \"_choose\", values=[\n is_accepted, accepted, rejected, independent_chain_ndims]):\n return array_ops.where(_expand_is_accepted_like(accepted),\n accepted,\n rejected)\n\n\ndef _maybe_call_fn_and_grads(fn,\n fn_arg_list,\n fn_result=None,\n grads_fn_result=None,\n description=\"target_log_prob\"):\n \"\"\"Helper which computes `fn_result` and `grads` if needed.\"\"\"\n fn_arg_list = (list(fn_arg_list) if _is_list_like(fn_arg_list)\n else [fn_arg_list])\n if fn_result is None:\n fn_result = fn(*fn_arg_list)\n if not fn_result.dtype.is_floating:\n raise TypeError(\"`{}` must be a `Tensor` with `float` `dtype`.\".format(\n description))\n if grads_fn_result is None:\n grads_fn_result = gradients_ops.gradients(\n fn_result, fn_arg_list)\n if len(fn_arg_list) != len(grads_fn_result):\n raise ValueError(\"`{}` must be in one-to-one correspondence with \"\n \"`grads_{}`\".format(*[description]*2))\n if any(g is None for g in grads_fn_result):\n raise ValueError(\"Encountered `None` gradient.\")\n return fn_result, grads_fn_result\n\n\ndef _prepare_args(target_log_prob_fn, state, step_size,\n target_log_prob=None, grads_target_log_prob=None,\n maybe_expand=False, description=\"target_log_prob\"):\n \"\"\"Helper which processes input args to meet list-like assumptions.\"\"\"\n state_parts = list(state) if _is_list_like(state) else [state]\n state_parts = [ops.convert_to_tensor(s, name=\"state\")\n for s in state_parts]\n target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads(\n target_log_prob_fn,\n state_parts,\n target_log_prob,\n grads_target_log_prob,\n description)\n step_sizes = list(step_size) if _is_list_like(step_size) else [step_size]\n step_sizes = [\n ops.convert_to_tensor(\n s, name=\"step_size\", dtype=target_log_prob.dtype)\n for s in step_sizes]\n if len(step_sizes) == 1:\n step_sizes *= len(state_parts)\n if len(state_parts) != len(step_sizes):\n raise ValueError(\"There should be exactly one `step_size` or it should \"\n \"have same length as `current_state`.\")\n maybe_flatten = lambda x: x if maybe_expand or _is_list_like(state) else x[0]\n return [\n maybe_flatten(state_parts),\n maybe_flatten(step_sizes),\n target_log_prob,\n grads_target_log_prob,\n ]\n\n\ndef 
_is_list_like(x):\n \"\"\"Helper which returns `True` if input is `list`-like.\"\"\"\n return isinstance(x, (tuple, list))\n\n\ndef _log_sum_sq(x, axis=None):\n \"\"\"Computes log(sum(x**2)).\"\"\"\n return math_ops.reduce_logsumexp(2. * math_ops.log(math_ops.abs(x)), axis)\n"
] | [
[
"tensorflow.python.ops.array_ops.sequence_mask",
"tensorflow.python.ops.nn_ops.bias_add_grad",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.contrib.rnn.ops.gen_lstm_ops.block_lstm",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.contrib.rnn.ops.gen_lstm_ops.lstm_block_cell",
"tensorflow.python.layers.base.InputSpec",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple",
"tensorflow.python.ops.math_ops.to_int64"
],
[
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.distributions.util.prefer_static_rank",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.log",
"tensorflow.python.ops.math_ops.is_finite",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.control_flow_ops.while_loop",
"numpy.int32",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.functional_ops.scan",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.distributions.util.gen_new_seed",
"tensorflow.python.ops.math_ops.exp"
]
] |
DEVESHTARASIA/big-data-tutorial | [
"74e2aa1241c30913c5f12b9667f9d626002b98a2"
] | [
"tutorial/helpers.py"
] | [
"\"\"\"\nSmall helpers for code that is not shown in the notebooks\n\"\"\"\n\nfrom sklearn import neighbors, datasets, linear_model\nimport pylab as pl\nimport numpy as np\nfrom matplotlib.colors import ListedColormap\n\n# Create color maps for 3-class classification problem, as with iris\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\ndef plot_iris_knn():\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features. We could\n # avoid this ugly slicing by using a two-dim dataset\n y = iris.target\n\n knn = neighbors.KNeighborsClassifier(n_neighbors=3)\n knn.fit(X, y)\n\n x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1\n y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),\n np.linspace(y_min, y_max, 100))\n Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n pl.figure()\n pl.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\n pl.xlabel('sepal length (cm)')\n pl.ylabel('sepal width (cm)')\n pl.axis('tight')\n\n\ndef plot_polynomial_regression():\n rng = np.random.RandomState(0)\n x = 2*rng.rand(100) - 1\n\n f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9\n y = f(x) + .4 * rng.normal(size=100)\n\n x_test = np.linspace(-1, 1, 100)\n\n pl.figure()\n pl.scatter(x, y, s=4)\n\n X = np.array([x**i for i in range(5)]).T\n X_test = np.array([x_test**i for i in range(5)]).T\n regr = linear_model.LinearRegression()\n regr.fit(X, y)\n pl.plot(x_test, regr.predict(X_test), label='4th order')\n\n X = np.array([x**i for i in range(10)]).T\n X_test = np.array([x_test**i for i in range(10)]).T\n regr = linear_model.LinearRegression()\n regr.fit(X, y)\n pl.plot(x_test, regr.predict(X_test), label='9th order')\n\n pl.legend(loc='best')\n pl.axis('tight')\n pl.title('Fitting a 4th and a 9th order polynomial')\n\n pl.figure()\n pl.scatter(x, y, s=4)\n pl.plot(x_test, f(x_test), label=\"truth\")\n pl.axis('tight')\n pl.title('Ground truth (9th order polynomial)')"
] | [
[
"matplotlib.colors.ListedColormap",
"sklearn.linear_model.LinearRegression",
"numpy.random.RandomState",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.linspace",
"sklearn.datasets.load_iris"
]
] |
cosmic-cortex/torchkit | [
"9f44c8a500a4345d81feac14b6b200c5d190283a"
] | [
"torchkit/models/vision/segmentation/unet.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\n\r\ndef pad_to_shape(this, shp):\r\n \"\"\"\r\n Not a very safe function.\r\n \"\"\"\r\n return F.pad(this, (0, shp[3] - this.shape[3], 0, shp[2] - this.shape[2]))\r\n\r\n\r\nclass First(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, dropout=False):\r\n super(First, self).__init__()\r\n\r\n layers = [\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.first = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.first(x)\r\n\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(\r\n self, in_channels, middle_channels, out_channels,\r\n dropout=False, downsample_kernel=2\r\n ):\r\n super(Encoder, self).__init__()\r\n\r\n layers = [\r\n nn.MaxPool2d(kernel_size=downsample_kernel),\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.encoder = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.encoder(x)\r\n\r\n\r\nclass Center(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):\r\n super(Center, self).__init__()\r\n\r\n layers = [\r\n nn.MaxPool2d(kernel_size=2),\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True),\r\n nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.center = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.center(x)\r\n\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):\r\n super(Decoder, self).__init__()\r\n\r\n layers = [\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True),\r\n nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.decoder = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.decoder(x)\r\n\r\n\r\nclass Last(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, softmax=False):\r\n super(Last, self).__init__()\r\n\r\n layers = [\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n 
nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=1),\r\n nn.Sigmoid()\r\n ]\r\n\r\n if softmax:\r\n layers.append(nn.Softmax2d())\r\n\r\n self.first = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.first(x)\r\n\r\n\r\nclass UNet(nn.Module):\r\n def __init__(self, in_channels, out_channels, softmax=False):\r\n super(UNet, self).__init__()\r\n self.first = First(in_channels, 64, 64)\r\n self.encoder_1 = Encoder(64, 128, 128)\r\n self.encoder_2 = Encoder(128, 256, 256)\r\n self.encoder_3 = Encoder(256, 512, 512)\r\n self.center = Center(512, 1024, 1024, 512)\r\n self.decoder_3 = Decoder(1024, 512, 512, 256)\r\n self.decoder_2 = Decoder(512, 256, 256, 128)\r\n self.decoder_1 = Decoder(256, 128, 128, 64)\r\n self.last = Last(128, 64, out_channels, softmax=softmax)\r\n\r\n def forward(self, x):\r\n x_first = self.first(x)\r\n x_enc_1 = self.encoder_1(x_first)\r\n x_enc_2 = self.encoder_2(x_enc_1)\r\n x_enc_3 = self.encoder_3(x_enc_2)\r\n x_cent = self.center(x_enc_3)\r\n x_dec_3 = self.decoder_3(torch.cat([pad_to_shape(x_cent, x_enc_3.shape), x_enc_3], dim=1))\r\n x_dec_2 = self.decoder_2(torch.cat([pad_to_shape(x_dec_3, x_enc_2.shape), x_enc_2], dim=1))\r\n x_dec_1 = self.decoder_1(torch.cat([pad_to_shape(x_dec_2, x_enc_1.shape), x_enc_1], dim=1))\r\n return self.last(torch.cat([pad_to_shape(x_dec_1, x_first.shape), x_first], dim=1))\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d",
"torch.nn.Dropout2d",
"torch.nn.Softmax2d",
"torch.nn.functional.pad",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d"
]
] |
romquentin/decod_WM_Selection_and_maintenance | [
"fc1bf2f21959795fbea731f642cc750c2b61bce2"
] | [
"run_decoding/run_decoding_WM_across_epochs_and_conditions.py"
] | [
"\"\"\"Run decoding analyses in sensors space accross memory content and\nvisual perception for the working memory task and save decoding performance\"\"\"\n\n# Authors: Romain Quentin <rom.quentin@gmail.com>\n# Jean-Remi King <jeanremi.king@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\nimport numpy as np\nimport mne\nfrom h5io import read_hdf5\nfrom mne.decoding import GeneralizingEstimator, LinearModel\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import StratifiedKFold\nfrom jr.gat import (AngularRegression, scorer_spearman,\n scorer_angle)\nfrom base import (complete_behavior, get_events_interactions)\nfrom config import path_data\nimport sys\nsubject = sys.argv[1] # read a swarm file for parralel computing on biowulf\n\noutput_folder = '/sensors_accross_epochs_and_conditions/'\n# Create result folder\nresults_folder = op.join(path_data + 'results/' + subject + output_folder)\nif not os.path.exists(results_folder):\n os.makedirs(results_folder)\n\n# read behavior\nfname = op.join(path_data, subject, 'behavior_Target.hdf5')\nevents = read_hdf5(fname)\nevents = complete_behavior(events)\nevents = get_events_interactions(events)\n# read stimulus epochs\nfname = op.join(path_data, subject, 'epochs_Target.fif')\nepochs_target = mne.read_epochs(fname)\nepochs_target.pick_types(meg=True, ref_meg=False)\nepochs_target.crop(-0.2, 0.9)\n# read cue epochs\nfname = op.join(path_data, subject, 'epochs_Cue.fif')\nepochs_cue = mne.read_epochs(fname)\nepochs_cue.pick_types(meg=True, ref_meg=False)\nepochs_cue.crop(0, 1.5)\n# read probe epochs\nfname = op.join(path_data, subject, 'epochs_Probe.fif')\nepochs_probe = mne.read_epochs(fname)\nepochs_probe.pick_types(meg=True, ref_meg=False)\nepochs_probe.crop(0, 0.9)\n# Concatenate the data of the three epochs\nX0 = epochs_target._data\nX1 = epochs_cue._data\nX2 = epochs_probe._data\nX = np.concatenate((X0, X1, X2), axis=2)\n\n# Define pair of analyses (train on the 2nd and test on the 1st )\npaired_analyses = [['target_sfreq_cue_left_sfreq', 'left_sfreq'],\n ['target_sfreq_cue_right_sfreq', 'right_sfreq'],\n ['left_sfreq', 'target_sfreq_cue_left_sfreq'],\n ['right_sfreq', 'target_sfreq_cue_right_sfreq'],\n ['target_angle_cue_left_angle', 'left_angle'],\n ['target_angle_cue_right_angle', 'right_angle'],\n ['left_angle', 'target_angle_cue_left_angle'],\n ['right_angle', 'target_angle_cue_right_angle']]\n# Loop across each pair of analyses\nfor paired_analysis in paired_analyses:\n y_test = np.array(events[paired_analysis[0]])\n y_train = np.array(events[paired_analysis[1]])\n # Define estimators depending on the analysis\n if 'angle' in paired_analysis[0][:14]:\n clf = make_pipeline(StandardScaler(),\n LinearModel(AngularRegression(Ridge(),\n independent=False)))\n scorer = scorer_angle\n kwargs = dict()\n gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),\n n_jobs=24, **kwargs)\n y_test = np.array(y_test, dtype=float)\n y_train = np.array(y_train, dtype=float)\n elif 'sfreq' in paired_analysis[0][:14]:\n clf = make_pipeline(StandardScaler(), LinearModel(Ridge()))\n scorer = scorer_spearman\n kwargs = dict()\n gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),\n n_jobs=24, **kwargs)\n y_test = np.array(y_test, dtype=float)\n y_train = np.array(y_train, dtype=float)\n # only consider trials with correct fixation\n sel = 
np.where(events['is_eye_fixed'] == 1)[0]\n y_train = y_train[sel]\n y_test = y_test[sel]\n X = np.concatenate((X0, X1, X2), axis=2)\n X = X[sel]\n # only consider non NaN values\n # Run decoding accross condition\n cv = StratifiedKFold(7)\n scores = list()\n scs = list()\n if np.isnan(y_train).any():\n sel = np.where(~np.isnan(y_train))[0]\n for train, test in cv.split(X[sel], y_train[sel]):\n gat.fit(X[sel][train], y_train[sel][train])\n score = gat.score(X[sel][test], y_test[sel][test])\n sc = gat.score(X[sel][test], y_train[sel][test]) # test on same\n scores.append(score)\n scs.append(sc)\n scores = np.mean(scores, axis=0)\n scs = np.mean(scs, axis=0)\n else:\n for train, test in cv.split(X, y_train):\n y_te = y_test[test]\n X_te = X[test]\n y_te = y_te[np.where(~np.isnan(y_te))[0]]\n X_te = X_te[np.where(~np.isnan(y_te))[0]]\n y_tr = y_train[train]\n X_tr = X[train]\n y_tr = y_tr[np.where(~np.isnan(y_tr))[0]]\n X_tr = X_tr[np.where(~np.isnan(y_tr))[0]]\n y_tr_te = y_train[test]\n X_tr_te = X[test]\n y_tr_te = y_tr_te[np.where(~np.isnan(y_tr_te))[0]]\n X_tr_te = X_tr_te[np.where(~np.isnan(y_tr_te))[0]]\n gat.fit(X_tr, y_tr)\n score = gat.score(X_te, y_te)\n sc = gat.score(X_tr_te, y_tr_te) # test on same\n scores.append(score)\n scs.append(sc)\n scores = np.mean(scores, axis=0)\n scs = np.mean(scs, axis=0)\n\n # save cross-validated scores\n fname = results_folder +\\\n '%s_scores_%s_cross_%s.npy' % (subject,\n paired_analysis[0],\n paired_analysis[1])\n np.save(fname, np.array(scores)) # save accross condition scores\n fname = results_folder +\\\n '%s_scores_%s.npy' % (subject, paired_analysis[1])\n np.save(fname, np.array(scs)) # save scores test/train on same condition\n"
] | [
[
"sklearn.model_selection.StratifiedKFold",
"sklearn.linear_model.Ridge",
"numpy.where",
"sklearn.metrics.make_scorer",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.concatenate",
"numpy.isnan",
"numpy.mean"
]
] |
Pandinosaurus/dsbox | [
"aea56049025ed7e6e66427f8636286f8be1b6e03"
] | [
"dsbox/ml/visualization/metrics.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import roc_curve, auc\n\n__author__ = \"Aurélien Massiot\"\n__credits__ = \"https://github.com/octo-technology/bdacore\"\n__license__ = \"Apache 2.0\"\n\n\ndef plot_confusion_matrix(confusion_matrix, classes_list, normalize=True, figsize=(10, 7), fontsize=14, cmap=\"Blues\"):\n \"\"\"\n Display a pretty confusion matrix.\n\n Parameters\n ----------\n confusion_matrix : array-like\n\n classes_list : list,\n classes list of the confusion matrix\n\n normalize : boolean,\n normalize confusion matrix\n\n figsize : tuple, optional (default=(10,7))\n set the figure size\n\n fontsize : int, optional (default=14)\n set the font size\n\n cmap : str, optional (default=\"Blues\")\n set the colormap\n\n Returns\n -------\n Confusion matrix figure\n\n\n Examples\n --------\n >>> from dsbox.ml.visualization.metrics import plot_confusion_matrix\n >>> array = [[ 8458, 227, 1730], \\\n [ 1073, 37590, 1613], \\\n [ 2390, 1159, 17540]]\n >>> classes_list = [\"A\", \"B\", \"C\"]\n >>> plot_confusion_matrix(array, classes_list)\n \"\"\"\n confusion_matrix = np.array(confusion_matrix)\n\n fig, ax = plt.subplots(figsize=figsize)\n\n if normalize:\n normalized_cm = np.array(confusion_matrix).astype('float') / np.array(confusion_matrix).sum(axis=1)[:,\n np.newaxis]\n df_cm = pd.DataFrame(\n normalized_cm, index=classes_list, columns=classes_list,\n )\n plt.matshow(df_cm, fignum=0, cmap=cmap)\n else:\n df_cm = pd.DataFrame(\n confusion_matrix, index=classes_list, columns=classes_list,\n )\n plt.matshow(df_cm, fignum=0, cmap=cmap)\n ax.set_xticks(np.arange(len(classes_list)))\n ax.set_yticks(np.arange(len(classes_list)))\n ax.set_xticklabels(classes_list)\n ax.set_yticklabels(classes_list)\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n for i in range(len(classes_list)):\n for j in range(len(classes_list)):\n ax.text(j, i, confusion_matrix[i, j], ha=\"center\", va=\"center\", color=\"grey\", fontsize=fontsize)\n\n plt.ylabel('True labels')\n plt.xlabel('Predicted labels')\n plt.show()\n\n\ndef plot_roc_curve(y_test, y_pred_probas, proba_step=None):\n \"\"\"\n Plot ROC curve with probabilities thresholds.\n \n Parameters\n ----------\n y_test : array-like\n true labels\n \n y_pred_probas : array-like\n predicted labels\n \n proba_step : int (optional) (default=None)\n if set, give the step for each probability display. 
If None, nothing is displayed.\n\n Examples\n --------\n \n >>> from dsbox.ml.visualization.metrics import plot_roc_curve\n >>> from sklearn import datasets\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.ensemble import RandomForestClassifier\n \n >>> X, y = datasets.make_moons(noise=0.3, random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0)\n \n >>> clf = RandomForestClassifier(n_estimators=10, random_state=42)\n >>> _ = clf.fit(X_train, y_train)\n >>> y_pred_probas = clf.predict_proba(X_test)\n \n >>> plot_roc_curve(y_test, y_pred_probas, proba_step=2)\n\n \"\"\"\n fpr, tpr, thresholds = roc_curve(y_test, y_pred_probas[:, 1])\n auc_score = auc(fpr, tpr)\n\n plt.figure()\n lw = 1\n plt.plot(fpr, tpr, color='darkorange', lw=lw, marker='.')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n if proba_step is not None:\n i = 0\n for x, y, txt in zip(fpr, tpr, thresholds):\n if i % proba_step == 0:\n plt.annotate(np.round(txt, 2), (x, y - 0.04), color='darkgray', fontsize=8)\n i += 1\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic (ROC) - AUC score: {}'.format(str(np.round(auc_score,3))))\n plt.show()\n"
] | [
[
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.figure",
"sklearn.metrics.auc",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.ylim",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.round",
"matplotlib.pyplot.xlabel"
]
] |
aschueth/MetPy | [
"5e906c0fcfadccdc8514011d15d911243130d405"
] | [
"src/metpy/calc/thermo.py"
] | [
"# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Contains a collection of thermodynamic calculations.\"\"\"\nimport warnings\n\nimport numpy as np\nimport scipy.integrate as si\nimport scipy.optimize as so\n\nfrom .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,\n find_intersections, first_derivative, get_layer)\nfrom .. import constants as mpconsts\nfrom ..cbook import broadcast_indices\nfrom ..interpolate.one_dimension import interpolate_1d\nfrom ..package_tools import Exporter\nfrom ..units import check_units, concatenate, units\nfrom ..xarray import preprocess_xarray\n\nexporter = Exporter(globals())\n\nsat_pressure_0c = 6.112 * units.millibar\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[temperature]', '[temperature]')\ndef relative_humidity_from_dewpoint(temperature, dewpoint):\n r\"\"\"Calculate the relative humidity.\n\n Uses temperature and dewpoint in celsius to calculate relative\n humidity using the ratio of vapor pressure to saturation vapor pressures.\n\n Parameters\n ----------\n temperature : `pint.Quantity`\n air temperature\n dewpoint : `pint.Quantity`\n dewpoint temperature\n\n Returns\n -------\n `pint.Quantity`\n relative humidity\n\n See Also\n --------\n saturation_vapor_pressure\n\n \"\"\"\n e = saturation_vapor_pressure(dewpoint)\n e_s = saturation_vapor_pressure(temperature)\n return (e / e_s)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[pressure]')\ndef exner_function(pressure, reference_pressure=mpconsts.P0):\n r\"\"\"Calculate the Exner function.\n\n .. math:: \\Pi = \\left( \\frac{p}{p_0} \\right)^\\kappa\n\n This can be used to calculate potential temperature from temperature (and visa-versa),\n since\n\n .. math:: \\Pi = \\frac{T}{\\theta}\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n reference_pressure : `pint.Quantity`, optional\n The reference pressure against which to calculate the Exner function, defaults to\n metpy.constants.P0\n\n Returns\n -------\n `pint.Quantity`\n The value of the Exner function at the given pressure\n\n See Also\n --------\n potential_temperature\n temperature_from_potential_temperature\n\n \"\"\"\n return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef potential_temperature(pressure, temperature):\n r\"\"\"Calculate the potential temperature.\n\n Uses the Poisson equation to calculation the potential temperature\n given `pressure` and `temperature`.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n temperature : `pint.Quantity`\n air temperature\n\n Returns\n -------\n `pint.Quantity`\n The potential temperature corresponding to the temperature and\n pressure.\n\n See Also\n --------\n dry_lapse\n\n Notes\n -----\n Formula:\n\n .. math:: \\Theta = T (P_0 / P)^\\kappa\n\n Examples\n --------\n >>> from metpy.units import units\n >>> metpy.calc.potential_temperature(800. * units.mbar, 273. 
* units.kelvin)\n <Quantity(290.9665329591884, 'kelvin')>\n\n \"\"\"\n return temperature / exner_function(pressure)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef temperature_from_potential_temperature(pressure, potential_temperature):\n r\"\"\"Calculate the temperature from a given potential temperature.\n\n Uses the inverse of the Poisson equation to calculate the temperature from a\n given potential temperature at a specific pressure level.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n potential_temperature : `pint.Quantity`\n potential temperature\n\n Returns\n -------\n `pint.Quantity`\n The temperature corresponding to the potential temperature and pressure.\n\n See Also\n --------\n dry_lapse\n potential_temperature\n\n Notes\n -----\n Formula:\n\n .. math:: T = \\Theta (P / P_0)^\\kappa\n\n Examples\n --------\n >>> from metpy.units import units\n >>> from metpy.calc import temperature_from_potential_temperature\n >>> # potential temperature\n >>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin\n >>> p = 850 * units.mbar\n >>> T = temperature_from_potential_temperature(p, theta)\n\n \"\"\"\n return potential_temperature * exner_function(pressure)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[pressure]')\ndef dry_lapse(pressure, temperature, reference_pressure=None):\n r\"\"\"Calculate the temperature at a level assuming only dry processes.\n\n This function lifts a parcel starting at `temperature`, conserving\n potential temperature. The starting pressure can be given by `reference_pressure`.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest\n temperature : `pint.Quantity`\n The starting temperature\n reference_pressure : `pint.Quantity`, optional\n The reference pressure. If not given, it defaults to the first element of the\n pressure array.\n\n Returns\n -------\n `pint.Quantity`\n The resulting parcel temperature at levels given by `pressure`\n\n See Also\n --------\n moist_lapse : Calculate parcel temperature assuming liquid saturation processes\n parcel_profile : Calculate complete parcel profile\n potential_temperature\n\n \"\"\"\n if reference_pressure is None:\n reference_pressure = pressure[0]\n return temperature * (pressure / reference_pressure)**mpconsts.kappa\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[pressure]')\ndef moist_lapse(pressure, temperature, reference_pressure=None):\n r\"\"\"Calculate the temperature at a level assuming liquid saturation processes.\n\n This function lifts a parcel starting at `temperature`. The starting pressure can\n be given by `reference_pressure`. Essentially, this function is calculating moist\n pseudo-adiabats.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest\n temperature : `pint.Quantity`\n The starting temperature\n reference_pressure : `pint.Quantity`, optional\n The reference pressure. 
If not given, it defaults to the first element of the\n pressure array.\n\n Returns\n -------\n `pint.Quantity`\n The temperature corresponding to the starting temperature and\n pressure levels.\n\n See Also\n --------\n dry_lapse : Calculate parcel temperature assuming dry adiabatic processes\n parcel_profile : Calculate complete parcel profile\n\n Notes\n -----\n This function is implemented by integrating the following differential\n equation:\n\n .. math:: \\frac{dT}{dP} = \\frac{1}{P} \\frac{R_d T + L_v r_s}\n {C_{pd} + \\frac{L_v^2 r_s \\epsilon}{R_d T^2}}\n\n This equation comes from [Bakhshaii2013]_.\n\n \"\"\"\n def dt(t, p):\n t = units.Quantity(t, temperature.units)\n p = units.Quantity(p, pressure.units)\n rs = saturation_mixing_ratio(p, t)\n frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)\n / (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon\n / (mpconsts.Rd * t * t)))).to('kelvin')\n return (frac / p).magnitude\n\n if reference_pressure is None:\n reference_pressure = pressure[0]\n\n pressure = pressure.to('mbar')\n reference_pressure = reference_pressure.to('mbar')\n temperature = np.atleast_1d(temperature)\n\n side = 'left'\n\n pres_decreasing = (pressure[0] > pressure[-1])\n if pres_decreasing:\n # Everything is easier if pressures are in increasing order\n pressure = pressure[::-1]\n side = 'right'\n\n ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)\n\n ret_temperatures = np.empty((0, temperature.shape[0]))\n\n if reference_pressure > pressure.min():\n # Integrate downward in pressure\n pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)\n trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())\n ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))\n\n if reference_pressure < pressure.max():\n # Integrate upward in pressure\n pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)\n trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())\n ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))\n\n if pres_decreasing:\n ret_temperatures = ret_temperatures[::-1]\n\n return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):\n r\"\"\"Calculate the lifted condensation level (LCL) using from the starting point.\n\n The starting state for the parcel is defined by `temperature`, `dewpoint`,\n and `pressure`. If these are arrays, this function will return a LCL\n for every index. This function does work with surface grids as a result.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The starting atmospheric pressure\n temperature : `pint.Quantity`\n The starting temperature\n dewpoint : `pint.Quantity`\n The starting dewpoint\n\n Returns\n -------\n `pint.Quantity`\n The LCL pressure\n `pint.Quantity`\n The LCL temperature\n\n Other Parameters\n ----------------\n max_iters : int, optional\n The maximum number of iterations to use in calculation, defaults to 50.\n eps : float, optional\n The desired relative error in the calculated value, defaults to 1e-5.\n\n See Also\n --------\n parcel_profile\n\n Notes\n -----\n This function is implemented using an iterative approach to solve for the\n LCL. The basic algorithm is:\n\n 1. Find the dewpoint from the LCL pressure and starting mixing ratio\n 2. 
Find the LCL pressure from the starting temperature and dewpoint\n 3. Iterate until convergence\n\n The function is guaranteed to finish by virtue of the `max_iters` counter.\n\n \"\"\"\n def _lcl_iter(p, p0, w, t):\n td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))\n return (p0 * (td / t) ** (1. / mpconsts.kappa)).m\n\n w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)\n lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),\n xtol=eps, maxiter=max_iters)\n\n # np.isclose needed if surface is LCL due to precision error with np.log in dewpoint.\n # Causes issues with parcel_profile_with_lcl if removed. Issue #1187\n lcl_p = np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p) * pressure.units\n\n return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')\ndef lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,\n which='top'):\n r\"\"\"Calculate the level of free convection (LFC).\n\n This works by finding the first intersection of the ideal parcel path and\n the measured parcel temperature. If this intersection occurs below the LCL,\n the LFC is determined to be the same as the LCL, based upon the conditions\n set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically\n to saturation before it can freely rise.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure\n temperature : `pint.Quantity`\n The temperature at the levels given by `pressure`\n dewpoint : `pint.Quantity`\n The dewpoint at the levels given by `pressure`\n parcel_temperature_profile: `pint.Quantity`, optional\n The parcel temperature profile from which to calculate the LFC. Defaults to the\n surface parcel profile.\n dewpoint_start: `pint.Quantity`, optional\n The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface\n dewpoint.\n which: str, optional\n Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.\n 'top' returns the lowest-pressure LFC, default.\n 'bottom' returns the highest-pressure LFC.\n 'wide' returns the LFC whose corresponding EL is farthest away.\n 'most_cape' returns the LFC that results in the most CAPE in the profile.\n\n Returns\n -------\n `pint.Quantity`\n The LFC pressure, or array of same if which='all'\n `pint.Quantity`\n The LFC temperature, or array of same if which='all'\n\n See Also\n --------\n parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n # Default to surface parcel if no profile or starting pressure level is given\n if parcel_temperature_profile is None:\n new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)\n pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff\n parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)\n\n if dewpoint_start is None:\n dewpoint_start = dewpoint[0]\n\n # The parcel profile and data may have the same first data point.\n # If that is the case, ignore that point to get the real first\n # intersection for the LFC calculation. 
Use logarithmic interpolation.\n if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):\n x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],\n temperature[1:], direction='increasing', log_x=True)\n else:\n x, y = find_intersections(pressure, parcel_temperature_profile,\n temperature, direction='increasing', log_x=True)\n\n # Compute LCL for this parcel for future comparisons\n this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)\n\n # The LFC could:\n # 1) Not exist\n # 2) Exist but be equal to the LCL\n # 3) Exist and be above the LCL\n\n # LFC does not exist or is LCL\n if len(x) == 0:\n # Is there any positive area above the LCL?\n mask = pressure < this_lcl[0]\n if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):\n # LFC doesn't exist\n x, y = np.nan * pressure.units, np.nan * temperature.units\n else: # LFC = LCL\n x, y = this_lcl\n return x, y\n\n # LFC exists. Make sure it is no lower than the LCL\n else:\n idx = x < this_lcl[0]\n # LFC height < LCL height, so set LFC = LCL\n if not any(idx):\n el_pres, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],\n temperature[1:], direction='decreasing',\n log_x=True)\n if np.min(el_pres) > this_lcl[0]:\n x, y = np.nan * pressure.units, np.nan * temperature.units\n else:\n x, y = this_lcl\n return x, y\n # Otherwise, find all LFCs that exist above the LCL\n # What is returned depends on which flag as described in the docstring\n else:\n return _multiple_el_lfc_options(x, y, idx, which, pressure,\n parcel_temperature_profile, temperature,\n dewpoint, intersect_type='LFC')\n\n\ndef _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,\n which, pressure, parcel_temperature_profile, temperature,\n dewpoint, intersect_type):\n \"\"\"Choose which ELs and LFCs to return from a sounding.\"\"\"\n p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]\n if which == 'all':\n x, y = p_list, t_list\n elif which == 'bottom':\n x, y = p_list[0], t_list[0]\n elif which == 'top':\n x, y = p_list[-1], t_list[-1]\n elif which == 'wide':\n x, y = _wide_option(intersect_type, p_list, t_list, pressure,\n parcel_temperature_profile, temperature)\n elif which == 'most_cape':\n x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,\n dewpoint, parcel_temperature_profile)\n else:\n raise ValueError('Invalid option for \"which\". 
Valid options are \"top\", \"bottom\", '\n '\"wide\", \"most_cape\", and \"all\".')\n return x, y\n\n\ndef _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,\n temperature):\n \"\"\"Calculate the LFC or EL that produces the greatest distance between these points.\"\"\"\n # zip the LFC and EL lists together and find greatest difference\n if intersect_type == 'LFC':\n # Find EL intersection pressure values\n lfc_p_list = p_list\n el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],\n temperature[1:], direction='decreasing',\n log_x=True)\n else: # intersect_type == 'EL'\n el_p_list = p_list\n # Find LFC intersection pressure values\n lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,\n temperature, direction='increasing',\n log_x=True)\n diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]\n return (p_list[np.where(diff == np.max(diff))][0],\n t_list[np.where(diff == np.max(diff))][0])\n\n\ndef _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,\n parcel_temperature_profile):\n \"\"\"Calculate the LFC or EL that produces the most CAPE in the profile.\"\"\"\n # Need to loop through all possible combinations of cape, find greatest cape profile\n cape_list, pair_list = [], []\n for which_lfc in ['top', 'bottom']:\n for which_el in ['top', 'bottom']:\n cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,\n which_lfc=which_lfc, which_el=which_el)\n cape_list.append(cape.m)\n pair_list.append([which_lfc, which_el])\n (lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]\n if intersect_type == 'LFC':\n if lfc_chosen == 'top':\n x, y = p_list[-1], t_list[-1]\n else: # 'bottom' is returned\n x, y = p_list[0], t_list[0]\n else: # EL is returned\n if el_chosen == 'top':\n x, y = p_list[-1], t_list[-1]\n else:\n x, y = p_list[0], t_list[0]\n return x, y\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')\ndef el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):\n r\"\"\"Calculate the equilibrium level.\n\n This works by finding the last intersection of the ideal parcel path and\n the measured environmental temperature. If there are one or fewer intersections, there is\n no equilibrium level.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure profile\n temperature : `pint.Quantity`\n The temperature at the levels given by `pressure`\n dewpoint : `pint.Quantity`\n The dewpoint at the levels given by `pressure`\n parcel_temperature_profile: `pint.Quantity`, optional\n The parcel temperature profile from which to calculate the EL. Defaults to the\n surface parcel profile.\n which: str, optional\n Pick which EL to return. 
Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.\n 'top' returns the lowest-pressure EL, default.\n 'bottom' returns the highest-pressure EL.\n 'wide' returns the EL whose corresponding LFC is farthest away.\n 'most_cape' returns the EL that results in the most CAPE in the profile.\n\n Returns\n -------\n `pint.Quantity`\n The EL pressure, or array of same if which='all'\n `pint.Quantity`\n The EL temperature, or array of same if which='all'\n\n See Also\n --------\n parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n # Default to surface parcel if no profile or starting pressure level is given\n if parcel_temperature_profile is None:\n new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)\n pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff\n parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)\n\n # If the top of the sounding parcel is warmer than the environment, there is no EL\n if parcel_temperature_profile[-1] > temperature[-1]:\n return np.nan * pressure.units, np.nan * temperature.units\n\n # Interpolate in log space to find the appropriate pressure - units have to be stripped\n # and reassigned to allow np.log() to function properly.\n x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],\n direction='decreasing', log_x=True)\n lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])\n idx = x < lcl_p\n if len(x) > 0 and x[-1] < lcl_p:\n return _multiple_el_lfc_options(x, y, idx, which, pressure,\n parcel_temperature_profile, temperature, dewpoint,\n intersect_type='EL')\n else:\n return np.nan * pressure.units, np.nan * temperature.units\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef parcel_profile(pressure, temperature, dewpoint):\n r\"\"\"Calculate the profile a parcel takes through the atmosphere.\n\n The parcel starts at `temperature`, and `dewpoint`, lifted up\n dry adiabatically to the LCL, and then moist adiabatically from there.\n `pressure` specifies the pressure levels for the profile.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest. This array must be from\n high to low pressure.\n temperature : `pint.Quantity`\n The starting temperature\n dewpoint : `pint.Quantity`\n The starting dewpoint\n\n Returns\n -------\n `pint.Quantity`\n The parcel temperatures at the specified pressure levels.\n\n See Also\n --------\n lcl, moist_lapse, dry_lapse\n\n \"\"\"\n _, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)\n return concatenate((t_l, t_u))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef parcel_profile_with_lcl(pressure, temperature, dewpoint):\n r\"\"\"Calculate the profile a parcel takes through the atmosphere.\n\n The parcel starts at `temperature`, and `dewpoint`, lifted up\n dry adiabatically to the LCL, and then moist adiabatically from there.\n `pressure` specifies the pressure levels for the profile. This function returns\n a profile that includes the LCL.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest. This array must be from\n high to low pressure.\n temperature : `pint.Quantity`\n The atmospheric temperature at the levels in `pressure`. 
The first entry should be at\n the same level as the first `pressure` data point.\n dewpoint : `pint.Quantity`\n The atmospheric dewpoint at the levels in `pressure`. The first entry should be at\n the same level as the first `pressure` data point.\n\n Returns\n -------\n pressure : `pint.Quantity`\n The parcel profile pressures, which includes the specified levels and the LCL\n ambient_temperature : `pint.Quantity`\n The atmospheric temperature values, including the value interpolated to the LCL level\n ambient_dew_point : `pint.Quantity`\n The atmospheric dewpoint values, including the value interpolated to the LCL level\n profile_temperature : `pint.Quantity`\n The parcel profile temperatures at all of the levels in the returned pressures array,\n including the LCL.\n\n See Also\n --------\n lcl, moist_lapse, dry_lapse, parcel_profile\n\n \"\"\"\n p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],\n dewpoint[0])\n new_press = concatenate((p_l, p_lcl, p_u))\n prof_temp = concatenate((t_l, t_lcl, t_u))\n new_temp = _insert_lcl_level(pressure, temperature, p_lcl)\n new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)\n return new_press, new_temp, new_dewp, prof_temp\n\n\ndef _parcel_profile_helper(pressure, temperature, dewpoint):\n \"\"\"Help calculate parcel profiles.\n\n Returns the temperature and pressure, above, below, and including the LCL. The\n other calculation functions decide what to do with the pieces.\n\n \"\"\"\n # Find the LCL\n press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)\n press_lcl = press_lcl.to(pressure.units)\n\n # Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the\n # LCL is included in the levels. It's slightly redundant in that case, but simplifies\n # the logic for removing it later.\n press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))\n temp_lower = dry_lapse(press_lower, temperature)\n\n # If the pressure profile doesn't make it to the lcl, we can stop here\n if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):\n return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),\n temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))\n\n # Find moist pseudo-adiabatic profile starting at the LCL\n press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))\n temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)\n\n # Return profile pieces\n return (press_lower[:-1], press_lcl, press_upper[1:],\n temp_lower[:-1], temp_lcl, temp_upper[1:])\n\n\ndef _insert_lcl_level(pressure, temperature, lcl_pressure):\n \"\"\"Insert the LCL pressure into the profile.\"\"\"\n interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)\n\n # Pressure needs to be increasing for searchsorted, so flip it and then convert\n # the index back to the original array\n loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)\n return np.insert(temperature.m, loc, interp_temp.m) * temperature.units\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[dimensionless]')\ndef vapor_pressure(pressure, mixing_ratio):\n r\"\"\"Calculate water vapor (partial) pressure.\n\n Given total `pressure` and water vapor `mixing_ratio`, calculates the\n partial pressure of water vapor.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n\n Returns\n -------\n `pint.Quantity`\n The ambient water 
vapor (partial) pressure in the same units as\n `pressure`.\n\n Notes\n -----\n This function is a straightforward implementation of the equation given in many places,\n such as [Hobbs1977]_ pg.71:\n\n .. math:: e = p \\frac{r}{r + \\epsilon}\n\n See Also\n --------\n saturation_vapor_pressure, dewpoint\n\n \"\"\"\n return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[temperature]')\ndef saturation_vapor_pressure(temperature):\n r\"\"\"Calculate the saturation water vapor (partial) pressure.\n\n Parameters\n ----------\n temperature : `pint.Quantity`\n air temperature\n\n Returns\n -------\n `pint.Quantity`\n The saturation water vapor (partial) pressure\n\n See Also\n --------\n vapor_pressure, dewpoint\n\n Notes\n -----\n Instead of temperature, dewpoint may be used in order to calculate\n the actual (ambient) water vapor (partial) pressure.\n\n The formula used is that from [Bolton1980]_ for T in degrees Celsius:\n\n .. math:: 6.112 e^\\frac{17.67T}{T + 243.5}\n\n \"\"\"\n # Converted from original in terms of C to use kelvin. Using raw absolute values of C in\n # a formula plays havoc with units support.\n return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin)\n / (temperature - 29.65 * units.kelvin))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[temperature]', '[dimensionless]')\ndef dewpoint_from_relative_humidity(temperature, relative_humidity):\n r\"\"\"Calculate the ambient dewpoint given air temperature and relative humidity.\n\n Parameters\n ----------\n temperature : `pint.Quantity`\n air temperature\n relative_humidity : `pint.Quantity`\n relative humidity expressed as a ratio in the range 0 < relative_humidity <= 1\n\n Returns\n -------\n `pint.Quantity`\n The dewpoint temperature\n\n See Also\n --------\n dewpoint, saturation_vapor_pressure\n\n \"\"\"\n if np.any(relative_humidity > 1.2):\n warnings.warn('Relative humidity >120%, ensure proper units.')\n return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]')\ndef dewpoint(vapor_pressure):\n r\"\"\"Calculate the ambient dewpoint given the vapor pressure.\n\n Parameters\n ----------\n e : `pint.Quantity`\n Water vapor partial pressure\n\n Returns\n -------\n `pint.Quantity`\n dewpoint temperature\n\n See Also\n --------\n dewpoint_from_relative_humidity, saturation_vapor_pressure, vapor_pressure\n\n Notes\n -----\n This function inverts the [Bolton1980]_ formula for saturation vapor\n pressure to instead calculate the temperature. This yield the following\n formula for dewpoint in degrees Celsius:\n\n .. math:: T = \\frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}\n\n \"\"\"\n val = np.log(vapor_pressure / sat_pressure_0c)\n return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[pressure]', '[dimensionless]')\ndef mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate the mixing ratio of a gas.\n\n This calculates mixing ratio given its partial pressure and the total pressure of\n the air. 
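As a rough illustrative check (the numbers here are assumed for illustration only, not taken\n from the reference): a water vapor partial pressure of 10 hPa in a total pressure of 1000 hPa\n gives r = 0.622 * 10 / (1000 - 10), roughly 0.0063, i.e. about 6.3 g/kg. 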
There are no required units for the input arrays, other than that\n they have the same units.\n\n Parameters\n ----------\n partial_press : `pint.Quantity`\n Partial pressure of the constituent gas\n total_press : `pint.Quantity`\n Total air pressure\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)\n\n Notes\n -----\n This function is a straightforward implementation of the equation given in many places,\n such as [Hobbs1977]_ pg.73:\n\n .. math:: r = \\epsilon \\frac{e}{p - e}\n\n See Also\n --------\n saturation_mixing_ratio, vapor_pressure\n\n \"\"\"\n return (molecular_weight_ratio * partial_press\n / (total_press - partial_press)).to('dimensionless')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef saturation_mixing_ratio(total_press, temperature):\n r\"\"\"Calculate the saturation mixing ratio of water vapor.\n\n This calculation is given total pressure and the temperature. The implementation\n uses the formula outlined in [Hobbs1977]_ pg.73.\n\n Parameters\n ----------\n total_press: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n air temperature\n\n Returns\n -------\n `pint.Quantity`\n The saturation mixing ratio, dimensionless\n\n \"\"\"\n return mixing_ratio(saturation_vapor_pressure(temperature), total_press)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef equivalent_potential_temperature(pressure, temperature, dewpoint):\n r\"\"\"Calculate equivalent potential temperature.\n\n This calculation must be given an air parcel's pressure, temperature, and dewpoint.\n The implementation uses the formula outlined in [Bolton1980]_:\n\n First, the LCL temperature is calculated:\n\n .. math:: T_{L}=\\frac{1}{\\frac{1}{T_{D}-56}+\\frac{ln(T_{K}/T_{D})}{800}}+56\n\n Which is then used to calculate the potential temperature at the LCL:\n\n .. math:: \\theta_{DL}=T_{K}\\left(\\frac{1000}{p-e}\\right)^k\n \\left(\\frac{T_{K}}{T_{L}}\\right)^{.28r}\n\n Both of these are used to calculate the final equivalent potential temperature:\n\n .. math:: \\theta_{E}=\\theta_{DL}\\exp\\left[\\left(\\frac{3036.}{T_{L}}\n -1.78\\right)*r(1+.448r)\\right]\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Temperature of parcel\n dewpoint: `pint.Quantity`\n Dewpoint of parcel\n\n Returns\n -------\n `pint.Quantity`\n The equivalent potential temperature of the parcel\n\n Notes\n -----\n [Bolton1980]_ formula for Theta-e is used, since according to\n [DaviesJones2009]_ it is the most accurate non-iterative formulation\n available.\n\n \"\"\"\n t = temperature.to('kelvin').magnitude\n td = dewpoint.to('kelvin').magnitude\n p = pressure.to('hPa').magnitude\n e = saturation_vapor_pressure(dewpoint).to('hPa').magnitude\n r = saturation_mixing_ratio(pressure, dewpoint).magnitude\n\n t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)\n th_l = t * (1000 / (p - e)) ** mpconsts.kappa * (t / t_l) ** (0.28 * r)\n th_e = th_l * np.exp((3036. 
/ t_l - 1.78) * r * (1 + 0.448 * r))\n\n return th_e * units.kelvin\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef saturation_equivalent_potential_temperature(pressure, temperature):\n r\"\"\"Calculate saturation equivalent potential temperature.\n\n This calculation must be given an air parcel's pressure and temperature.\n The implementation uses the formula outlined in [Bolton1980]_ for the\n equivalent potential temperature, and assumes a saturated process.\n\n First, because we assume a saturated process, the temperature at the LCL is\n equivalent to the current temperature. Therefore the following equation\n\n .. math:: T_{L}=\\frac{1}{\\frac{1}{T_{D}-56}+\\frac{ln(T_{K}/T_{D})}{800}}+56\n\n reduces to\n\n .. math:: T_{L} = T_{K}\n\n Then the potential temperature at the temperature/LCL is calculated:\n\n .. math:: \\theta_{DL}=T_{K}\\left(\\frac{1000}{p-e}\\right)^k\n \\left(\\frac{T_{K}}{T_{L}}\\right)^{.28r}\n\n However, because\n\n .. math:: T_{L} = T_{K}\n\n it follows that\n\n .. math:: \\theta_{DL}=T_{K}\\left(\\frac{1000}{p-e}\\right)^k\n\n Both of these are used to calculate the final equivalent potential temperature:\n\n .. math:: \\theta_{E}=\\theta_{DL}\\exp\\left[\\left(\\frac{3036.}{T_{K}}\n -1.78\\right)*r(1+.448r)\\right]\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Temperature of parcel\n\n Returns\n -------\n `pint.Quantity`\n The saturation equivalent potential temperature of the parcel\n\n Notes\n -----\n [Bolton1980]_ formula for Theta-e is used (for saturated case), since according to\n [DaviesJones2009]_ it is the most accurate non-iterative formulation\n available.\n\n \"\"\"\n t = temperature.to('kelvin').magnitude\n p = pressure.to('hPa').magnitude\n e = saturation_vapor_pressure(temperature).to('hPa').magnitude\n r = saturation_mixing_ratio(pressure, temperature).magnitude\n\n th_l = t * (1000 / (p - e)) ** mpconsts.kappa\n th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))\n\n return th_es * units.kelvin\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[temperature]', '[dimensionless]', '[dimensionless]')\ndef virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate virtual temperature.\n\n This calculation must be given an air parcel's temperature and mixing ratio.\n The implementation uses the formula outlined in [Hobbs2006]_ pg.80.\n\n Parameters\n ----------\n temperature: `pint.Quantity`\n air temperature\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The corresponding virtual temperature of the parcel\n\n Notes\n -----\n .. 
math:: T_v = T \\frac{\\text{w} + \\epsilon}{\\epsilon\\,(1 + \\text{w})}\n\n \"\"\"\n return temperature * ((mixing_ratio + molecular_weight_ratio)\n / (molecular_weight_ratio * (1 + mixing_ratio)))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')\ndef virtual_potential_temperature(pressure, temperature, mixing_ratio,\n molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate virtual potential temperature.\n\n This calculation must be given an air parcel's pressure, temperature, and mixing ratio.\n The implementation uses the formula outlined in [Markowski2010]_ pg.13.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n air temperature\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The corresponding virtual potential temperature of the parcel\n\n Notes\n -----\n .. math:: \\Theta_v = \\Theta \\frac{\\text{w} + \\epsilon}{\\epsilon\\,(1 + \\text{w})}\n\n \"\"\"\n pottemp = potential_temperature(pressure, temperature)\n return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')\ndef density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate density.\n\n This calculation must be given an air parcel's pressure, temperature, and mixing ratio.\n The implementation uses the formula outlined in [Hobbs2006]_ pg.67.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n air temperature\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The corresponding density of the parcel\n\n Notes\n -----\n .. math:: \\rho = \\frac{p}{R_dT_v}\n\n \"\"\"\n virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)\n return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, web_bulb_temperature,\n **kwargs):\n r\"\"\"Calculate the relative humidity with wet bulb and dry bulb temperatures.\n\n This uses a psychrometric relationship as outlined in [WMO8-2014]_, with\n coefficients from [Fan1987]_.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n dry_bulb_temperature: `pint.Quantity`\n Dry bulb temperature\n web_bulb_temperature: `pint.Quantity`\n Wet bulb temperature\n\n Returns\n -------\n `pint.Quantity`\n Relative humidity\n\n Notes\n -----\n .. 
math:: relative_humidity = \\frac{e}{e_s}\n\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`e` is vapor pressure from the wet psychrometric calculation\n * :math:`e_s` is the saturation vapor pressure\n\n See Also\n --------\n psychrometric_vapor_pressure_wet, saturation_vapor_pressure\n\n \"\"\"\n return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,\n web_bulb_temperature, **kwargs)\n / saturation_vapor_pressure(dry_bulb_temperature))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,\n psychrometer_coefficient=6.21e-4 / units.kelvin):\n r\"\"\"Calculate the vapor pressure with wet bulb and dry bulb temperatures.\n\n This uses a psychrometric relationship as outlined in [WMO8-2014]_, with\n coefficients from [Fan1987]_.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n dry_bulb_temperature: `pint.Quantity`\n Dry bulb temperature\n wet_bulb_temperature: `pint.Quantity`\n Wet bulb temperature\n psychrometer_coefficient: `pint.Quantity`, optional\n Psychrometer coefficient. Defaults to 6.21e-4 K^-1.\n\n Returns\n -------\n `pint.Quantity`\n Vapor pressure\n\n Notes\n -----\n .. math:: e' = e'_w(T_w) - A p (T - T_w)\n\n * :math:`e'` is vapor pressure\n * :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature\n :math:`T_w`\n * :math:`p` is the pressure of the wet bulb\n * :math:`T` is the temperature of the dry bulb\n * :math:`T_w` is the temperature of the wet bulb\n * :math:`A` is the psychrometer coefficient\n\n Psychrometer coefficient depends on the specific instrument being used and the ventilation\n of the instrument.\n\n See Also\n --------\n saturation_vapor_pressure\n\n \"\"\"\n return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient\n * pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):\n r\"\"\"Calculate the mixing ratio from relative humidity, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n relative_humidity: array_like\n The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass\n a percentage if proper units are attached.\n\n Returns\n -------\n `pint.Quantity`\n Dimensionless mixing ratio\n\n Notes\n -----\n Formula adapted from [Hobbs1977]_ pg. 74.\n\n .. 
math:: w = (relative_humidity)(w_s)\n\n * :math:`w` is mixing ratio\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`w_s` is the saturation mixing ratio\n\n See Also\n --------\n relative_humidity_from_mixing_ratio, saturation_mixing_ratio\n\n \"\"\"\n return (relative_humidity\n * saturation_mixing_ratio(pressure, temperature)).to('dimensionless')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):\n r\"\"\"Calculate the relative humidity from mixing ratio, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n mixing_ratio: `pint.Quantity`\n Dimensionless mass mixing ratio\n\n Returns\n -------\n `pint.Quantity`\n Relative humidity\n\n Notes\n -----\n Formula based on that from [Hobbs1977]_ pg. 74.\n\n .. math:: relative_humidity = \\frac{w}{w_s}\n\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`w` is mixing ratio\n * :math:`w_s` is the saturation mixing ratio\n\n See Also\n --------\n mixing_ratio_from_relative_humidity, saturation_mixing_ratio\n\n \"\"\"\n return mixing_ratio / saturation_mixing_ratio(pressure, temperature)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[dimensionless]')\ndef mixing_ratio_from_specific_humidity(specific_humidity):\n r\"\"\"Calculate the mixing ratio from specific humidity.\n\n Parameters\n ----------\n specific_humidity: `pint.Quantity`\n Specific humidity of air\n\n Returns\n -------\n `pint.Quantity`\n Mixing ratio\n\n Notes\n -----\n Formula from [Salby1996]_ pg. 118.\n\n .. math:: w = \\frac{q}{1-q}\n\n * :math:`w` is mixing ratio\n * :math:`q` is the specific humidity\n\n See Also\n --------\n mixing_ratio, specific_humidity_from_mixing_ratio\n\n \"\"\"\n try:\n specific_humidity = specific_humidity.to('dimensionless')\n except AttributeError:\n pass\n return specific_humidity / (1 - specific_humidity)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[dimensionless]')\ndef specific_humidity_from_mixing_ratio(mixing_ratio):\n r\"\"\"Calculate the specific humidity from the mixing ratio.\n\n Parameters\n ----------\n mixing_ratio: `pint.Quantity`\n mixing ratio\n\n Returns\n -------\n `pint.Quantity`\n Specific humidity\n\n Notes\n -----\n Formula from [Salby1996]_ pg. 118.\n\n .. math:: q = \\frac{w}{1+w}\n\n * :math:`w` is mixing ratio\n * :math:`q` is the specific humidity\n\n See Also\n --------\n mixing_ratio, mixing_ratio_from_specific_humidity\n\n \"\"\"\n try:\n mixing_ratio = mixing_ratio.to('dimensionless')\n except AttributeError:\n pass\n return mixing_ratio / (1 + mixing_ratio)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):\n r\"\"\"Calculate the relative humidity from specific humidity, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n specific_humidity: `pint.Quantity`\n Specific humidity of air\n\n Returns\n -------\n `pint.Quantity`\n Relative humidity\n\n Notes\n -----\n Formula based on that from [Hobbs1977]_ pg. 74. and [Salby1996]_ pg. 118.\n\n .. 
math:: relative_humidity = \\frac{q}{(1-q)w_s}\n\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`q` is specific humidity\n * :math:`w_s` is the saturation mixing ratio\n\n See Also\n --------\n relative_humidity_from_mixing_ratio\n\n \"\"\"\n return (mixing_ratio_from_specific_humidity(specific_humidity)\n / saturation_mixing_ratio(pressure, temperature))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')\ndef cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',\n which_el='top'):\n r\"\"\"Calculate CAPE and CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile and parcel path. CIN is integrated between the surface and\n LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points\n of the measured temperature profile and parcel profile are logarithmically interpolated.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest, in order from highest to\n lowest pressure.\n temperature : `pint.Quantity`\n The atmospheric temperature corresponding to pressure.\n dewpoint : `pint.Quantity`\n The atmospheric dewpoint corresponding to pressure.\n parcel_profile : `pint.Quantity`\n The temperature profile of the parcel.\n which_lfc : str\n Choose which LFC to integrate from. Valid options are 'top', 'bottom', 'wide',\n and 'most_cape'. Default is 'bottom'.\n which_el : str\n Choose which EL to integrate to. Valid options are 'top', 'bottom', 'wide',\n and 'most_cape'. Default is 'top'.\n\n Returns\n -------\n `pint.Quantity`\n Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Convective INhibition (CIN).\n\n Notes\n -----\n Formula adopted from [Hobbs1977]_.\n\n .. math:: \\text{CAPE} = -R_d \\int_{LFC}^{EL} (T_{parcel} - T_{env}) d\\text{ln}(p)\n\n .. 
math:: \\text{CIN} = -R_d \\int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\\text{ln}(p)\n\n\n * :math:`CAPE` Convective available potential energy\n * :math:`CIN` Convective inhibition\n * :math:`LFC` Pressure of the level of free convection\n * :math:`EL` Pressure of the equilibrium level\n * :math:`SFC` Level of the surface or beginning of parcel path\n * :math:`R_d` Gas constant\n * :math:`g` Gravitational acceleration\n * :math:`T_{parcel}` Parcel temperature\n * :math:`T_{env}` Environment temperature\n * :math:`p` Atmospheric pressure\n\n See Also\n --------\n lfc, el\n\n \"\"\"\n pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,\n dewpoint, parcel_profile)\n # Calculate LFC limit of integration\n lfc_pressure, _ = lfc(pressure, temperature, dewpoint,\n parcel_temperature_profile=parcel_profile, which=which_lfc)\n\n # If there is no LFC, no need to proceed.\n if np.isnan(lfc_pressure):\n return 0 * units('J/kg'), 0 * units('J/kg')\n else:\n lfc_pressure = lfc_pressure.magnitude\n\n # Calculate the EL limit of integration\n el_pressure, _ = el(pressure, temperature, dewpoint,\n parcel_temperature_profile=parcel_profile, which=which_el)\n\n # No EL and we use the top reading of the sounding.\n if np.isnan(el_pressure):\n el_pressure = pressure[-1].magnitude\n else:\n el_pressure = el_pressure.magnitude\n\n # Difference between the parcel path and measured temperature profiles\n y = (parcel_profile - temperature).to(units.degK)\n\n # Estimate zero crossings\n x, y = _find_append_zero_crossings(np.copy(pressure), y)\n\n # CAPE\n # Only use data between the LFC and EL for calculation\n p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)\n x_clipped = x[p_mask].magnitude\n y_clipped = y[p_mask].magnitude\n cape = (mpconsts.Rd\n * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))\n\n # CIN\n # Only use data between the surface and LFC for calculation\n p_mask = _greater_or_close(x.m, lfc_pressure)\n x_clipped = x[p_mask].magnitude\n y_clipped = y[p_mask].magnitude\n cin = (mpconsts.Rd\n * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))\n\n # Set CIN to 0 if it's returned as a positive value (#1190)\n if cin > 0 * units('J/kg'):\n cin = 0 * units('J/kg')\n return cape, cin\n\n\ndef _find_append_zero_crossings(x, y):\n r\"\"\"\n Find and interpolate zero crossings.\n\n Estimate the zero crossings of an x,y series and add estimated crossings to series,\n returning a sorted array with no duplicate values.\n\n Parameters\n ----------\n x : `pint.Quantity`\n x values of data\n y : `pint.Quantity`\n y values of data\n\n Returns\n -------\n x : `pint.Quantity`\n x values of data\n y : `pint.Quantity`\n y values of data\n\n \"\"\"\n crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True)\n x = concatenate((x, crossings[0]))\n y = concatenate((y, crossings[1]))\n\n # Resort so that data are in order\n sort_idx = np.argsort(x)\n x = x[sort_idx]\n y = y[sort_idx]\n\n # Remove duplicate data points if there are any\n keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6\n x = x[keep_idx]\n y = y[keep_idx]\n return x, y\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef most_unstable_parcel(pressure, temperature, dewpoint, height=None,\n bottom=None, depth=300 * units.hPa):\n \"\"\"\n Determine the most unstable parcel in a layer.\n\n Determines the most unstable parcel of air by calculating the equivalent\n 
potential temperature and finding its maximum in the specified layer.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Atmospheric pressure profile\n temperature: `pint.Quantity`\n Atmospheric temperature profile\n dewpoint: `pint.Quantity`\n Atmospheric dewpoint profile\n height: `pint.Quantity`, optional\n Atmospheric height profile. Standard atmosphere assumed when None (the default).\n bottom: `pint.Quantity`, optional\n Bottom of the layer to consider for the calculation in pressure or height.\n Defaults to using the bottom pressure or height.\n depth: `pint.Quantity`, optional\n Depth of the layer to consider for the calculation in pressure or height. Defaults\n to 300 hPa.\n\n Returns\n -------\n `pint.Quantity`\n Pressure, temperature, and dewpoint of most unstable parcel in the profile.\n integer\n Index of the most unstable parcel in the given profile\n\n See Also\n --------\n get_layer\n\n \"\"\"\n p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,\n depth=depth, height=height, interpolate=False)\n theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)\n max_idx = np.argmax(theta_e)\n return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[temperature]', '[pressure]', '[temperature]')\ndef isentropic_interpolation(levels, pressure, temperature, *args, axis=0,\n temperature_out=False, max_iters=50, eps=1e-6,\n bottom_up_search=True, **kwargs):\n r\"\"\"Interpolate data in isobaric coordinates to isentropic coordinates.\n\n Parameters\n ----------\n levels : array\n One-dimensional array of desired potential temperature surfaces\n pressure : array\n One-dimensional array of pressure levels\n temperature : array\n Array of temperature\n axis : int, optional\n The axis corresponding to the vertical in the temperature array, defaults to 0.\n temperature_out : bool, optional\n If true, will calculate temperature and output as the last item in the output list.\n Defaults to False.\n max_iters : int, optional\n The maximum number of iterations to use in calculation, defaults to 50.\n eps : float, optional\n The desired absolute error in the calculated value, defaults to 1e-6.\n bottom_up_search : bool, optional\n Controls whether to search for levels bottom-up, or top-down. Defaults to\n True, which is bottom-up search.\n args : array, optional\n Any additional variables will be interpolated to each isentropic level.\n\n Returns\n -------\n list\n List with pressure at each isentropic level, followed by each additional\n argument interpolated to isentropic coordinates.\n\n Notes\n -----\n Input variable arrays must have the same number of vertical levels as the pressure levels\n array. Pressure is calculated on isentropic surfaces by assuming that temperature varies\n linearly with the natural log of pressure. Linear interpolation is then used in the\n vertical to find the pressure at each isentropic level. Interpolation method from\n [Ziv1994]_. 
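As a hedged usage sketch (the argument names ``isentlevs``, ``p``, ``tmp`` and ``rh`` are\n assumed, not defined in this module): ``isentropic_interpolation(isentlevs, p, tmp, rh,\n temperature_out=True)`` would return the isentropic-level pressures, the interpolated\n temperature, and the interpolated ``rh``, in that order. 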
Any additional arguments are assumed to vary linearly with temperature and will\n be linearly interpolated to the new isentropic levels.\n\n See Also\n --------\n potential_temperature\n\n \"\"\"\n # iteration function to be used later\n # Calculates theta from linearly interpolated temperature and solves for pressure\n def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):\n exner = pok * np.exp(-ka * iter_log_p)\n t = a * iter_log_p + b\n # Newton-Raphson iteration\n f = isentlevs_nd - t * exner\n fp = exner * (ka * t - a)\n return iter_log_p - (f / fp)\n\n # Get dimensions in temperature\n ndim = temperature.ndim\n\n # Convert units\n pres = pressure.to('hPa')\n temperature = temperature.to('kelvin')\n\n slices = [np.newaxis] * ndim\n slices[axis] = slice(None)\n slices = tuple(slices)\n pres = np.broadcast_to(pres[slices].magnitude, temperature.shape) * pres.units\n\n # Sort input data\n sort_pres = np.argsort(pres.m, axis=axis)\n sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)\n sorter = broadcast_indices(pres, sort_pres, ndim, axis)\n levs = pres[sorter]\n tmpk = temperature[sorter]\n\n levels = np.asarray(levels.m_as('kelvin')).reshape(-1)\n isentlevels = levels[np.argsort(levels)]\n\n # Make the desired isentropic levels the same shape as temperature\n shape = list(temperature.shape)\n shape[axis] = isentlevels.size\n isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)\n\n # exponent to Poisson's Equation, which is imported above\n ka = mpconsts.kappa.m_as('dimensionless')\n\n # calculate theta for each point\n pres_theta = potential_temperature(levs, tmpk)\n\n # Raise error if input theta level is larger than pres_theta max\n if np.max(pres_theta.m) < np.max(levels):\n raise ValueError('Input theta level out of data bounds')\n\n # Find log of pressure to implement assumption of linear temperature dependence on\n # ln(p)\n log_p = np.log(levs.m)\n\n # Calculations for interpolation routine\n pok = mpconsts.P0 ** ka\n\n # index values for each point for the pressure level nearest to the desired theta level\n above, below, good = find_bounding_indices(pres_theta.m, levels, axis,\n from_below=bottom_up_search)\n\n # calculate constants for the interpolation\n a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])\n b = tmpk.m[above] - a * log_p[above]\n\n # calculate first guess for interpolation\n isentprs = 0.5 * (log_p[above] + log_p[below])\n\n # Make sure we ignore any nans in the data for solving; checking a is enough since it\n # combines log_p and tmpk.\n good &= ~np.isnan(a)\n\n # iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above\n log_p_solved = so.fixed_point(_isen_iter, isentprs[good],\n args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),\n xtol=eps, maxiter=max_iters)\n\n # get back pressure from log p\n isentprs[good] = np.exp(log_p_solved)\n\n # Mask out points we know are bad as well as points that are beyond the max pressure\n isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan\n\n # create list for storing output data\n ret = [isentprs * units.hPa]\n\n # if temperature_out = true, calculate temperature and output as last item in list\n if temperature_out:\n ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)\n\n # do an interpolation for each additional argument\n if args:\n others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),\n axis=axis, return_list_always=True)\n ret.extend(others)\n\n return 
ret\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef surface_based_cape_cin(pressure, temperature, dewpoint):\n r\"\"\"Calculate surface-based CAPE and CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile for a surface-based parcel. CIN is integrated\n between the surface and LFC, CAPE is integrated between the LFC and EL (or top of\n sounding). Intersection points of the measured temperature profile and parcel profile are\n logarithmically interpolated.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile. The first entry should be the starting\n (surface) observation, with the array going from high to low pressure.\n temperature : `pint.Quantity`\n Temperature profile corresponding to the `pressure` profile.\n dewpoint : `pint.Quantity`\n Dewpoint profile corresponding to the `pressure` profile.\n\n Returns\n -------\n `pint.Quantity`\n Surface based Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Surface based Convective INhibition (CIN).\n\n See Also\n --------\n cape_cin, parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)\n return cape_cin(p, t, td, profile)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):\n r\"\"\"Calculate most unstable CAPE/CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile and most unstable parcel path. CIN is integrated between the\n surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).\n Intersection points of the measured temperature profile and parcel profile are\n logarithmically interpolated.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Pressure profile\n temperature : `pint.Quantity`\n Temperature profile\n dewpoint : `pint.Quantity`\n Dew point profile\n kwargs\n Additional keyword arguments to pass to `most_unstable_parcel`\n\n Returns\n -------\n `pint.Quantity`\n Most unstable Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Most unstable Convective INhibition (CIN).\n\n See Also\n --------\n cape_cin, most_unstable_parcel, parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n _, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)\n p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],\n temperature[parcel_idx:],\n dewpoint[parcel_idx:])\n return cape_cin(p, t, td, mu_profile)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):\n r\"\"\"Calculate mixed-layer CAPE and CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile and mixed-layer parcel path. CIN is integrated between the\n surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).\n Intersection points of the measured temperature profile and parcel profile are\n logarithmically interpolated. 
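As an illustrative sketch (the profile variables ``p``, ``T`` and ``Td`` are assumed):\n ``mixed_layer_cape_cin(p, T, Td, depth=50 * units.hPa)`` would mix the lowest 50 hPa of the\n sounding into a single parcel before computing CAPE and CIN. 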
Kwargs for `mixed_parcel` can be provided, such as `depth`.\n Default mixed-layer depth is 100 hPa.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Pressure profile\n temperature : `pint.Quantity`\n Temperature profile\n dewpoint : `pint.Quantity`\n Dewpoint profile\n kwargs\n Additional keyword arguments to pass to `mixed_parcel`\n\n Returns\n -------\n `pint.Quantity`\n Mixed-layer Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Mixed-layer Convective INhibition (CIN).\n\n See Also\n --------\n cape_cin, mixed_parcel, parcel_profile\n \"\"\"\n depth = kwargs.get('depth', 100 * units.hPa)\n parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,\n dewpoint, **kwargs)\n\n # Remove values below top of mixed layer and add in the mixed layer values\n pressure_prof = pressure[pressure < (pressure[0] - depth)]\n temp_prof = temperature[pressure < (pressure[0] - depth)]\n dew_prof = dewpoint[pressure < (pressure[0] - depth)]\n pressure_prof = concatenate([parcel_pressure, pressure_prof])\n temp_prof = concatenate([parcel_temp, temp_prof])\n dew_prof = concatenate([parcel_dewpoint, dew_prof])\n\n p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)\n return cape_cin(p, t, td, ml_profile)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,\n height=None, bottom=None, depth=100 * units.hPa, interpolate=True):\n r\"\"\"Calculate the properties of a parcel mixed from a layer.\n\n Determines the properties of an air parcel that is the result of complete mixing of a\n given atmospheric layer.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile\n temperature : `pint.Quantity`\n Atmospheric temperature profile\n dewpoint : `pint.Quantity`\n Atmospheric dewpoint profile\n parcel_start_pressure : `pint.Quantity`, optional\n Pressure at which the mixed parcel should begin (default None)\n height: `pint.Quantity`, optional\n Atmospheric heights corresponding to the given pressures (default None)\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure\n (default None)\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer\n (default 100 hPa)\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data\n\n Returns\n -------\n `pint.Quantity`\n The pressure of the mixed parcel\n `pint.Quantity`\n The temperature of the mixed parcel\n `pint.Quantity`\n The dewpoint of the mixed parcel\n\n \"\"\"\n # If a parcel starting pressure is not provided, use the surface\n if not parcel_start_pressure:\n parcel_start_pressure = pressure[0]\n\n # Calculate the potential temperature and mixing ratio over the layer\n theta = potential_temperature(pressure, temperature)\n mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)\n\n # Mix the variables over the layer\n mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,\n height=height, depth=depth,\n interpolate=interpolate)\n\n # Convert back to temperature\n mean_temperature = mean_theta * exner_function(parcel_start_pressure)\n\n # Convert back to dewpoint\n mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)\n\n # Using globals() here allows us to keep the dewpoint parameter but still call 
the\n # function of the same name.\n mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)\n\n return (parcel_start_pressure, mean_temperature.to(temperature.units),\n mean_dewpoint.to(dewpoint.units))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]')\ndef mixed_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,\n interpolate=True):\n r\"\"\"Mix variable(s) over a layer, yielding a mass-weighted average.\n\n This function will integrate a data variable with respect to pressure and determine the\n average value using the mean value theorem.\n\n Parameters\n ----------\n pressure : array-like\n Atmospheric pressure profile\n datavar : array-like\n Atmospheric variable measured at the given pressures\n height: array-like, optional\n Atmospheric heights corresponding to the given pressures (default None)\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure\n (default None)\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer\n (default 100 hPa)\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data (default True)\n\n Returns\n -------\n `pint.Quantity`\n The mixed value of the data variable.\n\n \"\"\"\n layer = get_layer(pressure, *args, height=height, bottom=bottom,\n depth=depth, interpolate=interpolate)\n p_layer = layer[0]\n datavars_layer = layer[1:]\n\n ret = []\n for datavar_layer in datavars_layer:\n actual_depth = abs(p_layer[0] - p_layer[-1])\n ret.append((-1. / actual_depth.m) * np.trapz(datavar_layer.m, p_layer.m)\n * datavar_layer.units)\n return ret\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef dry_static_energy(height, temperature):\n r\"\"\"Calculate the dry static energy of parcels.\n\n This function will calculate the dry static energy following the first two terms of\n equation 3.72 in [Hobbs2006]_.\n\n Notes\n -----\n .. math::\\text{dry static energy} = c_{pd} * T + gz\n\n * :math:`T` is temperature\n * :math:`z` is height\n\n Parameters\n ----------\n height : `pint.Quantity`\n Atmospheric height\n temperature : `pint.Quantity`\n Air temperature\n\n Returns\n -------\n `pint.Quantity`\n The dry static energy\n\n \"\"\"\n return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]', '[dimensionless]')\ndef moist_static_energy(height, temperature, specific_humidity):\n r\"\"\"Calculate the moist static energy of parcels.\n\n This function will calculate the moist static energy following\n equation 3.72 in [Hobbs2006]_.\n\n Notes\n -----\n .. 
math::\\text{moist static energy} = c_{pd} * T + gz + L_v q\n\n * :math:`T` is temperature\n * :math:`z` is height\n * :math:`q` is specific humidity\n\n Parameters\n ----------\n height : `pint.Quantity`\n Atmospheric height\n temperature : `pint.Quantity`\n Air temperature\n specific_humidity : `pint.Quantity`\n Atmospheric specific humidity\n\n Returns\n -------\n `pint.Quantity`\n The moist static energy\n\n \"\"\"\n return (dry_static_energy(height, temperature)\n + mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef thickness_hydrostatic(pressure, temperature, mixing_ratio=None,\n molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):\n r\"\"\"Calculate the thickness of a layer via the hypsometric equation.\n\n This thickness calculation uses the pressure and temperature profiles (and optionally\n mixing ratio) via the hypsometric equation with virtual temperature adjustment\n\n .. math:: Z_2 - Z_1 = -\\frac{R_d}{g} \\int_{p_1}^{p_2} T_v d\\ln p,\n\n which is based off of Equation 3.24 in [Hobbs2006]_.\n\n This assumes a hydrostatic atmosphere.\n\n Layer bottom and depth specified in pressure.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile\n temperature : `pint.Quantity`\n Atmospheric temperature profile\n mixing_ratio : `pint.Quantity`, optional\n Profile of dimensionless mass mixing ratio. If none is given, virtual temperature\n is simply set to be the given temperature.\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n bottom : `pint.Quantity`, optional\n The bottom of the layer in pressure. Defaults to the first observation.\n depth : `pint.Quantity`, optional\n The depth of the layer in hPa. 
Defaults to the full profile if bottom is not given,\n and 100 hPa if bottom is given.\n\n Returns\n -------\n `pint.Quantity`\n The thickness of the layer in meters.\n\n See Also\n --------\n thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature\n\n \"\"\"\n # Get the data for the layer, conditional upon bottom/depth being specified and mixing\n # ratio being given\n if bottom is None and depth is None:\n if mixing_ratio is None:\n layer_p, layer_virttemp = pressure, temperature\n else:\n layer_p = pressure\n layer_virttemp = virtual_temperature(temperature, mixing_ratio,\n molecular_weight_ratio)\n else:\n if mixing_ratio is None:\n layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,\n depth=depth)\n else:\n layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,\n bottom=bottom, depth=depth)\n layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)\n\n # Take the integral (with unit handling) and return the result in meters\n return (- mpconsts.Rd / mpconsts.g * np.trapz(\n layer_virttemp.m_as('K'), x=np.log(layer_p.m_as('hPa'))) * units.K).to('m')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,\n bottom=None, depth=None):\n r\"\"\"Calculate the thickness of a layer given pressure, temperature and relative humidity.\n\n Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,\n temperature, and relative humidity profiles via the hypsometric equation with virtual\n temperature adjustment.\n\n .. math:: Z_2 - Z_1 = -\\frac{R_d}{g} \\int_{p_1}^{p_2} T_v d\\ln p,\n\n which is based off of Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated\n from the profiles of temperature and relative humidity.\n\n This assumes a hydrostatic atmosphere.\n\n Layer bottom and depth specified in pressure.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile\n temperature : `pint.Quantity`\n Atmospheric temperature profile\n relative_humidity : `pint.Quantity`\n Atmospheric relative humidity profile. The relative humidity is expressed as a\n unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are\n attached.\n bottom : `pint.Quantity`, optional\n The bottom of the layer in pressure. Defaults to the first observation.\n depth : `pint.Quantity`, optional\n The depth of the layer in hPa. Defaults to the full profile if bottom is not given,\n and 100 hPa if bottom is given.\n\n Returns\n -------\n `pint.Quantity`\n The thickness of the layer in meters.\n\n See Also\n --------\n thickness_hydrostatic, pressure_to_height_std, virtual_temperature,\n mixing_ratio_from_relative_humidity\n\n \"\"\"\n mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)\n\n return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,\n depth=depth)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef brunt_vaisala_frequency_squared(height, potential_temperature, axis=0):\n r\"\"\"Calculate the square of the Brunt-Vaisala frequency.\n\n Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the\n formula:\n\n .. 
math:: N^2 = \\frac{g}{\\theta} \\frac{d\\theta}{dz}\n\n This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.\n\n Parameters\n ----------\n height : `pint.Quantity`\n One-dimensional profile of atmospheric height\n potential_temperature : `pint.Quantity`\n Atmospheric potential temperature\n axis : int, optional\n The axis corresponding to vertical in the potential temperature array, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n The square of the Brunt-Vaisala frequency.\n\n See Also\n --------\n brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature\n\n \"\"\"\n # Ensure validity of temperature units\n potential_temperature = potential_temperature.to('K')\n\n # Calculate and return the square of Brunt-Vaisala frequency\n return mpconsts.g / potential_temperature * first_derivative(potential_temperature,\n x=height, axis=axis)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef brunt_vaisala_frequency(height, potential_temperature, axis=0):\n r\"\"\"Calculate the Brunt-Vaisala frequency.\n\n This function will calculate the Brunt-Vaisala frequency as follows:\n\n .. math:: N = \\left( \\frac{g}{\\theta} \\frac{d\\theta}{dz} \\right)^\\frac{1}{2}\n\n This formula based off of Equations 3.75 and 3.77 in [Hobbs2006]_.\n\n This function is a wrapper for `brunt_vaisala_frequency_squared` that filters out negative\n (unstable) quantities and takes the square root.\n\n Parameters\n ----------\n height : `pint.Quantity`\n One-dimensional profile of atmospheric height\n potential_temperature : `pint.Quantity`\n Atmospheric potential temperature\n axis : int, optional\n The axis corresponding to vertical in the potential temperature array, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n Brunt-Vaisala frequency.\n\n See Also\n --------\n brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature\n\n \"\"\"\n bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,\n axis=axis)\n bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan\n\n return np.sqrt(bv_freq_squared)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef brunt_vaisala_period(height, potential_temperature, axis=0):\n r\"\"\"Calculate the Brunt-Vaisala period.\n\n This function is a helper function for `brunt_vaisala_frequency` that calculates the\n period of oscillation as in Exercise 3.13 of [Hobbs2006]_:\n\n .. math:: \\tau = \\frac{2\\pi}{N}\n\n Returns `NaN` when :math:`N^2 > 0`.\n\n Parameters\n ----------\n height : `pint.Quantity`\n One-dimensional profile of atmospheric height\n potential_temperature : pint.Quantity`\n Atmospheric potential temperature\n axis : int, optional\n The axis corresponding to vertical in the potential temperature array, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n Brunt-Vaisala period.\n\n See Also\n --------\n brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature\n\n \"\"\"\n bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,\n axis=axis)\n bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan\n\n return 2 * np.pi / np.sqrt(bv_freq_squared)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef wet_bulb_temperature(pressure, temperature, dewpoint):\n \"\"\"Calculate the wet-bulb temperature using Normand's rule.\n\n This function calculates the wet-bulb temperature using the Normand method. 
The LCL is\n computed, and that parcel brought down to the starting pressure along a moist adiabat.\n The Normand method (and others) are described and compared by [Knox2017]_.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Initial atmospheric pressure\n temperature : `pint.Quantity`\n Initial atmospheric temperature\n dewpoint : `pint.Quantity`\n Initial atmospheric dewpoint\n\n Returns\n -------\n `pint.Quantity`\n Wet-bulb temperature\n\n See Also\n --------\n lcl, moist_lapse\n\n \"\"\"\n if not hasattr(pressure, 'shape'):\n pressure = np.atleast_1d(pressure)\n temperature = np.atleast_1d(temperature)\n dewpoint = np.atleast_1d(dewpoint)\n\n it = np.nditer([pressure, temperature, dewpoint, None],\n op_dtypes=['float', 'float', 'float', 'float'],\n flags=['buffered'])\n\n for press, temp, dewp, ret in it:\n press = press * pressure.units\n temp = temp * temperature.units\n dewp = dewp * dewpoint.units\n lcl_pressure, lcl_temperature = lcl(press, temp, dewp)\n moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),\n lcl_temperature)\n ret[...] = moist_adiabat_temperatures[-1].magnitude\n\n # If we started with a scalar, return a scalar\n if it.operands[3].size == 1:\n return it.operands[3][0] * moist_adiabat_temperatures.units\n return it.operands[3] * moist_adiabat_temperatures.units\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef static_stability(pressure, temperature, axis=0):\n r\"\"\"Calculate the static stability within a vertical profile.\n\n .. math:: \\sigma = -\\frac{RT}{p} \\frac{\\partial \\ln \\theta}{\\partial p}\n\n This formula is based on equation 4.3.6 in [Bluestein1992]_.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Profile of atmospheric pressure\n temperature : `pint.Quantity`\n Profile of temperature\n axis : int, optional\n The axis corresponding to vertical in the pressure and temperature arrays, defaults\n to 0.\n\n Returns\n -------\n `pint.Quantity`\n The profile of static stability.\n\n \"\"\"\n theta = potential_temperature(pressure, temperature)\n\n return - mpconsts.Rd * temperature / pressure * first_derivative(np.log(theta.m_as('K')),\n x=pressure, axis=axis)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):\n r\"\"\"Calculate the dewpoint from specific humidity, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n specific_humidity: `pint.Quantity`\n Specific humidity of air\n\n Returns\n -------\n `pint.Quantity`\n Dew point temperature\n\n See Also\n --------\n relative_humidity_from_mixing_ratio, dewpoint_from_relative_humidity\n\n \"\"\"\n return dewpoint_from_relative_humidity(temperature,\n relative_humidity_from_specific_humidity(\n pressure, temperature, specific_humidity))\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]/[time]', '[pressure]', '[temperature]')\ndef vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):\n r\"\"\"Calculate omega from w assuming hydrostatic conditions.\n\n This function converts vertical velocity with respect to height\n :math:`\\left(w = \\frac{Dz}{Dt}\\right)` to that\n with respect to pressure :math:`\\left(\\omega = \\frac{Dp}{Dt}\\right)`\n assuming hydrostatic conditions on the synoptic scale.\n By Equation 7.33 in [Hobbs2006]_,\n\n .. 
math:: \\omega \\simeq -\\rho g w\n\n Density (:math:`\\rho`) is calculated using the :func:`density` function,\n from the given pressure and temperature. If `mixing_ratio` is given, the virtual\n temperature correction is used, otherwise, dry air is assumed.\n\n Parameters\n ----------\n w: `pint.Quantity`\n Vertical velocity in terms of height\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n mixing_ratio: `pint.Quantity`, optional\n Mixing_ratio ratio of air\n\n Returns\n -------\n `pint.Quantity`\n Vertical velocity in terms of pressure (in Pascals / second)\n\n See Also\n --------\n density, vertical_velocity\n\n \"\"\"\n rho = density(pressure, temperature, mixing_ratio)\n return (-mpconsts.g * rho * w).to('Pa/s')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]/[time]', '[pressure]', '[temperature]')\ndef vertical_velocity(omega, pressure, temperature, mixing_ratio=0):\n r\"\"\"Calculate w from omega assuming hydrostatic conditions.\n\n This function converts vertical velocity with respect to pressure\n :math:`\\left(\\omega = \\frac{Dp}{Dt}\\right)` to that with respect to height\n :math:`\\left(w = \\frac{Dz}{Dt}\\right)` assuming hydrostatic conditions on\n the synoptic scale. By Equation 7.33 in [Hobbs2006]_,\n\n .. math:: \\omega \\simeq -\\rho g w\n\n so that\n\n .. math:: w \\simeq \\frac{- \\omega}{\\rho g}\n\n Density (:math:`\\rho`) is calculated using the :func:`density` function,\n from the given pressure and temperature. If `mixing_ratio` is given, the virtual\n temperature correction is used, otherwise, dry air is assumed.\n\n Parameters\n ----------\n omega: `pint.Quantity`\n Vertical velocity in terms of pressure\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n mixing_ratio: `pint.Quantity`, optional\n Mixing ratio of air\n\n Returns\n -------\n `pint.Quantity`\n Vertical velocity in terms of height (in meters / second)\n\n See Also\n --------\n density, vertical_velocity_pressure\n\n \"\"\"\n rho = density(pressure, temperature, mixing_ratio)\n return (omega / (- mpconsts.g * rho)).to('m/s')\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef specific_humidity_from_dewpoint(pressure, dewpoint):\n r\"\"\"Calculate the specific humidity from the dewpoint temperature and pressure.\n\n Parameters\n ----------\n dewpoint: `pint.Quantity`\n dewpoint temperature\n\n pressure: `pint.Quantity`\n pressure\n\n Returns\n -------\n `pint.Quantity`\n Specific humidity\n\n See Also\n --------\n mixing_ratio, saturation_mixing_ratio\n\n \"\"\"\n mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)\n return specific_humidity_from_mixing_ratio(mixing_ratio)\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef lifted_index(pressure, temperature, parcel_profile):\n \"\"\"Calculate Lifted Index from the pressure temperature and parcel profile.\n\n Lifted index formula derived from [Galway1956]_ and referenced by [Doswell-Schultz2006]_:\n LI = T500 - Tp500\n where:\n T500 is the measured temperature at 500 hPa.\n Tp500 is the temperature of the lifted parcel at 500 hPa.\n\n Calculation of the lifted index is defined as the temperature difference between the\n observed 500 hPa temperature and the temperature of a parcel lifted from the\n surface to 500 hPa.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of 
interest, in order from highest to\n        lowest pressure.\n    temperature : `pint.Quantity`\n        The atmospheric temperature corresponding to pressure.\n    parcel_profile : `pint.Quantity`\n        The temperature profile of the parcel.\n\n    Returns\n    -------\n    `pint.Quantity`\n        Lifted Index.\n\n    \"\"\"\n    # find the index for the 500 hPa pressure level.\n    idx = np.where(pressure == 500 * units.hPa)\n    # find the measured temperature at 500 hPa.\n    T500 = temperature[idx]\n    # find the parcel profile temperature at 500 hPa.\n    Tp500 = parcel_profile[idx]\n    # calculate the lifted index.\n    lifted_index = T500 - Tp500.to(units.degC)\n    return lifted_index\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]', '[speed]', '[speed]')\ndef gradient_richardson_number(height, potential_temperature, u, v, axis=0):\n    r\"\"\"Calculate the gradient (or flux) Richardson number.\n\n    .. math:: Ri = (g/\\theta) * \\frac{\\left(\\partial \\theta/\\partial z\\right)}\n              {[\\left(\\partial u / \\partial z\\right)^2 + \\left(\\partial v / \\partial z\\right)^2]}\n\n    See [Holton2004]_ pg. 121-122. As noted by [Holton2004]_, flux Richardson\n    number values below 0.25 indicate turbulence.\n\n    Parameters\n    ----------\n    height : `pint.Quantity`\n        Atmospheric height\n    potential_temperature : `pint.Quantity`\n        Atmospheric potential temperature\n    u : `pint.Quantity`\n        x component of the wind\n    v : `pint.Quantity`\n        y component of the wind\n    axis : int, optional\n        The axis corresponding to vertical, defaults to 0.\n\n    Returns\n    -------\n    `pint.Quantity`\n        Gradient Richardson number\n    \"\"\"\n    dthetadz = first_derivative(potential_temperature, x=height, axis=axis)\n    dudz = first_derivative(u, x=height, axis=axis)\n    dvdz = first_derivative(v, x=height, axis=axis)\n\n    return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2))\n"
] | [
[
"numpy.any",
"scipy.optimize.fixed_point",
"numpy.argsort",
"numpy.insert",
"numpy.copy",
"numpy.isclose",
"numpy.log",
"numpy.trapz",
"numpy.ediff1d",
"numpy.append",
"numpy.isnan",
"numpy.where",
"numpy.searchsorted",
"numpy.argmax",
"numpy.max",
"numpy.min",
"numpy.broadcast_to",
"numpy.array",
"numpy.zeros_like",
"numpy.empty",
"numpy.swapaxes",
"numpy.exp",
"numpy.atleast_1d",
"numpy.nditer",
"numpy.nanmin",
"numpy.sqrt",
"numpy.concatenate"
]
] |
mrcagney/googlemaps_helpers | [
"75dfcc3e5e788d04c3af3e7608909b349ac83e8d"
] | [
"googlemaps_helpers/main.py"
] | [
"from itertools import product\nimport math\nfrom collections import OrderedDict\nfrom pathlib import Path\nimport logging\n\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport shapely.geometry as sg\nimport googlemaps\n\n\n# Configure logging\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s %(name)-12s %(levelname)-8s \\n%(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\nWGS84 = {'init': 'epsg:4326'}\n# Maximum number of elements in a Google Maps Distance Matrix API query\nMAX_ELEMENTS = 100\n\ndef flip_coords(xy_list):\n \"\"\"\n Given a list of coordinate pairs, swap the first and second\n coordinates and return the resulting list.\n \"\"\"\n return [(y, x) for (x, y) in xy_list]\n\ndef make_ids(n, prefix='row_'):\n \"\"\"\n Return a list of ``n`` (integer) unique strings of the form\n ``prefix``<number>.\n \"\"\"\n k = int(math.log10(n)) + 1 # Number of digits for padding\n return [prefix + '{num:0{pad}d}'.format(num=i, pad=k) for i in range(n)]\n\ndef to_df(distance_matrix_response, origin_ids=None, destination_ids=None):\n \"\"\"\n Given a (decoded) JSON response to a Google Maps\n Distance Matrix API call, convert it into a DataFrame with the\n following columns.\n\n - ``'origin_address'``\n - ``'origin_id'``: ID of origin; defaults to an element of\n :func:`make_ids`\n - ``'destination_address'``\n - ``'destination_id'``: ID of destination; defaluts to an element of\n :func:`make_ids`\n - ``'duration'``: time from origin to destination; includes\n time in traffic if that's available in the response\n - ``'distance'``: distance from origin to destination\n\n The origin and destination addresses in the response can optionally\n be assigned IDs by setting ``origin_ids`` (list of strings) and\n ``destination_ids`` (list of strings).\n \"\"\"\n # Initialize\n r = distance_matrix_response\n columns = ['origin_address', 'destination_address', 'origin_id',\n 'destination_id', 'duration', 'distance']\n f = pd.DataFrame([], columns=columns)\n\n # Append addresses\n if not r['rows']:\n return f\n\n f['origin_address'], f['destination_address'] = zip(\n *product(r['origin_addresses'], r['destination_addresses']))\n\n # Append IDs\n if origin_ids is None:\n origin_ids = make_ids(len(r['origin_addresses']))\n\n if destination_ids is None:\n destination_ids = make_ids(len(r['destination_addresses']))\n\n f['origin_id'], f['destination_id'] = zip(\n *product(origin_ids, destination_ids))\n\n # Append durations and distances\n durs = []\n dists = []\n for row in r['rows']:\n for e in row['elements']:\n if e['status'] == 'OK':\n if 'duration_in_traffic' in e:\n dur_key = 'duration_in_traffic'\n else:\n dur_key = 'duration'\n durs.append(e[dur_key]['value'])\n dists.append(e['distance']['value'])\n else:\n durs.append(np.nan)\n dists.append(np.nan)\n f['duration'] = durs\n f['distance'] = dists\n\n return f\n\ndef point_df_to_gdf(f, x_col='lon', y_col='lat', from_crs=WGS84):\n \"\"\"\n Given a DataFrame of points with x coordinates\n in the column ``x_col`` and y coordinates in the column ``y_col``,\n with respect to the GeoPandas coordinate reference system\n ``from_crs`` (dictionary), convert the DataFrame into a GeoDataFrame\n with that coordinate reference system and with a ``'geometry'``\n column that corresponds to the points.\n Delete the original x and y columns, and return the result.\n \"\"\"\n f = f.copy()\n f['geometry'] = f[[x_col, 
y_col]].apply(lambda p: sg.Point(p), axis=1)\n f = f.drop([x_col, y_col], axis=1)\n f = gpd.GeoDataFrame(f)\n f.crs = from_crs\n return f\n\ndef point_gdf_to_df(f, x_col='lon', y_col='lat', to_crs=WGS84):\n \"\"\"\n The inverse of :func:`point_df_to_gdf`.\n Given a GeoDataFrame of points, convert to the coordinate reference\n system ``to_crs`` (dictionary), then split its ``'geometry'`` column\n into x coordinates in the column ``x_col`` and y coordinates in the\n columns ``y_col``, deleting the ``'geometry'`` column afterwards.\n Coerce the result into a DataFrame and return it.\n \"\"\"\n f = f.copy()\n if f.crs is None:\n raise ValueError('GeoDataFrame needs a crs attribute')\n if f.crs != to_crs:\n f = f.to_crs(to_crs)\n\n f[x_col], f[y_col] = zip(*f['geometry'].map(lambda p: p.coords[0]))\n del f['geometry']\n return pd.DataFrame(f)\n\ndef build_distance_matrix_df(client, origins_gdf, destinations_gdf,\n origin_id_col=None, destination_id_col=None,\n max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):\n \"\"\"\n Compute the duration-distance matrix between the given origins\n and destinations, assuming that the number of origins multiplied\n by the number of destinations is at most ``max_elements``.\n To do this, call the Google Maps Distance Matrix API once.\n\n INPUT:\n\n - ``client``: google-maps-services-python Client instance\n - ``origins_gdf``: GeoDataFrame of point; the origins\n - ``destinations_gdf``: GeoDataFrame of points; the destinations\n - ``origin_id_col``: string; name of ID column in ``origins_gdf``\n - ``destination_id_col``: string; name of ID column in\n ``destinations_gdf``\n - ``max_elements``: integer; max number of elements allowable in\n one Google Maps Distance Matrix API call\n - ``distance_matrix_kwargs``: dictionary; keyword arguments for\n Google Maps Distance Matrix API\n\n OUTPUT:\n\n A DataFrame of the form output by :func:`to_df` where the origins\n come from ``origins_gdf`` and the destinations come from\n ``destinations_gdf``.\n\n Return an empty DataFrame with the expected column names if an\n HTTPError on Timeout exception occurs.\n \"\"\"\n # Initialize origin and destinations GeoDataFrames\n o_gdf = origins_gdf.copy()\n d_gdf = destinations_gdf.copy()\n\n n = o_gdf.shape[0]*d_gdf.shape[0]\n if n > max_elements:\n raise ValueError('Number of origins times number of destinations '\n 'is {}, which exceeds threshold of {} elements'.format(\n n, max_elements))\n\n # Prepare origin data\n if o_gdf.crs != WGS84:\n o_gdf = o_gdf.to_crs(WGS84)\n if origin_id_col is None:\n origin_id_col = 'temp_id'\n o_gdf[origin_id_col] = make_ids(o_gdf.shape[0])\n\n o_locs = [geo.coords[0] for geo in o_gdf['geometry']]\n o_ids = o_gdf[origin_id_col].values\n\n # Prepare destination data\n if d_gdf.crs != WGS84:\n d_gdf = d_gdf.to_crs(WGS84)\n if destination_id_col is None:\n destination_id_col = 'temp_id'\n d_gdf[destination_id_col] = make_ids(d_gdf.shape[0])\n\n d_locs = [geo.coords[0] for geo in d_gdf['geometry']]\n d_ids = d_gdf[destination_id_col].values\n\n # Get matrix info\n try:\n r = client.distance_matrix(flip_coords(o_locs),\n flip_coords(d_locs), **distance_matrix_kwargs)\n f = to_df(r, o_ids, d_ids)\n except (googlemaps.exceptions.HTTPError, googlemaps.exceptions.Timeout):\n # Empty DataFrame\n f = pd.DataFrame(columns=[\n 'origin_address',\n 'origin_id',\n 'destination_address',\n 'destination_id',\n 'duration',\n 'distance',\n ])\n\n return f\n\ndef run_distance_matrix_job(client, origins_gdf, destinations_gdf, out_dir,\n origin_id_col=None, 
destination_id_col=None,\n max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):\n \"\"\"\n Compute the duration-distance matrix between the given origins\n and destinations.\n To do this, call the Google Maps Distance Matrix API repeatedly,\n ensuring that each call uses no more than ``max_elements`` elements.\n\n INPUT:\n\n - ``client``: google-maps-services-python Client instance\n - ``origins_gdf``: GeoDataFrame of points; the origins\n - ``destinations_gdf``: GeoDataFrame of points; the destinations\n - ``out_dir``: string or Path object of a directory at which\n to store the output files; create the directory if it does not\n exist\n - ``origin_id_col``: string; name of ID column in ``origins_gdf``\n - ``destination_id_col``: string; name of ID column in\n ``destinations_gdf``\n - ``max_elements``: integer; max number of elements allowable in\n one Google Maps Distance Matrix API call\n - ``distance_matrix_kwargs``: dictionary; keyword arguments for\n Google Maps Distance Matrix API\n\n OUTPUT:\n\n A collection of CSV files located at ``out_dir`` of the form output\n by :func:`to_df`, where the origins comes from ``origins_gdf`` and\n the destinations come from ``destinations_gdf``.\n Each file will contains one origin points and at most\n ``max_elements`` destination points, for a total of at most\n ``max_elements`` rows.\n An empty DataFrame with the expected column names will be saved to\n file if an HTTPError on Timeout exception occurs.\n This can happen if, for example, the daily query limit is exceeded.\n \"\"\"\n o_gdf = origins_gdf.copy()\n d_gdf = destinations_gdf.copy()\n\n n_o = o_gdf.shape[0]\n n_d = d_gdf.shape[0]\n\n # Create IDs if necessary\n if origin_id_col is None:\n origin_id_col = 'ersatz_origin_id'\n o_gdf[origin_id_col] = make_ids(n_o, 'orig_row_')\n\n if destination_id_col is None:\n destination_id_col = 'ersatz_destination_id'\n d_gdf[destination_id_col] = make_ids(n_d, 'dest_row_')\n\n # Get mode for logging\n mode = distance_matrix_kwargs.get('mode', 'driving')\n\n # Make output directory if it does not exist\n out_dir = Path(out_dir)\n if not out_dir.exists():\n out_dir.mkdir(parents=True)\n\n # Iterate through origins.\n # For each origin segment all destinations into chunks of size\n # at most ``max_elements``.\n # For each destination chunk, build a one-to-many matrix from the\n # origin to all the destinations in the chunk and save it to file.\n for ix, orig_id in o_gdf[[origin_id_col]].itertuples():\n logger.info('Working on origin {} of {} (id {})'.format(\n ix + 1, n_o, orig_id))\n\n # Chunk destinations and build one-to-many matrices from origin\n # to destination chunks.\n # A failed attempt (e.g. 
through API usage over limit)\n # will build an empty matrix\n for j in range(math.ceil(n_d/max_elements)):\n n1 = max_elements*j\n n2 = min(max_elements*(j + 1), n_d)\n dest_id1, dest_id2 = (\n d_gdf[destination_id_col].iat[n1],\n d_gdf[destination_id_col].iat[n2 - 1]\n )\n path = Path(out_dir)/'{}_from_{}_to_{}--{}.csv'.format(\n mode, orig_id, dest_id1, dest_id2)\n f = build_distance_matrix_df(client, o_gdf.loc[ix:ix],\n d_gdf.iloc[n1:n2],\n origin_id_col=origin_id_col,\n destination_id_col=destination_id_col,\n **distance_matrix_kwargs)\n f.to_csv(path, index=False)\n\n if f.empty:\n logger.info('* Failed to get data for ' + path.stem)\n\ndef compute_cost(n, cost=0.5/1000, num_freebies=0,\n daily_limit=100000, chunk_size=MAX_ELEMENTS):\n \"\"\"\n Estimate the cost of a sequence of Google Maps Distance Matrix\n queries comprising a total of n elements at ``cost`` USD per\n element, where the first ``num_freebies`` (integer) elements are\n free.\n Return a Series that includes the cost and some other metadata.\n \"\"\"\n d = OrderedDict()\n d['#elements'] = n\n d['exceeds {!s}-element daily limit?'.format(daily_limit)] = (\n n > daily_limit)\n d['estimated cost for job in USD'] = max(0, n - num_freebies)*cost\n d['estimated duration for job in minutes'] = n/chunk_size/60\n return pd.Series(d)\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] |
quantumalaviya/keras | [
"8d874de12ed2e199d9528bfff891f4f60ee2a636"
] | [
"keras/saving/saved_model/layer_serialization.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes and functions implementing Layer SavedModel serialization.\"\"\"\n\nfrom keras.mixed_precision import policy\nfrom keras.saving.saved_model import base_serialization\nfrom keras.saving.saved_model import constants\nfrom keras.saving.saved_model import save_impl\nfrom keras.saving.saved_model import serialized_attributes\nfrom keras.utils import generic_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass LayerSavedModelSaver(base_serialization.SavedModelSaver):\n \"\"\"Implements Layer SavedModel serialization.\"\"\"\n\n @property\n def object_identifier(self):\n return constants.LAYER_IDENTIFIER\n\n @property\n def python_properties(self):\n # TODO(kathywu): Add python property validator\n return self._python_properties_internal()\n\n def _python_properties_internal(self):\n \"\"\"Returns dictionary of all python properties.\"\"\"\n # TODO(kathywu): Add support for metrics serialization.\n # TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once\n # the python config serialization has caught up.\n metadata = dict(\n name=self.obj.name,\n trainable=self.obj.trainable,\n expects_training_arg=self.obj._expects_training_arg, # pylint: disable=protected-access\n dtype=policy.serialize(self.obj._dtype_policy), # pylint: disable=protected-access\n batch_input_shape=getattr(self.obj, '_batch_input_shape', None),\n stateful=self.obj.stateful,\n must_restore_from_config=self.obj._must_restore_from_config, # pylint: disable=protected-access\n )\n\n metadata.update(get_serialized(self.obj))\n if self.obj.input_spec is not None:\n # Layer's input_spec has already been type-checked in the property setter.\n metadata['input_spec'] = tf.nest.map_structure(\n lambda x: generic_utils.serialize_keras_object(x) if x else None,\n self.obj.input_spec)\n if (self.obj.activity_regularizer is not None and\n hasattr(self.obj.activity_regularizer, 'get_config')):\n metadata['activity_regularizer'] = generic_utils.serialize_keras_object(\n self.obj.activity_regularizer)\n if self.obj._build_input_shape is not None: # pylint: disable=protected-access\n metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access\n return metadata\n\n def objects_to_serialize(self, serialization_cache):\n return (self._get_serialized_attributes(\n serialization_cache).objects_to_serialize)\n\n def functions_to_serialize(self, serialization_cache):\n return (self._get_serialized_attributes(\n serialization_cache).functions_to_serialize)\n\n def _get_serialized_attributes(self, serialization_cache):\n \"\"\"Generates or retrieves serialized attributes from cache.\"\"\"\n keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})\n if self.obj in keras_cache:\n return keras_cache[self.obj]\n\n serialized_attr = keras_cache[self.obj] = (\n 
serialized_attributes.SerializedAttributes.new(self.obj))\n\n if (save_impl.should_skip_serialization(self.obj) or\n self.obj._must_restore_from_config): # pylint: disable=protected-access\n return serialized_attr\n\n object_dict, function_dict = self._get_serialized_attributes_internal(\n serialization_cache)\n\n serialized_attr.set_and_validate_objects(object_dict)\n serialized_attr.set_and_validate_functions(function_dict)\n return serialized_attr\n\n def _get_serialized_attributes_internal(self, serialization_cache):\n \"\"\"Returns dictionary of serialized attributes.\"\"\"\n objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)\n functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)\n # Attribute validator requires that the default save signature is added to\n # function dict, even if the value is None.\n functions['_default_save_signature'] = None\n return objects, functions\n\n\n# TODO(kathywu): Move serialization utils (and related utils from\n# generic_utils.py) to a separate file.\ndef get_serialized(obj):\n with generic_utils.skip_failed_serialization():\n # Store the config dictionary, which may be used when reviving the object.\n # When loading, the program will attempt to revive the object from config,\n # and if that fails, the object will be revived from the SavedModel.\n return generic_utils.serialize_keras_object(obj)\n\n\nclass InputLayerSavedModelSaver(base_serialization.SavedModelSaver):\n \"\"\"InputLayer serialization.\"\"\"\n\n @property\n def object_identifier(self):\n return constants.INPUT_LAYER_IDENTIFIER\n\n @property\n def python_properties(self):\n\n return dict(\n class_name=type(self.obj).__name__,\n name=self.obj.name,\n dtype=self.obj.dtype,\n sparse=self.obj.sparse,\n ragged=self.obj.ragged,\n batch_input_shape=self.obj._batch_input_shape, # pylint: disable=protected-access\n config=self.obj.get_config())\n\n def objects_to_serialize(self, serialization_cache):\n return {}\n\n def functions_to_serialize(self, serialization_cache):\n return {}\n\n\nclass RNNSavedModelSaver(LayerSavedModelSaver):\n \"\"\"RNN layer serialization.\"\"\"\n\n @property\n def object_identifier(self):\n return constants.RNN_LAYER_IDENTIFIER\n\n def _get_serialized_attributes_internal(self, serialization_cache):\n objects, functions = (\n super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(\n serialization_cache))\n states = tf.__internal__.tracking.wrap(self.obj.states)\n # SaveModel require all the objects to be Trackable when saving.\n # If the states is still a tuple after wrap_or_unwrap, it means it doesn't\n # contain any trackable item within it, eg empty tuple or (None, None) for\n # stateless ConvLSTM2D. We convert them to list so that wrap_or_unwrap can\n # make it a Trackable again for saving. When loaded, ConvLSTM2D is\n # able to handle the tuple/list conversion.\n if isinstance(states, tuple):\n states = tf.__internal__.tracking.wrap(list(states))\n objects['states'] = states\n return objects, functions\n\n\nclass VocabularySavedModelSaver(LayerSavedModelSaver):\n \"\"\"Handles vocabulary layer serialization.\n\n This class is needed for StringLookup, IntegerLookup, and TextVectorization,\n which all have a vocabulary as part of the config. Currently, we keep this\n vocab as part of the config until saving, when we need to clear it to avoid\n initializing a StaticHashTable twice (once when restoring the config and once\n when restoring restoring module resources). 
After clearing the vocab, we\n  persist a property to the layer indicating it was constructed with a vocab.\n  \"\"\"\n\n  @property\n  def python_properties(self):\n    # TODO(kathywu): Add python property validator\n    metadata = self._python_properties_internal()\n    # Clear the vocabulary from the config during saving.\n    metadata['config']['vocabulary'] = None\n    # Persist a property to track that a vocabulary was passed on construction.\n    metadata['config']['has_input_vocabulary'] = self.obj._has_input_vocabulary  # pylint: disable=protected-access\n    return metadata\n"
] | [
[
"tensorflow.compat.v2.__internal__.tracking.wrap"
]
] |
Pengchengpcx/Neighbor-Sampling-GCN | [
"4b47385bdbfeb5957a56b05c441482e701dd10de"
] | [
"build/lib/pygcn/utils.py"
] | [
"import numpy as np\nimport scipy.sparse as sp\nimport torch\n\n\ndef encode_onehot(labels):\n    classes = set(labels)\n    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in\n                    enumerate(classes)}\n    labels_onehot = np.array(list(map(classes_dict.get, labels)),\n                             dtype=np.int32)\n    return labels_onehot\n\n\ndef load_data(path=\"../data/cora/\", dataset=\"cora\"):\n    \"\"\"Load citation network dataset (cora only for now)\"\"\"\n    print('Loading {} dataset...'.format(dataset))\n\n    idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n                                        dtype=np.dtype(str))\n    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n    labels = encode_onehot(idx_features_labels[:, -1])\n\n    # build graph\n    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n    idx_map = {j: i for i, j in enumerate(idx)}\n    edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n                                    dtype=np.int32)\n    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n                     dtype=np.int32).reshape(edges_unordered.shape)\n    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n                        shape=(labels.shape[0], labels.shape[0]),\n                        dtype=np.float32)\n\n    # build symmetric adjacency matrix\n    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n    features = normalize(features)\n    adj = normalize(adj + sp.eye(adj.shape[0]))\n\n    idx_train = range(140)\n    idx_val = range(200, 500)\n    idx_test = range(500, 1500)\n\n    features = torch.FloatTensor(np.array(features.todense()))\n    labels = torch.LongTensor(np.where(labels)[1])\n    adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n    idx_train = torch.LongTensor(idx_train)\n    idx_val = torch.LongTensor(idx_val)\n    idx_test = torch.LongTensor(idx_test)\n\n    return adj, features, labels, idx_train, idx_val, idx_test\n\n\ndef normalize(mx):\n    \"\"\"Row-normalize sparse matrix\"\"\"\n    rowsum = np.array(mx.sum(1))\n    r_inv = np.power(rowsum, -1).flatten()\n    r_inv[np.isinf(r_inv)] = 0.\n    r_mat_inv = sp.diags(r_inv)\n    mx = r_mat_inv.dot(mx)\n    return mx\n\n\ndef accuracy(output, labels):\n    preds = output.max(1)[1].type_as(labels)\n    correct = preds.eq(labels).double()\n    correct = correct.sum()\n    return correct / len(labels)\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n    \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n    sparse_mx = sparse_mx.tocoo().astype(np.float32)\n    indices = torch.from_numpy(\n        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n    values = torch.from_numpy(sparse_mx.data)\n    shape = torch.Size(sparse_mx.shape)\n    return torch.sparse.FloatTensor(indices, values, shape)\n\ndef sub_graph(adj, num):\n    '''\n    Monte carlo sample a number of neighbors for each node given the adjacent matrix\n    adj: normalized and processed graph adjacent matrix\n    num: the number of samples for each neighbor\n    '''\n    nodes = adj.shape[0]\n    neighbor_number = torch.sum(adj>0,dim=1).reshape(nodes,1)/num\n    sub_graph = torch.randint(0,nodes, (nodes,num))\n    sub_graph = sub_graph.reshape(-1).cpu().tolist()\n    sub_graph = list(set(sub_graph))\n    mask = torch.zeros(nodes,nodes)\n    mask[sub_graph,sub_graph] = 1\n \n    return adj*mask*neighbor_number\n\n \n\n"
] | [
[
"numpy.vstack",
"numpy.ones",
"torch.sum",
"torch.Size",
"torch.randint",
"numpy.dtype",
"numpy.isinf",
"scipy.sparse.csr_matrix",
"torch.zeros",
"scipy.sparse.diags",
"scipy.sparse.eye",
"torch.from_numpy",
"torch.sparse.FloatTensor",
"numpy.power",
"numpy.array",
"numpy.where",
"torch.LongTensor"
]
] |
EricLina/attn2d | [
"12c3a53887c985ae24199ecef2f7b2335fe214c6"
] | [
"examples/pervasive/modules/archive/expanding_resnet.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# import torch.utils.checkpoint as cp\n\nfrom fairseq.modules import (\n MaskedConvolution, MultiheadMaskedConvolution\n)\n\n\nclass ExpandingResNet(nn.Module):\n \"\"\" A network of residual convolutional layers\"\"\"\n\n def __init__(self, num_init_features, args):\n super().__init__()\n num_layers = args.num_layers\n num_features = num_init_features\n self.reduce_channels = Linear(num_features, num_features // args.divide_channels) if args.divide_channels > 1 else None\n num_features = num_features // args.divide_channels\n self.output_channels = num_features\n self.add_up_scale = 1 / (num_layers + 1)\n\n self.residual_blocks = nn.ModuleList([])\n for i in range(num_layers):\n kernel_size = 2 * (i + 1) + 1\n print('Layer ', i, kernel_size)\n self.residual_blocks.append(_ResLayer(num_features, kernel_size, args))\n \n def forward(self, x, \n encoder_mask=None,\n decoder_mask=None,\n incremental_state=None):\n \"\"\"\n Input : N, Tt, Ts, C\n Output : N, Tt, Ts, C\n \"\"\"\n if self.reduce_channels is not None:\n x = self.reduce_channels(x)\n add_up = self.add_up_scale * x\n for layer in self.residual_blocks:\n x = layer(x,\n encoder_mask=encoder_mask,\n decoder_mask=decoder_mask,\n incremental_state=incremental_state)\n add_up += self.add_up_scale * x\n return add_up\n\n\nclass _ResLayer(nn.Module):\n \"\"\" Single residual layer\n\n num_input_features - number of input channels to the layer\n kernel_size - size of masked convolution, k x (k // 2)\n drop_rate - dropout rate\n \"\"\"\n\n def __init__(self, num_features, kernel_size, args):\n super().__init__()\n self.drop_rate = args.convolution_dropout\n ffn_dim = args.ffn_dim\n mid_features = args.reduce_dim\n stride = args.conv_stride # source dimension stride\n dilsrc = args.source_dilation\n diltrg = args.target_dilation\n resolution = args.maintain_resolution\n if resolution:\n if not stride == 1:\n raise ValueError('Could not maintain the resolution with stride=%d' % stride)\n\n # choose the padding accordingly:\n padding_trg = diltrg * (kernel_size - 1) // 2\n padding_src = dilsrc * (kernel_size - 1) // 2\n padding = (padding_trg, padding_src)\n else:\n # must maintain the target resolution:\n padding = (diltrg * (kernel_size - 1) // 2, 0)\n\n # Reduce dim should be dividible by groups\n self.conv1 = nn.Conv2d(num_features,\n mid_features,\n kernel_size=1,\n stride=1,\n bias=False)\n\n self.mconv2 = MaskedConvolution(\n mid_features, num_features,\n kernel_size, args,\n padding=padding,\n )\n self.fc1 = Linear(num_features, ffn_dim)\n self.fc2 = Linear(ffn_dim, num_features)\n self.scale = 0.5 ** .5\n\n def forward(self, x, \n encoder_mask=None,\n decoder_mask=None,\n incremental_state=None):\n residual = x\n x = x.permute(0, 3, 1, 2)\n x = self.conv1(x)\n # x = F.relu(x)\n x = self.mconv2(x, incremental_state)\n if self.training:\n if encoder_mask is not None:\n x = x.masked_fill(encoder_mask.unsqueeze(1).unsqueeze(1), 0)\n if decoder_mask is not None:\n x = x.masked_fill(decoder_mask.unsqueeze(1).unsqueeze(-1), 0)\n\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = x.permute(0, 2, 3, 1)\n x = self.scale * (x + residual) # N, C, Tt, 
Ts\n # FFN:\n residual = x\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = self.scale * (x + residual)\n return x\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.functional.dropout",
"torch.nn.functional.relu",
"torch.nn.Conv2d",
"torch.nn.ModuleList"
]
] |
watsonjj/gammapy | [
"8d2498c8f63f73d1fbe4ba81ab02d9e72552df67",
"8d2498c8f63f73d1fbe4ba81ab02d9e72552df67"
] | [
"gammapy/utils/fitting/tests/test_iminuit.py",
"gammapy/catalog/tests/test_hess.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom .. import Parameter, Parameters, optimize_iminuit\n\npytest.importorskip(\"iminuit\")\n\n\ndef fcn(parameters):\n x = parameters[\"x\"].value\n y = parameters[\"y\"].value\n z = parameters[\"z\"].value\n x_opt, y_opt, z_opt = 2, 3e5, 4e-5\n x_err, y_err, z_err = 0.2, 3e4, 4e-6\n return ((x - x_opt) / x_err) ** 2 + ((y - y_opt) / y_err) ** 2 + ((z - z_opt) / z_err) ** 2\n\n\n@pytest.fixture()\ndef pars():\n x = Parameter(\"x\", 2.1)\n y = Parameter(\"y\", 3.1, scale=1e5)\n z = Parameter(\"z\", 4.1, scale=1e-5)\n return Parameters([x, y, z])\n\n\ndef test_iminuit_basic(pars):\n factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)\n\n assert info[\"success\"]\n assert_allclose(fcn(pars), 0, atol=1e-5)\n\n # Check the result in parameters is OK\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e5, rtol=1e-3)\n # Precision of estimate on \"z\" is very poor (0.040488). Why is it so bad?\n assert_allclose(pars[\"z\"].value, 4e-5, rtol=2e-2)\n\n # Check that minuit sees the parameter factors correctly\n assert_allclose(factors, [2, 3, 4], rtol=1e-3)\n assert_allclose(minuit.values[\"par_000_x\"], 2, rtol=1e-3)\n assert_allclose(minuit.values[\"par_001_y\"], 3, rtol=1e-3)\n assert_allclose(minuit.values[\"par_002_z\"], 4, rtol=1e-3)\n\n\ndef test_iminuit_frozen(pars):\n pars[\"y\"].frozen = True\n\n factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)\n\n assert info[\"success\"]\n\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-4)\n assert_allclose(pars[\"y\"].value, 3.1e5)\n assert_allclose(pars[\"z\"].value, 4.e-5, rtol=1e-4)\n assert_allclose(fcn(pars), 0.111112, rtol=1e-5)\n\n assert minuit.list_of_fixed_param() == [\"par_001_y\"]\n\n\ndef test_iminuit_limits(pars):\n pars[\"y\"].min = 301000\n\n factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)\n\n assert info[\"success\"]\n\n # Check the result in parameters is OK\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-2)\n assert_allclose(pars[\"y\"].value, 301000, rtol=1e-3)\n\n # Check that minuit sees the limit factors correctly\n states = minuit.get_param_states()\n assert not states[0][\"has_limits\"]\n\n y = states[1]\n assert y[\"has_limits\"]\n assert_allclose(y[\"lower_limit\"], 3.01)\n\n # The next assert can be added when we no longer test on iminuit 1.2\n # See https://github.com/gammapy/gammapy/pull/1771\n # assert states[1][\"upper_limit\"] is None\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom collections import Counter\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord, Angle\nfrom astropy.table import Table\nfrom ...utils.testing import assert_quantity_allclose\nfrom ...utils.testing import requires_data, requires_dependency\nfrom ...spectrum.models import PowerLaw, ExponentialCutoffPowerLaw\nfrom ..hess import SourceCatalogHGPS, SourceCatalogLargeScaleHGPS\n\n\n@pytest.fixture(scope=\"session\")\ndef cat():\n return SourceCatalogHGPS(\"$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz\")\n\n\n@requires_data(\"gammapy-data\")\nclass TestSourceCatalogHGPS:\n @staticmethod\n def test_source_table(cat):\n assert cat.name == \"hgps\"\n assert len(cat.table) == 78\n\n @staticmethod\n def test_table_components(cat):\n assert len(cat.table_components) == 98\n\n @staticmethod\n def test_table_associations(cat):\n assert len(cat.table_associations) == 223\n\n @staticmethod\n def test_table_identifications(cat):\n assert len(cat.table_identifications) == 31\n\n @staticmethod\n def test_gaussian_component(cat):\n # Row index starts at 0, component numbers at 1\n # Thus we expect `HGPSC 084` at row 83\n c = cat.gaussian_component(83)\n assert c.name == \"HGPSC 084\"\n\n @staticmethod\n def test_large_scale_component(cat):\n assert isinstance(cat.large_scale_component, SourceCatalogLargeScaleHGPS)\n\n\n@requires_data(\"gammapy-data\")\nclass TestSourceCatalogObjectHGPS:\n @pytest.fixture(scope=\"class\")\n def source(self, cat):\n return cat[\"HESS J1843-033\"]\n\n @staticmethod\n @pytest.mark.slow\n def test_all_sources(cat):\n \"\"\"Check that properties and methods work for all sources,\n i.e. 
don't raise an error.\"\"\"\n for source in cat:\n str(source)\n source.energy_range\n source.spectral_model_type\n source.spectral_model()\n source.spatial_model_type\n source.is_pointlike\n source.sky_model()\n source.flux_points\n\n @staticmethod\n def test_basics(source):\n assert source.name == \"HESS J1843-033\"\n assert source.index == 64\n data = source.data\n assert data[\"Source_Class\"] == \"Unid\"\n assert \"SourceCatalogObjectHGPS\" in repr(source)\n\n ss = str(source)\n assert \"Source name : HESS J1843-033\" in ss\n assert \"Component HGPSC 083:\" in ss\n\n @staticmethod\n def test_str(cat):\n source = cat[\"HESS J1930+188\"]\n assert source.data[\"Spatial_Model\"] == \"Gaussian\"\n assert \"Spatial components : HGPSC 097\" in str(source)\n\n source = cat[\"HESS J1825-137\"]\n assert source.data[\"Spatial_Model\"] == \"3-Gaussian\"\n assert \"Spatial components : HGPSC 065, HGPSC 066, HGPSC 067\" in str(source)\n\n source = cat[\"HESS J1713-397\"]\n assert source.data[\"Spatial_Model\"] == \"Shell\"\n assert \"Source name : HESS J1713-397\" in str(source)\n\n @staticmethod\n def test_components(source):\n components = source.components\n assert len(components) == 2\n c = components[1]\n assert c.name == \"HGPSC 084\"\n\n @staticmethod\n def test_energy_range(source):\n energy_range = source.energy_range\n assert energy_range.unit == \"TeV\"\n assert_allclose(energy_range.value, [0.21544346, 61.89658356])\n\n @staticmethod\n def test_spectral_model_type(cat):\n spec_types = Counter([_.spectral_model_type for _ in cat])\n assert spec_types == {\"pl\": 66, \"ecpl\": 12}\n\n @staticmethod\n @requires_dependency(\"uncertainties\")\n def test_spectral_model_pl(cat):\n source = cat[\"HESS J1843-033\"]\n\n model = source.spectral_model()\n\n assert isinstance(model, PowerLaw)\n pars = model.parameters\n assert_allclose(pars[\"amplitude\"].value, 9.140179932365378e-13)\n assert_allclose(pars[\"index\"].value, 2.1513476371765137)\n assert_allclose(pars[\"reference\"].value, 1.867810606956482)\n\n val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value\n assert_allclose(val, source.data[\"Flux_Spec_Int_1TeV\"].value, rtol=0.01)\n assert_allclose(err, source.data[\"Flux_Spec_Int_1TeV_Err\"].value, rtol=0.01)\n\n @staticmethod\n @requires_dependency(\"uncertainties\")\n def test_spectral_model_ecpl(cat):\n source = cat[\"HESS J0835-455\"]\n\n model = source.spectral_model()\n assert isinstance(model, ExponentialCutoffPowerLaw)\n\n pars = model.parameters\n assert_allclose(pars[\"amplitude\"].value, 6.408420542586617e-12)\n assert_allclose(pars[\"index\"].value, 1.3543991614920847)\n assert_allclose(pars[\"reference\"].value, 1.696938754239)\n assert_allclose(pars[\"lambda_\"].value, 0.081517637)\n\n val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value\n assert_allclose(val, source.data[\"Flux_Spec_Int_1TeV\"].value, rtol=0.01)\n assert_allclose(err, source.data[\"Flux_Spec_Int_1TeV_Err\"].value, rtol=0.01)\n\n model = source.spectral_model(\"pl\")\n assert isinstance(model, PowerLaw)\n\n pars = model.parameters\n assert_allclose(pars[\"amplitude\"].value, 1.833056926733856e-12)\n assert_allclose(pars[\"index\"].value, 1.8913707)\n assert_allclose(pars[\"reference\"].value, 3.0176312923431396)\n\n val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value\n assert_allclose(val, source.data[\"Flux_Spec_PL_Int_1TeV\"].value, rtol=0.01)\n assert_allclose(err, source.data[\"Flux_Spec_PL_Int_1TeV_Err\"].value, rtol=0.01)\n\n @staticmethod\n def test_spatial_model_type(cat):\n 
morph_types = Counter([_.spatial_model_type for _ in cat])\n assert morph_types == {\n \"gaussian\": 52,\n \"2-gaussian\": 8,\n \"shell\": 7,\n \"point-like\": 6,\n \"3-gaussian\": 5,\n }\n\n @staticmethod\n def test_sky_model_point(cat):\n model = cat[\"HESS J1826-148\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 9.815771242691063e-13)\n assert_allclose(p[\"lon_0\"].value, 16.882482528686523)\n assert_allclose(p[\"lat_0\"].value, -1.2889292240142822)\n\n @staticmethod\n def test_sky_model_gaussian(cat):\n model = cat[\"HESS J1119-614\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 7.959899015960725e-13)\n assert_allclose(p[\"lon_0\"].value, 292.1280822753906)\n assert_allclose(p[\"lat_0\"].value, -0.5332353711128235)\n assert_allclose(p[\"sigma\"].value, 0.09785966575145721)\n\n @staticmethod\n def test_sky_model_gaussian2(cat):\n model = cat[\"HESS J1843-033\"].sky_model()\n\n p = model.skymodels[0].parameters\n assert_allclose(p[\"amplitude\"].value, 4.259815e-13, rtol=1e-5)\n assert_allclose(p[\"lon_0\"].value, 29.047216415405273)\n assert_allclose(p[\"lat_0\"].value, 0.24389676749706268)\n assert_allclose(p[\"sigma\"].value, 0.12499100714921951)\n\n p = model.skymodels[1].parameters\n assert_allclose(p[\"amplitude\"].value, 4.880365e-13, rtol=1e-5)\n assert_allclose(p[\"lon_0\"].value, 28.77037811279297)\n assert_allclose(p[\"lat_0\"].value, -0.0727819949388504)\n assert_allclose(p[\"sigma\"].value, 0.2294706553220749)\n\n @staticmethod\n def test_sky_model_gaussian3(cat):\n model = cat[\"HESS J1825-137\"].sky_model()\n\n p = model.skymodels[0].parameters\n assert_allclose(p[\"amplitude\"].value, 1.8952104218765842e-11)\n assert_allclose(p[\"lon_0\"].value, 16.988601684570312)\n assert_allclose(p[\"lat_0\"].value, -0.4913068115711212)\n assert_allclose(p[\"sigma\"].value, 0.47650089859962463)\n\n p = model.skymodels[1].parameters\n assert_allclose(p[\"amplitude\"].value, 4.4639763971527836e-11)\n assert_allclose(p[\"lon_0\"].value, 17.71169090270996)\n assert_allclose(p[\"lat_0\"].value, -0.6598004102706909)\n assert_allclose(p[\"sigma\"].value, 0.3910967707633972)\n\n p = model.skymodels[2].parameters\n assert_allclose(p[\"amplitude\"].value, 5.870712920658374e-12)\n assert_allclose(p[\"lon_0\"].value, 17.840524673461914)\n assert_allclose(p[\"lat_0\"].value, -0.7057178020477295)\n assert_allclose(p[\"sigma\"].value, 0.10932201147079468)\n\n @staticmethod\n def test_sky_model_gaussian_extern(cat):\n # special test for the only extern source with a gaussian morphology\n model = cat[\"HESS J1801-233\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 7.499999970031479e-13)\n assert_allclose(p[\"lon_0\"].value, 6.656888961791992)\n assert_allclose(p[\"lat_0\"].value, -0.267688125371933)\n assert_allclose(p[\"sigma\"].value, 0.17)\n\n @staticmethod\n def test_sky_model_shell(cat):\n model = cat[\"Vela Junior\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 3.2163001428830995e-11)\n assert_allclose(p[\"lon_0\"].value, 266.2873840332031)\n assert_allclose(p[\"lat_0\"].value, -1.243260383605957)\n assert_allclose(p[\"radius\"].value, 0.95)\n assert_allclose(p[\"width\"].value, 0.05)\n\n\n@requires_data(\"gammapy-data\")\nclass TestSourceCatalogObjectHGPSComponent:\n @pytest.fixture(scope=\"class\")\n def component(self, cat):\n return cat.gaussian_component(83)\n\n @staticmethod\n def test_repr(component):\n assert \"SourceCatalogObjectHGPSComponent\" in 
repr(component)\n\n @staticmethod\n def test_str(component):\n assert \"Component HGPSC 084\" in str(component)\n\n @staticmethod\n def test_name(component):\n assert component.name == \"HGPSC 084\"\n\n @staticmethod\n def test_index(component):\n assert component.index == 83\n\n @staticmethod\n def test_spatial_model(component):\n model = component.spatial_model\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 28.77037811279297)\n assert_allclose(p.error(\"lon_0\"), 0.058748625218868256)\n assert_allclose(p[\"lat_0\"].value, -0.0727819949388504)\n assert_allclose(p.error(\"lat_0\"), 0.06880396604537964)\n assert_allclose(p[\"sigma\"].value, 0.2294706553220749)\n assert_allclose(p.error(\"sigma\"), 0.04618723690509796)\n\n\nclass TestSourceCatalogLargeScaleHGPS:\n def setup(self):\n table = Table()\n table[\"GLON\"] = [-30, -10, 10, 20] * u.deg\n table[\"Surface_Brightness\"] = [0, 1, 10, 0] * u.Unit(\"cm-2 s-1 sr-1\")\n table[\"GLAT\"] = [-1, 0, 1, 0] * u.deg\n table[\"Width\"] = [0.4, 0.5, 0.3, 1.0] * u.deg\n self.table = table\n self.model = SourceCatalogLargeScaleHGPS(table)\n\n def test_evaluate(self):\n x = np.linspace(-100, 20, 5)\n y = np.linspace(-2, 2, 7)\n x, y = np.meshgrid(x, y)\n coords = SkyCoord(x, y, unit=\"deg\", frame=\"galactic\")\n image = self.model.evaluate(coords)\n desired = 1.223962643740966 * u.Unit(\"cm-2 s-1 sr-1\")\n assert_quantity_allclose(image.sum(), desired)\n\n def test_parvals(self):\n glon = Angle(10, unit=\"deg\")\n assert_quantity_allclose(\n self.model.peak_brightness(glon), 10 * u.Unit(\"cm-2 s-1 sr-1\")\n )\n assert_quantity_allclose(self.model.peak_latitude(glon), 1 * u.deg)\n assert_quantity_allclose(self.model.width(glon), 0.3 * u.deg)\n"
] | [
[
"numpy.testing.assert_allclose"
],
[
"numpy.meshgrid",
"numpy.linspace",
"numpy.testing.assert_allclose"
]
] |
mbilos/stribor | [
"76082c255653d6bd8d506519223183e5d8395578"
] | [
"stribor/flow.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.distributions as td\r\n\r\nclass Flow(nn.Module):\r\n \"\"\"\r\n Building both normalizing flows and neural flows.\r\n\r\n Example:\r\n >>> import stribor as st\r\n >>> torch.manual_seed(123)\r\n >>> dim = 2\r\n >>> flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])\r\n >>> x = torch.rand(1, dim)\r\n >>> y, ljd = flow(x)\r\n >>> y_inv, ljd_inv = flow.inverse(y)\r\n\r\n Args:\r\n base_dist (Type[torch.distributions]): Base distribution\r\n transforms (List[st.flows]): List of invertible transformations\r\n \"\"\"\r\n def __init__(self, base_dist=None, transforms=[]):\r\n super().__init__()\r\n self.base_dist = base_dist\r\n self.transforms = nn.ModuleList(transforms)\r\n\r\n def forward(self, x, latent=None, mask=None, t=None, reverse=False, **kwargs):\r\n \"\"\"\r\n Args:\r\n x (tensor): Input sampled from base density with shape (..., dim)\r\n latent (tensor, optional): Conditional vector with shape (..., latent_dim)\r\n Default: None\r\n mask (tensor): Masking tensor with shape (..., 1)\r\n Default: None\r\n t (tensor, optional): Flow time end point. Default: None\r\n reverse (bool, optional): Whether to perform an inverse. Default: False\r\n\r\n Returns:\r\n y (tensor): Output that follows target density (..., dim)\r\n log_jac_diag (tensor): Log-Jacobian diagonal (..., dim)\r\n \"\"\"\r\n transforms = self.transforms[::-1] if reverse else self.transforms\r\n _mask = 1 if mask is None else mask\r\n\r\n log_jac_diag = torch.zeros_like(x).to(x)\r\n for f in transforms:\r\n if reverse:\r\n x, ld = f.inverse(x * _mask, latent=latent, mask=mask, t=t, **kwargs)\r\n else:\r\n x, ld = f.forward(x * _mask, latent=latent, mask=mask, t=t, **kwargs)\r\n log_jac_diag += ld * _mask\r\n return x, log_jac_diag\r\n\r\n def inverse(self, y, latent=None, mask=None, t=None, **kwargs):\r\n \"\"\" Inverse of forward function with the same arguments. \"\"\"\r\n return self.forward(y, latent=latent, mask=mask, t=t, reverse=True, **kwargs)\r\n\r\n def log_prob(self, x, **kwargs):\r\n \"\"\"\r\n Calculates log-probability of a sample.\r\n\r\n Args:\r\n x (tensor): Input with shape (..., dim)\r\n\r\n Returns:\r\n log_prob (tensor): Log-probability of the input with shape (..., 1)\r\n \"\"\"\r\n if self.base_dist is None:\r\n raise ValueError('Please define `base_dist` if you need log-probability')\r\n x, log_jac_diag = self.inverse(x, **kwargs)\r\n log_prob = self.base_dist.log_prob(x) + log_jac_diag.sum(-1)\r\n return log_prob.unsqueeze(-1)\r\n\r\n def sample(self, num_samples, latent=None, mask=None, **kwargs):\r\n \"\"\"\r\n Transforms samples from the base to the target distribution.\r\n Uses reparametrization trick.\r\n\r\n Args:\r\n num_samples (tuple or int): Shape of samples\r\n latent (tensor): Latent conditioning vector with shape (..., latent_dim)\r\n\r\n Returns:\r\n x (tensor): Samples from target distribution with shape (*num_samples, dim)\r\n \"\"\"\r\n if self.base_dist is None:\r\n raise ValueError('Please define `base_dist` if you need sampling')\r\n if isinstance(num_samples, int):\r\n num_samples = (num_samples,)\r\n\r\n x = self.base_dist.rsample(num_samples)\r\n x, log_jac_diag = self.forward(x, **kwargs)\r\n return x\r\n"
] | [
[
"torch.zeros_like",
"torch.nn.ModuleList"
]
] |
xiaodashuaiya/fairseq | [
"9e3850bd87f4da751671d503406115730b99ea8a"
] | [
"fairseq/utils.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport copy\nimport importlib.util\nimport logging\nimport math\nimport os\nimport sys\nimport warnings\nfrom collections import defaultdict\nfrom itertools import accumulate\nfrom typing import Callable, Dict, List, Optional\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom fairseq.logging.meters import safe_round\nfrom fairseq.modules import gelu, gelu_accurate, sin, swish\nfrom fairseq.modules.multihead_attention import MultiheadAttention\nfrom torch import Tensor\n\ntry:\n from amp_C import multi_tensor_l2norm\n multi_tensor_l2norm_available = True\nexcept ImportError:\n multi_tensor_l2norm_available = False\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef split_paths(paths: str) -> List[str]:\n return paths.split(os.pathsep) if \"://\" not in paths else paths.split(\"|\")\n\n\ndef load_ensemble_for_inference(filenames, task, model_arg_overrides=None):\n from fairseq import checkpoint_utils\n\n deprecation_warning(\n \"utils.load_ensemble_for_inference is deprecated. \"\n \"Please use checkpoint_utils.load_model_ensemble instead.\"\n )\n return checkpoint_utils.load_model_ensemble(\n filenames, arg_overrides=model_arg_overrides, task=task\n )\n\n\ndef apply_to_sample(f, sample):\n if hasattr(sample, '__len__') and len(sample) == 0:\n return {}\n\n def _apply(x):\n if torch.is_tensor(x):\n return f(x)\n elif isinstance(x, dict):\n return {key: _apply(value) for key, value in x.items()}\n elif isinstance(x, list):\n return [_apply(x) for x in x]\n elif isinstance(x, tuple):\n return tuple(_apply(x) for x in x)\n elif isinstance(x, set):\n return {_apply(x) for x in x}\n else:\n return x\n\n return _apply(sample)\n\n\ndef move_to_cuda(sample):\n def _move_to_cuda(tensor):\n return tensor.cuda()\n\n return apply_to_sample(_move_to_cuda, sample)\n\n\ndef move_to_cpu(sample):\n def _move_to_cpu(tensor):\n # PyTorch has poor support for half tensors (float16) on CPU.\n # Move any such tensors to float32.\n if tensor.dtype in {torch.bfloat16, torch.float16}:\n tensor = tensor.to(dtype=torch.float32)\n return tensor.cpu()\n\n return apply_to_sample(_move_to_cpu, sample)\n\n\ndef get_incremental_state(\n module: MultiheadAttention,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n) -> Optional[Dict[str, Optional[Tensor]]]:\n \"\"\"Helper for getting incremental state for an nn.Module.\"\"\"\n return module.get_incremental_state(incremental_state, key)\n\n\ndef set_incremental_state(\n module: MultiheadAttention,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n value: Dict[str, Optional[Tensor]],\n) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:\n \"\"\"Helper for setting incremental state for an nn.Module.\"\"\"\n if incremental_state is not None:\n result = module.set_incremental_state(incremental_state, key, value)\n if result is not None:\n incremental_state = result\n return incremental_state\n\n\ndef load_align_dict(replace_unk):\n if replace_unk is None:\n align_dict = None\n elif isinstance(replace_unk, str) and len(replace_unk) > 0:\n # Load alignment dictionary for unknown word replacement if it was passed as an argument.\n align_dict = {}\n with open(replace_unk, \"r\") as f:\n for line in f:\n cols = line.split()\n align_dict[cols[0]] = cols[1]\n else:\n # No alignment dictionary 
provided but we still want to perform unknown word replacement by copying the\n # original source word.\n align_dict = {}\n return align_dict\n\n\ndef print_embed_overlap(embed_dict, vocab_dict):\n embed_keys = set(embed_dict.keys())\n vocab_keys = set(vocab_dict.symbols)\n overlap = len(embed_keys & vocab_keys)\n logger.info(\"found {}/{} types in embedding file\".format(overlap, len(vocab_dict)))\n\n\ndef parse_embedding(embed_path):\n \"\"\"Parse embedding text file into a dictionary of word and embedding tensors.\n\n The first line can have vocabulary size and dimension. The following lines\n should contain word and embedding separated by spaces.\n\n Example:\n 2 5\n the -0.0230 -0.0264 0.0287 0.0171 0.1403\n at -0.0395 -0.1286 0.0275 0.0254 -0.0932\n \"\"\"\n embed_dict = {}\n with open(embed_path) as f_embed:\n next(f_embed) # skip header\n for line in f_embed:\n pieces = line.rstrip().split(\" \")\n embed_dict[pieces[0]] = torch.Tensor(\n [float(weight) for weight in pieces[1:]]\n )\n return embed_dict\n\n\ndef load_embedding(embed_dict, vocab, embedding):\n for idx in range(len(vocab)):\n token = vocab[idx]\n if token in embed_dict:\n embedding.weight.data[idx] = embed_dict[token]\n return embedding\n\n\ndef replace_unk(hypo_str, src_str, alignment, align_dict, unk):\n from fairseq import tokenizer\n\n # Tokens are strings here\n hypo_tokens = tokenizer.tokenize_line(hypo_str)\n # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully\n src_tokens = tokenizer.tokenize_line(src_str) + [\"<eos>\"]\n for i, ht in enumerate(hypo_tokens):\n if ht == unk:\n src_token = src_tokens[alignment[i]]\n # Either take the corresponding value in the aligned dictionary or just copy the original value.\n hypo_tokens[i] = align_dict.get(src_token, src_token)\n return \" \".join(hypo_tokens)\n\n\ndef post_process_prediction(\n hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None\n):\n hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore)\n if align_dict is not None:\n hypo_str = replace_unk(\n hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()\n )\n if align_dict is not None or remove_bpe is not None:\n # Convert back to tokens for evaluating with unk replacement or without BPE\n # Note that the dictionary can be modified inside the method.\n hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)\n return hypo_tokens, hypo_str, alignment\n\n\ndef make_positions(tensor, padding_idx: int, onnx_trace: bool = False):\n \"\"\"Replace non-padding symbols with their position numbers.\n\n Position numbers begin at padding_idx+1. Padding symbols are ignored.\n \"\"\"\n # The series of casts and type-conversions here are carefully\n # balanced to both work with ONNX export and XLA. 
In particular XLA\n # prefers ints, cumsum defaults to output longs, and ONNX doesn't know\n # how to handle the dtype kwarg in cumsum.\n mask = tensor.ne(padding_idx).int()\n return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx\n\n\ndef strip_pad(tensor, pad):\n return tensor[tensor.ne(pad)]\n\n\ndef buffered_arange(max):\n if not hasattr(buffered_arange, \"buf\"):\n buffered_arange.buf = torch.LongTensor()\n if max > buffered_arange.buf.numel():\n buffered_arange.buf.resize_(max)\n torch.arange(max, out=buffered_arange.buf)\n return buffered_arange.buf[:max]\n\n\ndef convert_padding_direction(\n src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False\n):\n assert right_to_left ^ left_to_right\n pad_mask = src_tokens.eq(padding_idx)\n if not pad_mask.any():\n # no padding, return early\n return src_tokens\n if left_to_right and not pad_mask[:, 0].any():\n # already right padded\n return src_tokens\n if right_to_left and not pad_mask[:, -1].any():\n # already left padded\n return src_tokens\n max_len = src_tokens.size(1)\n buffered = torch.empty(0).long()\n if max_len > 0:\n torch.arange(max_len, out=buffered)\n range = buffered.type_as(src_tokens).expand_as(src_tokens)\n num_pads = pad_mask.long().sum(dim=1, keepdim=True)\n if right_to_left:\n index = torch.remainder(range - num_pads, max_len)\n else:\n index = torch.remainder(range + num_pads, max_len)\n return src_tokens.gather(1, index)\n\n\ndef item(tensor):\n if hasattr(tensor, \"item\"):\n return tensor.item()\n if hasattr(tensor, \"__getitem__\"):\n return tensor[0]\n return tensor\n\n\ndef multi_tensor_total_norm(grads, chunk_size=2048*32) -> torch.Tensor:\n per_device_grads = {}\n norms = []\n for grad in grads:\n device = grad.device\n cur_device_grads = per_device_grads.get(device)\n if cur_device_grads is None:\n cur_device_grads = []\n per_device_grads[device] = cur_device_grads\n cur_device_grads.append(grad)\n for device in per_device_grads.keys():\n cur_device_grads = per_device_grads[device]\n if device.type == \"cuda\":\n # TODO(msb) return has_inf\n has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)\n with torch.cuda.device(device):\n norm = multi_tensor_l2norm(chunk_size, has_inf, [cur_device_grads], False)\n norms.append(norm[0])\n else:\n norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]\n total_norm = torch.norm(torch.stack(norms))\n return total_norm\n\n\ndef clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:\n if isinstance(params, torch.Tensor):\n params = [params]\n params = list(params)\n grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]\n if len(grads) == 0:\n if len(params) > 0:\n return params[0].new_tensor(0.)\n else:\n return torch.tensor(0.)\n\n if len(grads) == 1:\n total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)\n else:\n if multi_tensor_l2norm_available:\n total_norm = multi_tensor_total_norm(grads)\n else:\n warnings.warn(\n \"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; \"\n \"you may get better performance by installing NVIDIA's apex library\"\n )\n total_norm = torch.norm(\n torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads])\n )\n\n if aggregate_norm_fn is not None:\n total_norm = aggregate_norm_fn(total_norm)\n\n if max_norm > 0:\n max_norm = float(max_norm)\n clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)\n for g in grads:\n g.mul_(clip_coef)\n return total_norm\n\n\ndef fill_with_neg_inf(t):\n 
\"\"\"FP16-compatible function that fills a tensor with -inf.\"\"\"\n return t.float().fill_(float(\"-inf\")).type_as(t)\n\n\ndef _match_types(arg1, arg2):\n \"\"\"Convert the numerical argument to the same type as the other argument\"\"\"\n\n def upgrade(arg_number, arg_structure):\n if isinstance(arg_structure, tuple):\n return tuple([arg_number] * len(arg_structure))\n elif isinstance(arg_structure, dict):\n arg = copy.deepcopy(arg_structure)\n for k in arg:\n arg[k] = upgrade(arg_number, arg_structure[k])\n return arg\n else:\n return arg_number\n\n if isinstance(arg1, float) or isinstance(arg1, int):\n return upgrade(arg1, arg2), arg2\n elif isinstance(arg2, float) or isinstance(arg2, int):\n return arg1, upgrade(arg2, arg1)\n\n return arg1, arg2\n\n\ndef resolve_max_positions(*args):\n \"\"\"Resolve max position constraints from multiple sources.\"\"\"\n\n def map_value_update(d1, d2):\n updated_value = copy.deepcopy(d1)\n for key in d2:\n if key not in updated_value:\n updated_value[key] = d2[key]\n else:\n updated_value[key] = min(d1[key], d2[key])\n return updated_value\n\n def nullsafe_min(l):\n minim = None\n for item in l:\n if minim is None:\n minim = item\n elif item is not None and item < minim:\n minim = item\n return minim\n\n max_positions = None\n for arg in args:\n if max_positions is None:\n max_positions = arg\n elif arg is not None:\n max_positions, arg = _match_types(max_positions, arg)\n if isinstance(arg, float) or isinstance(arg, int):\n max_positions = min(max_positions, arg)\n elif isinstance(arg, dict):\n max_positions = map_value_update(max_positions, arg)\n else:\n max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))\n\n return max_positions\n\n\ndef import_user_module(args):\n module_path = getattr(args, \"user_dir\", None)\n if module_path is not None:\n module_path = os.path.abspath(args.user_dir)\n if not os.path.exists(module_path):\n fairseq_rel_path = os.path.join(\n os.path.dirname(__file__), \"..\", args.user_dir\n )\n if os.path.exists(fairseq_rel_path):\n module_path = fairseq_rel_path\n module_parent, module_name = os.path.split(module_path)\n\n if module_name not in sys.modules:\n sys.path.insert(0, module_parent)\n importlib.import_module(module_name)\n\n\ndef softmax(x, dim: int, onnx_trace: bool = False):\n if onnx_trace:\n return F.softmax(x.float(), dim=dim)\n else:\n return F.softmax(x, dim=dim, dtype=torch.float32)\n\n\ndef log_softmax(x, dim: int, onnx_trace: bool = False):\n if onnx_trace:\n return F.log_softmax(x.float(), dim=dim)\n else:\n return F.log_softmax(x, dim=dim, dtype=torch.float32)\n\n\ndef get_perplexity(loss, round=2, base=2):\n if loss is None:\n return 0.\n try:\n return safe_round(base ** loss, round)\n except OverflowError:\n return float('inf')\n\n\ndef deprecation_warning(message, stacklevel=3):\n # don't use DeprecationWarning, since it's ignored by default\n warnings.warn(message, stacklevel=stacklevel)\n\n\ndef get_activation_fn(activation: str) -> Callable:\n \"\"\" Returns the activation function corresponding to `activation` \"\"\"\n if activation == \"relu\":\n return F.relu\n elif activation == \"gelu\":\n return gelu\n elif activation == \"gelu_fast\":\n deprecation_warning(\n \"--activation-fn=gelu_fast has been renamed to gelu_accurate\"\n )\n return gelu_accurate\n elif activation == \"gelu_accurate\":\n return gelu_accurate\n elif activation == 'sin':\n return sin\n elif activation == 'swish':\n return swish\n elif activation == \"tanh\":\n return torch.tanh\n elif activation == 
\"linear\":\n return lambda x: x\n else:\n raise RuntimeError(\"--activation-fn {} not supported\".format(activation))\n\n\ndef get_available_activation_fns() -> List:\n return [\n \"relu\",\n \"gelu\",\n \"gelu_fast\", # deprecated\n \"gelu_accurate\",\n \"sin\",\n \"swish\",\n \"tanh\",\n \"linear\",\n ]\n\n\n@contextlib.contextmanager\ndef eval(model):\n is_training = model.training\n model.eval()\n yield\n model.train(is_training)\n\n\ndef has_parameters(module):\n try:\n next(module.parameters())\n return True\n except StopIteration:\n return False\n\n\ndef set_torch_seed(seed):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n assert isinstance(seed, int)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\n@contextlib.contextmanager\ndef with_torch_seed(seed):\n assert isinstance(seed, int)\n rng_state = torch.get_rng_state()\n cuda_rng_state = torch.cuda.get_rng_state()\n set_torch_seed(seed)\n yield\n torch.set_rng_state(rng_state)\n torch.cuda.set_rng_state(cuda_rng_state)\n\n\ndef parse_alignment(line):\n \"\"\"\n Parses a single line from the alingment file.\n\n Args:\n line (str): String containing the alignment of the format:\n <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..\n <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.\n\n Returns:\n torch.IntTensor: packed alignments of shape (2 * m).\n \"\"\"\n alignments = line.strip().split()\n parsed_alignment = torch.IntTensor(2 * len(alignments))\n for idx, alignment in enumerate(alignments):\n src_idx, tgt_idx = alignment.split(\"-\")\n parsed_alignment[2 * idx] = int(src_idx)\n parsed_alignment[2 * idx + 1] = int(tgt_idx)\n return parsed_alignment\n\n\ndef get_token_to_word_mapping(tokens, exclude_list):\n n = len(tokens)\n word_start = [int(token not in exclude_list) for token in tokens]\n word_idx = list(accumulate(word_start))\n token_to_word = {i: word_idx[i] for i in range(n)}\n return token_to_word\n\n\ndef extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):\n tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero().squeeze(dim=-1)\n src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero().squeeze(dim=-1)\n src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])\n tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])\n alignment = []\n if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):\n attn_valid = attn[tgt_valid]\n attn_valid[:, src_invalid] = float(\"-inf\")\n _, src_indices = attn_valid.max(dim=1)\n for tgt_idx, src_idx in zip(tgt_valid, src_indices):\n alignment.append(\n (\n src_token_to_word[src_idx.item()] - 1,\n tgt_token_to_word[tgt_idx.item()] - 1,\n )\n )\n return alignment\n\n\ndef new_arange(x, *size):\n \"\"\"\n Return a Tensor of `size` filled with a range function on the device of x.\n If size is empty, using the size of the variable x.\n \"\"\"\n if len(size) == 0:\n size = x.size()\n return torch.arange(size[-1], device=x.device).expand(*size).contiguous()\n\n\ndef get_tpu_device(args):\n import torch_xla.core.xla_model as xm\n return xm.xla_device()\n\n\ndef logging_multiple_line_messages(msg):\n msg_arr = msg.split(\"\\n\")\n for line in msg_arr:\n logger.info(line)\n\n\nclass CudaEnvironment(object):\n def __init__(self):\n cur_device = torch.cuda.current_device()\n prop = torch.cuda.get_device_properties(\"cuda:{}\".format(cur_device))\n self.name = prop.name\n self.major = prop.major\n self.minor = prop.minor\n self.total_memory_in_GB = 
prop.total_memory / 1024 / 1024 / 1024\n\n @staticmethod\n def pretty_print_cuda_env_list(cuda_env_list):\n \"\"\"\n Given a list of CudaEnviorments, pretty print them\n \"\"\"\n num_workers = len(cuda_env_list)\n center = \"CUDA enviroments for all {} workers\".format(num_workers)\n banner_len = 40 - len(center) // 2\n first_line = \"*\" * banner_len + center + \"*\" * banner_len\n msg_arr = [first_line]\n for r, env in enumerate(cuda_env_list):\n msg_arr.append(\n \"rank {:3d}: \".format(r)\n + \"capabilities = {:2d}.{:<2d} ; \".format(env.major, env.minor)\n + \"total memory = {:.3f} GB ; \".format(env.total_memory_in_GB)\n + \"name = {:40s}\".format(env.name)\n )\n msg_arr.append(first_line)\n logging_multiple_line_messages(\"\\n\".join(msg_arr))\n"
] | [
[
"torch.empty",
"torch.stack",
"torch.cuda.manual_seed",
"torch.cuda.get_rng_state",
"torch.nn.functional.softmax",
"torch.cumsum",
"torch.cuda.set_rng_state",
"torch.norm",
"torch.arange",
"torch.set_rng_state",
"torch.manual_seed",
"torch.tensor",
"torch.cuda.current_device",
"torch.cuda.device",
"torch.nn.functional.log_softmax",
"torch.get_rng_state",
"torch.is_tensor",
"torch.remainder",
"torch.zeros",
"torch.LongTensor"
]
] |
MaryZolfaghar/WCSLS | [
"fcb3bfd11c19bb90690ec772f91bbd107832d636"
] | [
"utils/analyze.py"
] | [
"from numpy.core.fromnumeric import reshape\nimport torch \nimport numpy as np\nimport pickle\nfrom itertools import combinations, permutations\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import MDS, TSNE\nfrom scipy.stats import pearsonr, ttest_ind\nimport statsmodels.api as sm\nfrom dataset import get_loaders, WineGrid\n\ndef analyze_episodic(model, test_data, args):\n # Collect attention weights for each sample in test set\n model.eval()\n m, x_ = test_data[0] # only 1 episode in test data\n m = m.to(args.device) # m: [1, n_train, sample_dim]\n x = x_[:,:,:-1].to(args.device) # x: [1, n_test, sample_dim]\n y = x_[:,:,-1].type(torch.long).to(args.device)\n y = y.squeeze() # y: [1, n_test]\n with torch.no_grad():\n y_hat, attention = model(x, m) \n attention = attention[0] # first (only) memory layer\n attention = np.squeeze(attention)\n # attention: [n_train, n_test]\n \n # Check the retrieval weights of relevant vs. irrelevant training samples\n grid = test_data.grid\n train = grid.train # train *samples* in test *episode*\n test = grid.test # test *samples* in test *episode*\n n_train = len(train)\n n_test = len(test)\n rel_ids = grid.hub_sample_ids # relevant memory ids (train samples)\n attn_ranks = np.zeros_like(attention)\n for i in range(n_test):\n argsorted_attn = np.argsort(attention[i])\n ranks = np.zeros([n_train])\n ranks[argsorted_attn] = np.arange(n_train)\n attn_ranks[i] = ranks\n relevant = []\n irrelevant = []\n for i in range(n_test):\n for j in range(n_train):\n if j in rel_ids[i]:\n relevant.append(attn_ranks[i,j])\n else:\n irrelevant.append(attn_ranks[i,j])\n rank_data = {\"relevant\": relevant, \"irrelevant\": irrelevant}\n\n # Check how often a legitimate \"path\" was retrieved in the top 5%\n k = 8 # top k memories with highest weights (k = 8 means 5 percent)\n used_hub = []\n for i in range(n_test):\n highest_attn = np.argsort(attention[i])[-k:]\n test_f1, test_f2, test_ctx, test_y = test[i]\n\n # Get relevant hubs for current test sample\n hubs = []\n for rel_id in rel_ids[i]:\n train_sample = train[rel_id]\n train_f1, train_f2 = train_sample[0], train_sample[1]\n if train_f1 in [test_f1, test_f2]: \n hubs.append(train_f2)\n if train_f2 in [test_f1, test_f2]:\n hubs.append(train_f1)\n hubs = list(set(hubs))\n hubs_dict = {h:[] for h in hubs}\n assert len(hubs) == 2, \"shouldn't be more than 2 hubs?\"\n\n # Check if one of the hubs appears with f1 and f2\n attended_train = [train[idx] for idx in highest_attn]\n for sample in attended_train:\n train_f1, train_f2, train_ctx, train_y = sample\n if train_ctx != test_ctx:\n continue # must be samples testing the same axis to be relevant\n if hubs[0] == train_f1:\n hubs_dict[hubs[0]].append(sample[1])\n if hubs[1] == sample[0]:\n hubs_dict[hubs[1]].append(sample[1])\n if hubs[0] == sample[1]:\n hubs_dict[hubs[0]].append(sample[0])\n if hubs[1] == sample[1]:\n hubs_dict[hubs[1]].append(sample[0])\n if test_f1 in hubs_dict[hubs[0]] and test_f2 in hubs_dict[hubs[0]]:\n used_hub.append(True)\n elif test_f1 in hubs_dict[hubs[1]] and test_f2 in hubs_dict[hubs[1]]:\n used_hub.append(True)\n else:\n used_hub.append(False)\n p_used_hub = np.mean(used_hub)\n print(\"Proportion that episodic system retrieved a hub path:\", p_used_hub)\n\n results = {\"rank_data\":rank_data, \"p_used_hub\": p_used_hub}\n return results\n\ndef analyze_cortical(model, test_data, analyze_loader, args):\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for 
loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n # locs = [idx2loc[idx] for idx in idxs]\n idx2tensor = test_data.idx2tensor \n\n model.eval()\n # Get embeddings from model for each face\n face_embedding = model.face_embedding\n face_embedding.to(args.device)\n embeddings = []\n # Get hiddens from the recurrent model for each face\n \n # if the model was stepwisemlp\n if args.cortical_model=='stepwisemlp':\n hiddens = [[] for i in range(2)]\n hiddens_cong = [[] for i in range(2)]\n hiddens_incong = [[] for i in range(2)] \n hiddens_ctxs = [[[] for j in range(args.N_contexts)] for i in range(2)]\n else:\n hiddens = [] # hidden reps. for both contexts\n hiddens_incong = []\n hiddens_cong = []\n hiddens_ctxs = [[] for i in range(args.N_contexts)]\n \n idxs1 = []\n idxs2 = []\n idxs1_ctxs = [[] for i in range(args.N_contexts)]\n idxs2_ctxs = [[] for i in range(args.N_contexts)]\n samples = []\n samples_ctxs = [[] for i in range(args.N_contexts)]\n samples_cong = []\n samples_incong = []\n\n with torch.no_grad():\n for idx in range(n_states):\n face_tensor = idx2tensor[idx].unsqueeze(0).to(args.device) \n embedding = face_embedding(face_tensor) # [1, state_dim]\n embedding = embedding.cpu().numpy()\n embeddings.append(embedding)\n embeddings = np.concatenate(embeddings, axis=0) # [n_states, state_dim]\n for batch in analyze_loader:\n if args.cortical_task == 'face_task':\n f1, f2, ctx, out, idx1, idx2 = batch\n elif args.cortical_task == 'wine_task':\n f1, f2, ctx, out1, out2, idx1, idx2 = batch\n idx1 = idx1[0]\n idx2 = idx2[0]\n samples.append(batch)\n (x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]\n f1 = f1.to(args.device) \n f2 = f2.to(args.device) \n ctx = ctx.to(args.device)\n\n # create congruent and incongruent groups\n grid_angle = np.arctan2((y2-y1),(x2-x1))\n phi = np.sin(2*grid_angle)\n if np.abs(phi)<1e-5:\n # for congrunet trials, \n # zero out those very close to zero angles\n # so it won't turn into 1 or -1 by sign\n cong = 0 \n else:\n cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none\n\n # get the hidden reps. 
\n y_hat, out = model(f1, f2, ctx) \n # y_hat: [1, 2]\n # rnn_out: [seq_length, 1, hidden_dim]: [3, 1, 128]\n # mlp_out: [1, hidden_dim]: [1, 128]\n if args.order_ctx == 'first':\n f1_ind = 1\n f2_ind = 2\n elif args.order_ctx == 'last':\n f1_ind = 0\n f2_ind = 1\n if args.cortical_model=='stepwisemlp':\n out1, out2 = out\n out1 = out1.cpu().numpy()\n out2 = out2.cpu().numpy()\n hiddens[0].append(out1)\n hiddens[1].append(out2)\n hiddens_ctxs[0][ctx].append(out1)\n hiddens_ctxs[1][ctx].append(out2)\n else:\n out = out.cpu().numpy()\n hiddens.append(out)\n hiddens_ctxs[ctx].append(out)\n \n ctx = ctx[0].cpu().numpy()\n idxs1.append(idx1)\n idxs2.append(idx2)\n idxs1_ctxs[ctx].append(idx1)\n idxs2_ctxs[ctx].append(idx2)\n samples_ctxs[ctx].append(batch)\n if ((cong==1) and ((ctx==0) or (ctx==1))):\n if args.cortical_model=='stepwisemlp':\n hiddens_cong[0].append(out1)\n hiddens_cong[1].append(out2)\n else:\n hiddens_cong.append(out)\n samples_cong.append(batch)\n elif ((cong==-1) and ((ctx==0) or (ctx==1))):\n if args.cortical_model=='stepwisemlp':\n hiddens_incong[0].append(out1)\n hiddens_incong[1].append(out2)\n else:\n hiddens_incong.append(out)\n samples_incong.append(batch)\n\n hiddens = np.asarray(hiddens).squeeze() \n # for n_ctx=2, data_len = 16*12*2=384 (n_states:16, n_states-ties:12, permutation:2)\n # rnn hiddens: [data_len, seq_length, hidden_dim] : [384, 3, 128]\n # mlp hiddens: [data_len, hidden_dim]: [384, 128]\n # stepwisemlp hiddens: [num_hidds, data_len, hidden_dim]: [2, 384, 128]\n # with diagonals - wine task = data_len = (n_ctx-n_diag)*192+n_diag*212 \n # [n_ctx:2, data_len:384], [n_ctx:4, data_len:768], [n_ctx:8, data_len: 1616]\n hiddens_incong = np.asarray(hiddens_incong).squeeze() \n hiddens_cong = np.asarray(hiddens_cong).squeeze() \n # rnn hiddens_cong/incong: [144, 3, 128]\n # mlp hiddens_cong/incong: [144, 128]\n # stepwise mlp hiddens_cong/incong: [2, 144, 128]\n \n # hiddens_ctx: even tho it is 384, but it is ordered based on the contexts\n if args.cortical_model=='stepwisemlp':\n hiddens_ctx = np.concatenate(np.asarray(hiddens_ctxs).squeeze(), axis=1)\n # hiddens_ctxs: [n_hidds=2, n_ctx, 192, 1, 128]\n # hiddens_ctx: [n_hidds=2, 384, 128]\n hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=1) \n # hiddens_inc_c: [n_hidds, 384-ties, 128]: [2, 288, 128]\n else:\n hiddens_ctx = np.concatenate(hiddens_ctxs, axis = 0).squeeze()\n # mlp hiddens_ctxs: [n_ctx, 192, 1, 128]\n # rnn hiddens_ctxs: [n_ctx, n_trials=192, 3, 1, 128]\n # rnn hiddens_ctx: [384, 3, 128]\n # mlp hiddens_ctx: [384, 128]\n hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=0) \n # rnn hiddens_inc_c: [384-ties, seq_length, 128]: [288, 3, 128]\n # mlp hiddens_inc_c: [384-ties, 128]: [288, 128]\n\n if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):\n hiddens_ctx = hiddens_ctx[:, -1, :] # [384, 128]\n hiddens_inc_c = hiddens_inc_c[:, -1, :] #[288, 128]\n samples_inc_c = np.concatenate((samples_incong, samples_cong), axis=0)\n \n if args.cortical_model=='stepwisemlp':\n avg_hidden = np.zeros([2, n_states, hiddens.shape[-1]])\n avg_hidden_ctxs = np.zeros([2, args.N_contexts, n_states, hiddens.shape[-1]])\n else:\n avg_hidden = np.zeros([n_states, hiddens.shape[-1]])\n avg_hidden_ctxs = np.zeros([args.N_contexts, n_states, hiddens.shape[-1]])\n \n if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):\n hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze() # [n_ctx, n_tirals=192, seq_len=3, hidd_dim=128]\n # Take average for each face 
based on its location\n for f in range(n_states):\n temp1 = [np.expand_dims(hiddens[i,f1_ind,:], axis=0) \n for i, idx1 in enumerate(idxs1) if idx1==f]\n temp2 = [np.expand_dims(hiddens[i,f2_ind,:], axis=0)\n for i, idx2 in enumerate(idxs2) if idx2==f]\n if len(temp1 + temp2)>1:\n avg_hidden[f] = np.concatenate(temp1 + temp2, axis=0).mean(axis=0) \n for ctx in range(args.N_contexts):\n temp1_ctxs = [hiddens_ctxs[ctx,i,f1_ind,:] \n for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]\n temp2_ctxs = [hiddens_ctxs[ctx,i,f2_ind,:] \n for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f]\n if len(temp1_ctxs + temp2_ctxs)>1:\n m = np.zeros([2,hiddens_ctxs.shape[-1]])\n m[0] = np.mean(np.asarray(temp1_ctxs), axis=0)\n m[1] = np.mean(np.asarray(temp2_ctxs), axis=0)\n avg_hidden_ctxs[ctx, f, :] = np.mean(m, axis=0)\n # avg_hidden_ctxs[ctx, f, :] = np.concatenate(temp1_ctxs + temp2_ctxs, axis=0).mean(axis=0)\n # avg_hidden_ctxs: [n_ctx, n_states, hidden_dim]: [2, 16, 128] \n avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)\n elif args.cortical_model in ['mlp', 'mlp_cc']:\n for f in range(n_states):\n temp = [hiddens[i,:] \n for i, (idx1, idx2) in enumerate(zip(idxs1, idxs2))\n if ((idx1==f) | (idx2==f))]\n if len(temp)>1:\n avg_hidden[f] = np.mean(temp, axis=0)\n for ctx in range(args.N_contexts): \n temp_ctxs = [hiddens_ctxs[ctx][i]\n for i, (idx1, idx2) in enumerate(zip(idxs1_ctxs[ctx], idxs2_ctxs[ctx]))\n if ((idx1==f) | (idx2==f))]\n if len(temp_ctxs)>1:\n avg_hidden_ctxs[ctx, f, :] = np.mean(temp_ctxs, axis=0)\n # avg_hidden_ctxs: [n_contexts, n_states, hidden_dim]: [2, 16, 128] \n avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)\n elif args.cortical_model=='stepwisemlp':\n # todo: how to do the averaging? over both hidden reps?\n # hiddens_ctxs anf hiddens_inc_c for the pca results should have two dimensions, \n hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze()\n for f in range(n_states):\n temp1 = [hiddens[0,i,:] \n for i, idx1 in enumerate(idxs1) if idx1==f]\n temp2 = [hiddens[1,i,:] \n for i, idx2 in enumerate(idxs2) if idx2==f]\n if len(temp1)>1:\n avg_hidden[0,f,:] = np.mean(temp1, axis=0)\n if len(temp2)>1:\n avg_hidden[1,f,:] = np.mean(temp2, axis=0)\n # avg_hidden: [n_hidd, n_states, hidd_dim]: [2,16,128]\n for ctx in range(args.N_contexts):\n temp1_ctxs = [hiddens_ctxs[0,ctx,i,:] \n for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]\n temp2_ctxs = [hiddens_ctxs[1,ctx,i,:] \n for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f] \n if len(temp1_ctxs)>1:\n avg_hidden_ctxs[0,ctx,f,:] = np.mean(temp1_ctxs, axis=0)\n if len(temp2_ctxs)>1:\n avg_hidden_ctxs[1,ctx,f,:] = np.mean(temp2_ctxs, axis=0)\n # avg_hidden_ctxs: [n_hidd, n_contexts, n_states, hidden_dim]: [2, 2, 16, 128] \n avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=1)\n samples_res = {'samples': samples, \n 'samples_ctxs': samples_ctxs,\n 'samples_inc_c': samples_inc_c}\n\n results = {'samples_res':samples_res,\n 'idxs1': idxs1, 'idxs2': idxs2,\n 'embeddings': embeddings, # [16, 32]\n 'hiddens_ctx':hiddens_ctx, # mlp/rnn: [384,128] or in stepwisedmlp: [2,384,128]\n 'hiddens_ctxs':hiddens_ctxs, # mlp: [n_ctx, 192, 1, 128], rnn: [n_ctx, 192, 3, 128]\n 'avg_hidden':avg_hidden, # [16, 128] or [n_hidd=2, 16, 128]\n 'avg_hidden_ctx':avg_hidden_ctx, # mlp/rnn: [32, 128] or stepwisedmlp: [n_hidd=2, 32, 128]\n # the reaosn to have these is because the concat for each model is diff and want to deal with it here\n 'avg_hidden_ctxs':avg_hidden_ctxs, # [mlp/rnn: n_ctx, 16, 128] or stepwisedmlp: [n_hidd=2, n_ctx, 16, 
128]\n 'hiddens_inc_c': hiddens_inc_c} # mlp/rnn: [288, 128] or stepwisedmlp: [n_hidd=2, 288, 128]\n return results\n\ndef analyze_accs(args, test_data, cortical_result, dist_results):\n resutls = {'train_acc': cortical_result['train_acc'],\n 'test_acc': cortical_result['test_acc'],\n 'cong_train_acc': cortical_result['cong_train_acc'],\n 'incong_train_acc': cortical_result['incong_train_acc'],\n 'cong_test_acc': cortical_result['cong_test_acc'],\n 'incong_test_acc': cortical_result['incong_test_acc']}\n return resutls\n \n # cortical_analyze_acc = cortical_result['analyze_acc']\n # cortical_analyze_correct = cortical_result['analyze_correct']\n\ndef analyze_credit_assignment(args, test_data, cortical_result, dist_results):\n resutls = {'grad_ctx': cortical_result['grad_ctx'],\n 'grad_f1': cortical_result['grad_f1'],\n 'grad_f2': cortical_result['grad_f2'],\n 'grad_ctx_cong': cortical_result['grad_ctx_cong'],\n 'grad_f1_cong': cortical_result['grad_f1_cong'],\n 'grad_f2_cong': cortical_result['grad_f2_cong'],\n 'grad_ctx_incong': cortical_result['grad_ctx_incong'],\n 'grad_f1_incong': cortical_result['grad_f1_incong'],\n 'grad_f2_incong': cortical_result['grad_f2_incong']\n }\n return resutls\n\ndef proportions(args, test_data, cortical_result, dist_results):\n hiddens_ctxs = cortical_result['hiddens_ctxs'] # list of len [n_ctx]\n hiddens_ctxs = [np.concatenate(h, axis=0) for h in hiddens_ctxs] # list of len [n_ctx] each has either [192,128] or [224,128]\n # when n_ctx=8, we have diff number of ties, therefore, \n # in the first 4 contexts we have [192, 128], and in \n # the second 4 contexts (diagonals) we have [224, 128]\n # that is why we go over each of the hiddens in hiddens_ctxs\n # and then concat them to create [n_trials, hidden_dim] for each\n ps = []\n p_pies = []\n for h in hiddens_ctxs: # h: [n_trials, hidden_dim]\n p_pies.append(np.any(h>0, axis=0)) # list of len [n_ctx], each shape [128,]\n ps.append(np.mean(h>0, axis=0)) # [n_ctx, 128]\n ps = np.asarray(ps) \n # ps: [n_ctx, 128]\n # avg num of the trials that were active for each unit, and for each context\n s = np.sum(ps, axis=0, keepdims=True) \n # s: [1, hidden_dim], overall activity of each hidden unit, \n # if that unit was active at all, over all trials (regardless of the context)\n n = ps / s \n # n: [n_ctx, hidden_dim] \n # normalized - how much each unit is active for each ctx over trials \n # normalized by the overall activity of that unit for all ctx and trials\n # f = n > threshold\n # there are some NaNs\n prop_results = {'hiddens_ctxs': hiddens_ctxs,\n 'p_pies': p_pies, # which trials are active for each hidden unit, \n 'ps': ps, # on average, how many trials were active for each hidden unit\n 'n': n}\n return prop_results\n\ndef calc_dist_ctx(args, test_data, cortical_result, dist_results):\n N_contexts = 2 #ToDo: for now it works only for x and y, because of the angles\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n N_contexts = args.N_contexts\n N_responses = args.N_responses\n avg_hidden_ctxs = cortical_result['avg_hidden_ctxs'] # [2, 16, 128]\n # Correlation\n grid_dists = []\n hidd_dists_ctxs = [[] for i in range(N_contexts)]\n grid_1ds_ctxs = [[] for i in range(N_contexts)]\n grid_angles = []\n samples = []\n\n for idx1, idx2 in combinations(idxs, 2):\n (x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]\n samples.append((idx1, idx2))\n grid_dist = 
np.sqrt((x1-x2)**2 + (y1-y2)**2)\n grid_dists.append(grid_dist)\n for ctx in range(N_contexts):\n # Euclidean distance between hidden reps. in context ctx\n if args.cortical_model=='stepwisemlp':\n hidd_dist = np.zeros([2])\n hidd1, hidd2 = avg_hidden_ctxs[0,ctx,idx1,:], avg_hidden_ctxs[0,ctx,idx2,:]\n hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)\n hidd1, hidd2 = avg_hidden_ctxs[1,ctx,idx1,:], avg_hidden_ctxs[1,ctx,idx2,:]\n hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)\n else:\n hidd1, hidd2 = avg_hidden_ctxs[ctx][idx1], avg_hidden_ctxs[ctx][idx2]\n hidd_dist = np.linalg.norm(hidd1 - hidd2)\n hidd_dists_ctxs[ctx].append(hidd_dist)\n # 1D rank - Manhattan distance\n loc1 = [x1, y1]\n loc2 = [x2, y2]\n winegrid = WineGrid(N_responses, N_contexts)\n r1, r2 = winegrid.ctx_to_r(ctx, loc1, loc2) \n grid_1ds_ctxs[ctx].append(np.abs(r1-r2))\n # create on and off diagonal groups\n \n grid_angle = np.arctan2((y2-y1),(x2-x1))\n grid_angles.append(grid_angle)\n \n grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]\n grid_angles = np.array(grid_angles) # [120]\n samples = np.array(samples)\n hidd_dists_ctxs = np.array(hidd_dists_ctxs) # [n_ctx, sampels, n_hidds]: in mlp: [2,120], in stepwisemlp: [2,120,2]\n\n phi = np.sin(2*grid_angles)\n binary_phi = np.sign(phi)\n for i, p in enumerate(phi):\n if np.abs(p)<1e-5:\n binary_phi[i] = 0\n\n angle_results = {'grid_angles': grid_angles,\n 'phi': phi,\n 'binary_phi': binary_phi}\n dist_results = {'samples': samples,\n 'hidd_dists_ctxs': hidd_dists_ctxs,\n 'grid_1ds_ctxs': grid_1ds_ctxs,\n 'grid_dists': grid_dists,\n 'angle_results': angle_results}\n return dist_results\n\ndef calc_dist(args, test_data, cortical_result, dist_results=None):\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n\n # Correlation\n grid_dists = []\n cong_grid_dists = []\n incong_grid_dists = []\n embed_dists = []\n hidd_dists = []\n cong_hidd_dists = []\n incong_hidd_dists = []\n cong_embed_dists = []\n incong_embed_dists = []\n grid_angles = []\n cong_grid_angles = []\n incong_grid_angles = []\n samples = []\n\n embeddings = cortical_result['embeddings']\n avg_hidden = cortical_result['avg_hidden'] # [16, 128]\n\n for idx1, idx2 in combinations(idxs, 2):\n (x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]\n samples.append((idx1, idx2))\n grid_dist = np.sqrt((x1-x2)**2 + (y1-y2)**2)\n grid_dists.append(grid_dist)\n # Euclidean distance between embeddings\n emb1, emb2 = embeddings[idx1], embeddings[idx2]\n embed_dist = np.linalg.norm(emb1 - emb2)\n embed_dists.append(embed_dist)\n # Euclidean distance between hidden reps.\n if args.cortical_model=='stepwisemlp':\n hidd_dist = np.zeros([2])\n hidd1, hidd2 = avg_hidden[0,idx1], avg_hidden[0,idx2]\n hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)\n hidd1, hidd2 = avg_hidden[1,idx1], avg_hidden[1,idx2]\n hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)\n else:\n hidd1, hidd2 = avg_hidden[idx1], avg_hidden[idx2]\n hidd_dist = np.linalg.norm(hidd1 - hidd2)\n hidd_dists.append(hidd_dist)\n # create on and off diagonal groups\n grid_angle = np.arctan2((y2-y1),(x2-x1))\n grid_angles.append(grid_angle)\n phi = np.sin(2*grid_angle)\n if np.abs(phi)<1e-5:\n # for congrunet trials, \n # zero out those very close to zero angles\n # so it won't turn into 1 or -1 by sign\n cong = 0\n else:\n cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none\n if cong==1:\n 
cong_hidd_dists.append(hidd_dist)\n cong_grid_dists.append(grid_dist)\n cong_embed_dists.append(embed_dist)\n cong_grid_angles.append(grid_angle)\n if cong==-1:\n incong_hidd_dists.append(hidd_dist)\n incong_grid_dists.append(grid_dist)\n incong_embed_dists.append(embed_dist)\n incong_grid_angles.append(grid_angle) \n grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]\n embed_dists = np.array(embed_dists)\n hidd_dists = np.array(hidd_dists)\n cong_grid_dists = np.array(cong_grid_dists) # [36]\n incong_grid_dists = np.array(incong_grid_dists) # [36]\n cong_hidd_dists = np.array(cong_hidd_dists)\n incong_hidd_dists = np.array(incong_hidd_dists)\n cong_embed_dists = np.array(cong_embed_dists)\n incong_embed_dists = np.array(incong_embed_dists)\n grid_angles = np.array(grid_angles) # [120]\n cong_grid_angles = np.array(cong_grid_angles) # [36]\n incong_grid_angles = np.array(incong_grid_angles) # [36]\n samples = np.array(samples)\n\n phi = np.sin(2*grid_angles)\n binary_phi = np.sign(phi)\n for i, p in enumerate(phi):\n if np.abs(p)<1e-5:\n binary_phi[i] = 0\n\n cong_dist_results = {'cong_grid_dists': cong_grid_dists,\n 'cong_hidd_dists': cong_hidd_dists,\n 'cong_embed_dists': cong_embed_dists}\n incong_dist_results = {'incong_grid_dists': incong_grid_dists,\n 'incong_hidd_dists': incong_hidd_dists,\n 'incong_embed_dists': incong_embed_dists}\n angle_results = {'grid_angles': grid_angles,\n 'cong_grid_angles': cong_grid_angles, \n 'incong_grid_angles': incong_grid_angles,\n 'phi': phi,\n 'binary_phi': binary_phi}\n dist_results = {'samples': samples, \n 'grid_dists': grid_dists,\n 'embed_dists': embed_dists,\n 'hidd_dists':hidd_dists,\n 'cong_dist_results': cong_dist_results,\n 'incong_dist_results': incong_dist_results,\n 'angle_results': angle_results}\n return dist_results\n\ndef analyze_dim_red(args, test_data, cortical_result, dist_results, n_components=2):\n method = args.dimred_method\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n locs = [idx2loc[idx] for idx in idxs]\n embeddings = cortical_result['embeddings'] # [16, 32]\n hiddens_ctx = cortical_result['hiddens_ctx'] # [384, 128] or in stepwisemlp: [2,384,128]\n avg_hidden = cortical_result['avg_hidden'] # [16, 128] or in stepwisemlp: [2,16,128]\n avg_hidden_ctx = cortical_result['avg_hidden_ctx'] # [32, 128] or in stepwisemlp: [2,32,128]\n hiddens_inc_c = cortical_result['hiddens_inc_c'] # [288, 128] or in stepwisemlp: [2,288,128]\n # hiddens_ctx = np.asarray(hiddens_ctxs)\n # hiddens_ctxs = np.concatenate(hiddens_ctxs, axis=0).squeeze() # [384, 128] or [384, 3, 128]\n # if ((args.cortical_model == 'rnn') or (args.cortical_model == 'rnncell')):\n # hiddens_ctx = hiddens_ctx[:,-1, :]\n # avg_hidden_ctxs = np.concatenate(avg_hidden_ctxs, axis=0) # [32, 128]\n \n results = {}\n # PCA\n if method == 'pca':\n pca = PCA(n_components=n_components)\n pca_2d_embed = pca.fit_transform(embeddings)\n if args.cortical_model=='stepwisemlp':\n pca_2d_hidd = np.zeros([hiddens_ctx.shape[0], hiddens_ctx.shape[1], n_components])\n pca_2d_avg_hidd = np.zeros([avg_hidden.shape[0], avg_hidden.shape[1], n_components])\n pca_2d_ctx_hidd = np.zeros([avg_hidden_ctx.shape[0], avg_hidden_ctx.shape[1], n_components])\n pca_2d_incong_cong = np.zeros([hiddens_inc_c.shape[0], hiddens_inc_c.shape[1], n_components])\n for h in range(hiddens_ctx.shape[0]):\n pca_2d_hidd[h,:,:] = pca.fit_transform(hiddens_ctx[h,:,:]) # this is all the 
hiddens, no averaging for each face\n pca_2d_avg_hidd[h,:,:] = pca.fit_transform(avg_hidden[h,:,:]) \n pca_2d_ctx_hidd[h,:,:] = pca.fit_transform(avg_hidden_ctx[h,:,:])\n pca_2d_incong_cong[h,:,:] = pca.fit_transform(hiddens_inc_c[h,:,:])\n \n else:\n pca_2d_hidd = pca.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face\n pca_2d_avg_hidd = pca.fit_transform(avg_hidden) # I might need to save this at all\n pca_2d_ctx_hidd = pca.fit_transform(avg_hidden_ctx)\n pca_2d_incong_cong = pca.fit_transform(hiddens_inc_c)\n results = {'embed_2d': pca_2d_embed, \n 'hidd_2d': pca_2d_hidd,\n 'avg_hidd_2d': pca_2d_avg_hidd,\n 'ctx_hidd_2d': pca_2d_ctx_hidd,\n 'incong_cong_2d': pca_2d_incong_cong,\n 'grid_locations': locs,\n 'samples_res': cortical_result['samples_res']}\n elif method == 'mds':\n # MDS\n mds = MDS(n_components=n_components)\n mds_2d_embed = mds.fit_transform(embeddings)\n mds_2d_hidd = mds.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face\n mds_2d_avg_hidd = mds.fit_transform(avg_hidden) # I might need to save this at all\n mds_2d_ctx_hidd = mds.fit_transform(avg_hidden_ctx)\n mds_2d_incong_cong = mds.fit_transform(hiddens_inc_c)\n results = {'embed_2d': mds_2d_embed, \n 'hidd_2d': mds_2d_hidd,\n 'avg_hidd_2d': mds_2d_avg_hidd,\n 'ctx_hidd_2d': mds_2d_ctx_hidd,\n 'incong_cong_2d': mds_2d_incong_cong}\n elif method == 'tsne':\n # tSNE\n tsne = TSNE(n_components=n_components)\n tsne_2d_embed = tsne.fit_transform(embeddings)\n tsne_2d_hidd = tsne.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face\n tsne_2d_avg_hidd = tsne.fit_transform(avg_hidden) # I might need to save this at all\n tsne_2d_ctx_hidd = tsne.fit_transform(avg_hidden_ctx)\n tsne_2d_incong_cong = tsne.fit_transform(hiddens_inc_c)\n results = {'embed_2d': tsne_2d_embed, \n 'hidd_2d': tsne_2d_hidd,\n 'avg_hidd_2d': tsne_2d_avg_hidd,\n 'ctx_hidd_2d': tsne_2d_ctx_hidd,\n 'incong_cong_2d': tsne_2d_incong_cong}\n return results\n\ndef hist_data(args, test_data, cortical_result, dist_results):\n # embeddings\n cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']\n incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']\n \n # hiddens\n cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']\n incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']\n \n dist_c_inc_results = {'cong_embed_dist': cong_embed_dists, \n 'incong_embed_dist': incong_embed_dists,\n 'cong_hidd_dist': cong_hidd_dists,\n 'incong_hidd_dist': incong_hidd_dists}\n \n return dist_c_inc_results\n\ndef calc_ratio(args, test_data, cortical_result, dist_results):\n # embeddings\n cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']\n incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']\n avg_cong_embed = np.mean(cong_embed_dists)\n avg_incong_embed = np.mean(incong_embed_dists)\n ratio_embed = (avg_cong_embed/avg_incong_embed)\n \n # hiddens\n cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']\n incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']\n avg_cong_hidd = np.mean(cong_hidd_dists, axis=0)\n avg_incong_hidd = np.mean(incong_hidd_dists, axis=0)\n # ratio_hidd = (avg_cong_hidd/avg_incong_hidd)\n ratio_hidd = (avg_incong_hidd/avg_cong_hidd)\n \n ratio_results = {'ratio_embed': ratio_embed, 'ratio_hidd': ratio_hidd,\\\n 'avg_cong_hidd': avg_cong_hidd, 'avg_incong_hidd': avg_incong_hidd}\n \n return 
ratio_results\n\ndef extract_hidd_dist(dist_results):\n # hiddens\n cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']\n incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']\n dist_result_hidd = {'cong_hidd_dists': cong_hidd_dists, 'incong_hidd_dists': incong_hidd_dists}\n \n return dist_result_hidd\n\ndef analyze_ttest(args, test_data, cortical_result, dist_results): \n cong_res = dist_results['cong_dist_results']\n incong_res = dist_results['incong_dist_results']\n \n incong_hidd_dists = incong_res['incong_hidd_dists']\n cong_hidd_dists = cong_res['cong_hidd_dists']\n if args.cortical_model == 'stepwisemlp':\n t_hidd, t_p_val_hidd = np.zeros([2]), np.zeros([2])\n for h in range(2):\n t_hidd[h], t_p_val_hidd[h] = ttest_ind(cong_hidd_dists[:,h], incong_hidd_dists[:,h])\n else:\n t_hidd, t_p_val_hidd = ttest_ind(cong_res['cong_hidd_dists'], \n incong_res['incong_hidd_dists'])\n t_embed, t_p_val_embed = ttest_ind(cong_res['cong_embed_dists'], \n incong_res['incong_embed_dists'])\n t_grid, t_p_val_grid = ttest_ind(cong_res['cong_grid_dists'], \n incong_res['incong_grid_dists'])\n ttest_results = {'t_stat_hidd':t_hidd, 't_p_val_hidd': t_p_val_hidd,\n 't_stat_embed':t_embed, 't_p_val_embed': t_p_val_embed,\n 't_grid':t_grid, 't_p_val_grid': t_p_val_grid}\n return ttest_results\n\ndef analyze_corr(args, test_data, cortical_result, dist_results):\n grid_dists = dist_results['grid_dists']\n embed_dists = dist_results['embed_dists'] \n hidd_dists = dist_results['hidd_dists'] \n cong_res = dist_results['cong_dist_results']\n incong_res = dist_results['incong_dist_results']\n r_embed, p_val_embed = pearsonr(grid_dists, embed_dists)\n if args.cortical_model == 'stepwisemlp':\n r_hidd, p_val_hidd = np.zeros([2]), np.zeros([2])\n r_cong_hidd, p_val_cong_hidd, r_incong_hidd, p_val_incong_hidd = \\\n np.zeros([2]), np.zeros([2]), np.zeros([2]), np.zeros([2])\n cong_hidd_dists, incong_hidd_dists = cong_res['cong_hidd_dists'], \\\n incong_res['incong_hidd_dists']\n for h in range(2):\n r_hidd[h], p_val_hidd[h] = pearsonr(grid_dists, hidd_dists[:,h])\n r_cong_hidd[h], p_val_cong_hidd[h] = pearsonr(cong_res['cong_grid_dists'], \n cong_hidd_dists[:,h]) \n r_incong_hidd[h], p_val_incong_hidd[h] = pearsonr(incong_res['incong_grid_dists'],\n incong_hidd_dists[:,h]) \n else:\n r_hidd, p_val_hidd = pearsonr(grid_dists, hidd_dists)\n r_cong_hidd, p_val_cong_hidd = pearsonr(cong_res['cong_grid_dists'], \n cong_res['cong_hidd_dists'])\n r_incong_hidd, p_val_incong_hidd = pearsonr(incong_res['incong_grid_dists'],\n incong_res['incong_hidd_dists'])\n r_cong_embed, p_val_cong_embed = pearsonr(cong_res['cong_grid_dists'], \n cong_res['cong_embed_dists'])\n r_incong_embed, p_val_incong_embed = pearsonr(incong_res['incong_grid_dists'], \n incong_res['incong_embed_dists']) \n corr_results = {'r_embed': r_embed, 'p_val_embed': p_val_embed,\n 'r_cong_embed': r_cong_embed, \n 'p_val_cong_embed': p_val_cong_embed,\n 'r_incong_embed': r_incong_embed, \n 'p_val_incong_embed': p_val_incong_embed,\n 'r_hidd': r_hidd, 'p_val_hidd': p_val_hidd,\n 'r_cong_hidd': r_cong_hidd, \n 'p_val_cong_hidd': p_val_cong_hidd,\n 'r_incong_hidd': r_incong_hidd, \n 'p_val_incong_hidd': p_val_incong_hidd}\n return corr_results\n\ndef analyze_regression(args, test_data, cortical_result, dist_results):\n hidd_dists = dist_results['hidd_dists']\n grid_dists = dist_results['grid_dists']\n phi = dist_results['angle_results']['phi']\n binary_phi = dist_results['angle_results']['binary_phi']\n # prepare data for the 
regression analysis\n x_cat = np.concatenate((grid_dists.reshape((-1,1)), binary_phi.reshape((-1,1))),axis=1)\n x_con = np.concatenate((grid_dists.reshape((-1,1)), phi.reshape((-1,1))),axis=1)\n\n # categorical regression analysis\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))\n y_hat_E = np.zeros(hidd_dists.shape)\n y = np.zeros(hidd_dists.shape)\n for h in range(2):\n y[:,h] = hidd_dists[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists)\n else:\n y = hidd_dists\n y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists) \n cat_reg = {'p_val': p_val,\n 't_val': t_val,\n 'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n\n # continuous regression analysis\n x_con = sm.add_constant(x_con)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))\n y_hat_E = np.zeros(hidd_dists.shape)\n y = np.zeros(hidd_dists.shape)\n for h in range(2):\n y[:,h] = hidd_dists[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists)\n else:\n y = hidd_dists\n y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists) \n con_reg = {'p_val': p_val,\n 't_val': t_val,\n 'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n\n reg_results = {'cat_reg': cat_reg, \n 'con_reg': con_reg}\n return reg_results\n\ndef run_regression(x,y,grid_dist):\n stats_model = sm.OLS(y,x).fit() \n y_hat_E = stats_model.params[0] + (stats_model.params[1]*grid_dist) \n p_val, t_val, param, bse = stats_model.pvalues, stats_model.tvalues, \\\n stats_model.params, stats_model.bse\n return y_hat_E, p_val, t_val, param, bse\n\ndef analyze_regression_1D(args, test_data, cortical_result, dist_results):\n # make sure dist_results is dist_ctx_results\n hidd_dists_ctxs = dist_results['hidd_dists_ctxs']\n hidd_dists_ctx0 = hidd_dists_ctxs[0]\n hidd_dists_ctx1 = hidd_dists_ctxs[1]\n grid_1ds_ctxs = dist_results['grid_1ds_ctxs']\n grid_1ds_ctx0 = grid_1ds_ctxs[0]\n grid_1ds_ctx1 = grid_1ds_ctxs[1]\n grid_dists = dist_results['grid_dists']\n \n phi = dist_results['angle_results']['phi']\n binary_phi = dist_results['angle_results']['binary_phi']\n \n hidd_dists_ctx = np.concatenate((hidd_dists_ctx0, hidd_dists_ctx1), axis=0)\n grid_1ds_ctx = np.concatenate((grid_1ds_ctx0, grid_1ds_ctx1), axis=0)\n grid_dists_ctx = np.concatenate((grid_dists, grid_dists), axis=0)\n binary_phi_ctx = np.concatenate((binary_phi, binary_phi), axis=0)\n phi_ctx = np.concatenate((phi, phi), axis=0)\n # prepare data for the regression analysis\n x_cat = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),\n binary_phi_ctx.reshape((-1,1))),axis=1) # [240, 3]\n x_con = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),\n phi_ctx.reshape((-1,1))),axis=1)\n \n # categorical regression analysis\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, y_hat_E, y, bse = ([[] for i in range(2)] for i in range(6))\n y_hat_E = np.zeros(hidd_dists_ctx.shape)\n y = np.zeros(hidd_dists_ctx.shape)\n for h in range(2):\n y[:,h] = hidd_dists_ctx[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists_ctx)\n else:\n y = hidd_dists_ctx\n y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists_ctx)\n cat_reg = {'p_val': p_val,\n 't_val': t_val,\n 
'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n # continuous regression analysis\n x_con = sm.add_constant(x_con)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))\n y_hat_E = np.zeros(hidd_dists_ctx.shape)\n y = np.zeros(hidd_dists_ctx.shape)\n for h in range(2):\n y[:,h] = hidd_dists_ctx[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists_ctx)\n else:\n y = hidd_dists_ctx\n y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists_ctx)\n con_reg = {'p_val': p_val,\n 't_val': t_val,\n 'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n\n reg_results = {'cat_reg': cat_reg, \n 'con_reg': con_reg}\n return reg_results\n\ndef analyze_regression_exc(args, test_data, cortical_result, dist_results):\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n hidd_dists = dist_results['hidd_dists'] #[n_combinations]: [120]\n grid_dists = dist_results['grid_dists']\n binary_phi = dist_results['angle_results']['binary_phi'] # [120]\n samples = dist_results['samples'] # [120, 2]\n states=[]\n if args.cortical_model=='stepwisemlp':\n p_vals, t_vals, params, bses = ([[] for i in range(2)] for i in range(4))\n else:\n p_vals, t_vals, params, bses = ([] for i in range(4))\n\n for state in range(n_states):\n s_idxs = [i for i, sample in enumerate(samples) if state not in sample] # [105]\n # prepare data for the regression analysis\n x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)\n # regression analysis\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n for h in range(2):\n y = hidd_dists[s_idxs,h]\n _ , p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals[h].append(p_val)\n t_vals[h].append(t_val)\n params[h].append(param)\n bses[h].append(bse)\n else:\n y = hidd_dists[s_idxs]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals.append(p_val)\n t_vals.append(t_val)\n params.append(param)\n bses.append(bse)\n states.append(state)\n \n # regression analysis - after removing (0,0) and (3,3)\n s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample))] # [91]\n x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n for h in range(2):\n y = hidd_dists[s_idxs,h]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals[h].append(p_val)\n t_vals[h].append(t_val)\n params[h].append(param)\n bses[h].append(bse)\n else:\n y = hidd_dists[s_idxs]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals.append(p_val)\n t_vals.append(t_val)\n params.append(param)\n bses.append(bse)\n states.append(16)\n \n # regression analysis - after removing (0,0) and (3,3), (3,0) and (0.3)\n s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample) &\n (3 not in sample) & (12 not in sample))] #[66]\n x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n for h in range(2):\n y = hidd_dists[s_idxs,h] \n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals[h].append(p_val)\n t_vals[h].append(t_val)\n params[h].append(param)\n bses[h].append(bse)\n else:\n y = 
hidd_dists[s_idxs]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals.append(p_val)\n t_vals.append(t_val)\n params.append(param)\n bses.append(bse)\n states.append(17)\n\n states = np.array(states)\n p_vals = np.array(p_vals)\n t_vals = np.array(t_vals)\n params = np.array(params)\n bses = np.array(bses)\n \n exc_reg_results = {'excluded_states': states,\n 'p_vals': p_vals,\n 't_vals': t_vals,\n 'params': params,\n 'bses': bses} \n\n return exc_reg_results\n\ndef analyze_test_seq(args, test_data, cortical_result, dist_results):\n import sys\n sys.path.append(\"..\")\n data = get_loaders(batch_size=32, meta=False,\n use_images=True, image_dir='./images/',\n n_episodes=None,\n N_responses=args.N_responses, N_contexts=args.N_contexts,\n cortical_task = args.cortical_task, #ToDo:check why it was set to cortical_task='face_task',\n balanced = args.balanced)\n train_data, train_loader, test_data, test_loader, analyze_data, analyze_loader = data\n\n idx2loc = {idx:loc for loc, idx in test_data.loc2idx.items()}\n\n # ctx_order = 'first'\n # ctx_order_str = 'ctxF'\n \n analyze_correct = cortical_result['analyze_correct'] # [n_trials, time_steps]: [384, 3]\n analyze_correct = np.asarray(analyze_correct).squeeze()\n\n hidd_t_idx = 1 # at what time step, t = 1 means at the time of face1 \n # and t = 2 means at the time of face2\n # in axis First (axis is at t=0), it should be t = 1\n # create groups based on the row or columns\n # e.g, for context0 (xaxis), first column is group 1, sec col is group 2, and so on.\n # 4 groups for each axis/context; total 8 groups\n\n # ToDo: why it is always loc1???\n\n ctx0_g0=[]\n ctx0_g1=[]\n ctx0_g2=[]\n ctx0_g3=[]\n\n ctx1_g0=[]\n ctx1_g1=[]\n ctx1_g2=[]\n ctx1_g3=[]\n\n for i, batch in enumerate(analyze_loader):\n if args.cortical_task == 'face_task':\n f1, f2, ctx, y, idx1, idx2 = batch # face1, face2, context, y, index1, index2\n elif args.cortical_task == 'wine_task':\n f1, f2, ctx, y1, y2, idx1, idx2 = batch # face1, face2, context, y1, y2, index1, index2 \n msg = 'analyze_test_seq is only implemented for one response, two contexts'\n assert args.N_responses == 'one' and args.N_contexts == 2, msg\n\n if args.N_responses == 'one':\n y = y1\n # f1, f2, ax, y, idx1, idx2 = batch\n acc = analyze_correct[i][hidd_t_idx]\n ctx = ctx.cpu().numpy().squeeze()\n idx1 = idx1[0]\n idx2 = idx2[0]\n loc1 = idx2loc[idx1]\n loc2 = idx2loc[idx2]\n if ctx==0:\n if loc1[ctx]==0: ctx0_g0.append(acc) # (len(all_perms)/2) / 4 = [48]\n elif loc1[ctx]==1: ctx0_g1.append(acc)\n elif loc1[ctx]==2: ctx0_g2.append(acc)\n elif loc1[ctx]==3: ctx0_g3.append(acc)\n elif ctx==1:\n if loc1[ctx]==0: ctx1_g0.append(acc)\n elif loc1[ctx]==1: ctx1_g1.append(acc)\n elif loc1[ctx]==2: ctx1_g2.append(acc)\n elif loc1[ctx]==3: ctx1_g3.append(acc)\n ctx0_accs = [np.mean(ctx0_g0), np.mean(ctx0_g1), \n np.mean(ctx0_g2), np.mean(ctx0_g3) ]\n ctx1_accs = [np.mean(ctx1_g0), np.mean(ctx1_g1), \n np.mean(ctx1_g2), np.mean(ctx1_g3) ]\n \n # print('Accuracy at t=%s (face%s) contex 0:' %(hidd_t_idx,hidd_t_idx), ctx0_accs)\n # print('Accuracy at t=%s (face%s) contex 1:' %(hidd_t_idx,hidd_t_idx), ctx1_accs)\n return ctx0_accs, ctx1_accs"
] | [
[
"numpy.sum",
"scipy.stats.ttest_ind",
"torch.no_grad",
"numpy.argsort",
"numpy.asarray",
"numpy.any",
"sklearn.manifold.MDS",
"numpy.abs",
"sklearn.manifold.TSNE",
"numpy.expand_dims",
"numpy.mean",
"scipy.stats.pearsonr",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.linalg.norm",
"sklearn.decomposition.PCA",
"numpy.zeros_like",
"numpy.arctan2",
"numpy.sign",
"numpy.squeeze",
"numpy.sqrt",
"numpy.sin",
"numpy.concatenate"
]
] |
jirivrany/kagle-statoil | [
"8c70691fc7ca7d8a6a33a3544f76b22f1b508f7a"
] | [
"cnn_2bands.py"
] | [
"\n# coding: utf-8\n\n\"\"\"\n\n\"\"\"\n\n\nimport pandas as pd \nimport numpy as np \nimport cv2 # Used to manipulated the images \nfrom scipy.signal import wiener\n\nnp.random.seed(1207) # The seed I used - pick your own or comment out for a random seed. A constant seed allows for better comparisons though\n\n# Import Keras \nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\n\n\n# ## Load Training Data\n\n# In[2]:\n\n\ndf_train = pd.read_json('./input/train.json') # this is a dataframe\n\n\n# Need to reshape and feature scale the images:\n\n# In[3]:\n\n\ndef get_scaled_imgs(df):\n imgs = []\n \n for i, row in df.iterrows():\n band_1 = np.array(row['band_1'])\n band_2 = np.array(row['band_2'])\n\n #make 75x75 image\n band_1 = band_1.reshape(75, 75)\n band_2 = band_2.reshape(75, 75)\n #band_3 = band_1 + band_2 # plus since log(x*y) = log(x) + log(y)\n \n # Rescale\n a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())\n b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())\n #c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())\n\n imgs.append(np.dstack((a, b)))\n\n return np.array(imgs)\n\n\n\n\ndef get_more_images(imgs):\n \n more_images = []\n vert_flip_imgs = []\n hori_flip_imgs = []\n \n for i in range(0,imgs.shape[0]):\n a=imgs[i,:,:,0]\n b=imgs[i,:,:,1]\n #c=imgs[i,:,:,2]\n \n av=cv2.flip(a,1)\n ah=cv2.flip(a,0)\n bv=cv2.flip(b,1)\n bh=cv2.flip(b,0)\n #cv=cv2.flip(c,1)\n #ch=cv2.flip(c,0)\n \n #vert_flip_imgs.append(np.dstack((av, bv, cv)))\n #hori_flip_imgs.append(np.dstack((ah, bh, ch)))\n vert_flip_imgs.append(np.dstack((av, bv)))\n hori_flip_imgs.append(np.dstack((ah, bh)))\n \n v = np.array(vert_flip_imgs)\n h = np.array(hori_flip_imgs)\n \n more_images = np.concatenate((imgs,v,h))\n \n return more_images\n\n\ndef getModel():\n #Build keras model\n \n model=Sequential()\n \n # CNN 1\n model.add(Conv2D(64, kernel_size=(3, 3),activation='relu', input_shape=(75, 75, 2)))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n \n # CNN 2\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n #model.add(Dropout(0.2))\n\n # CNN 3\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n #model.add(Dropout(0.2))\n\n #CNN 4\n model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n \n # You must flatten the data for the dense layers\n model.add(Flatten())\n\n #Dense 1\n model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n\n #Dense 2\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.2))\n\n # Output \n model.add(Dense(1, activation=\"sigmoid\"))\n\n optimizer = Adam(lr=0.0001, decay=0.0)\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n \n return model\n\n\n\nXtrain = get_scaled_imgs(df_train)\nYtrain = 
np.array(df_train['is_iceberg'])\ndf_train.inc_angle = df_train.inc_angle.replace('na',0)\nidx_tr = np.where(df_train.inc_angle>0)\n\nYtrain = Ytrain[idx_tr[0]]\nXtrain = Xtrain[idx_tr[0],...]\n\n#Xtr_more = get_more_images(Xtrain) \n#Ytr_more = np.concatenate((Ytrain,Ytrain,Ytrain))\n\nX_train, X_valid, y_train, y_valid = train_test_split(Xtrain, Ytrain, test_size=0.1)\n\nX_train_more = get_more_images(X_train)\ny_train_more = np.concatenate([y_train, y_train, y_train])\nX_valid_more = get_more_images(X_valid)\ny_valid_more = np.concatenate([y_valid, y_valid, y_valid])\n\n\nmodel = getModel()\nmodel.summary()\n\nbatch_size = 32\nmodel_file = '.mdl_2l2_wts.hdf5'\n\nearly_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')\nmcp_save = ModelCheckpoint(model_file, save_best_only=True, monitor='val_loss', mode='min')\nreduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-6, mode='min')\n\n\n#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=50, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.25)\n#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=60, verbose=1, callbacks=[mcp_save, reduce_lr_loss], validation_split=0.2)\n\nmodel.fit(X_train_more, y_train_more, batch_size=32, epochs=60, verbose=1,\n callbacks=[mcp_save, reduce_lr_loss],\n validation_data=(X_valid, y_valid))\n\n\nmodel.load_weights(filepath = model_file)\n\nscore = model.evaluate(Xtrain, Ytrain, verbose=1)\nprint('Train score:', score[0])\nprint('Train accuracy:', score[1])\n\n\ndf_test = pd.read_json('./input/test.json')\ndf_test.inc_angle = df_test.inc_angle.replace('na',0)\nXtest = (get_scaled_imgs(df_test))\npred_test = model.predict(Xtest)\n\nsubmission = pd.DataFrame({'id': df_test[\"id\"], 'is_iceberg': pred_test.reshape((pred_test.shape[0]))})\nprint(submission.head(10))\n\nsubmission.to_csv('sub-2bands-nodrop-aug.csv', index=False)\n\n"
] | [
[
"numpy.concatenate",
"numpy.random.seed",
"pandas.read_json",
"numpy.dstack",
"numpy.array",
"numpy.where",
"sklearn.model_selection.train_test_split"
]
] |
pagun12/predictive-monitoring-benchmark | [
"78a3c2723406dd85aec3b5b01e1ae2edb657f8e2"
] | [
"bucketers/StateBasedBucketer.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom time import time\nimport sys\n\nclass StateBasedBucketer(object):\n \n def __init__(self, encoder):\n self.encoder = encoder\n \n self.dt_states = None\n self.n_states = 0\n \n \n def fit(self, X, y=None):\n \n dt_encoded = self.encoder.fit_transform(X)\n \n self.dt_states = dt_encoded.drop_duplicates()\n self.dt_states = self.dt_states.assign(state = range(len(self.dt_states)))\n \n self.n_states = len(self.dt_states)\n \n return self\n \n \n def predict(self, X, y=None):\n \n dt_encoded = self.encoder.transform(X)\n \n dt_transformed = pd.merge(dt_encoded, self.dt_states, how='left')\n dt_transformed.fillna(-1, inplace=True)\n \n return dt_transformed[\"state\"].astype(int).as_matrix()\n \n \n def fit_predict(self, X, y=None):\n \n self.fit(X)\n return self.predict(X)"
] | [
[
"pandas.merge"
]
] |
EkremBayar/bayar | [
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39",
"aad1a32044da671d0b4f11908416044753360b39"
] | [
"venv/Lib/site-packages/pandas/tests/plotting/test_hist_method.py",
"venv/Lib/site-packages/statsmodels/tsa/filters/tests/test_filters.py",
"venv/Lib/site-packages/plotnine/scales/scale_stroke.py",
"venv/Lib/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py",
"venv/Lib/site-packages/scipy/sparse/linalg/tests/test_interface.py",
"venv/Lib/site-packages/statsmodels/discrete/tests/test_margins.py",
"venv/Lib/site-packages/pandas/io/excel/_pyxlsb.py",
"venv/Lib/site-packages/mpl_toolkits/tests/test_axes_grid.py"
] | [
"\"\"\" Test cases for .hist method \"\"\"\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import DataFrame, Index, Series, to_datetime\nimport pandas._testing as tm\nfrom pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n\npytestmark = pytest.mark.slow\n\n\n@td.skip_if_no_mpl\nclass TestSeriesPlots(TestPlotBase):\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n import matplotlib as mpl\n\n mpl.rcdefaults()\n\n self.ts = tm.makeTimeSeries()\n self.ts.name = \"ts\"\n\n def test_hist_legacy(self):\n _check_plot_works(self.ts.hist)\n _check_plot_works(self.ts.hist, grid=False)\n _check_plot_works(self.ts.hist, figsize=(8, 10))\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.ts.hist, by=self.ts.index.month)\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)\n\n fig, ax = self.plt.subplots(1, 1)\n _check_plot_works(self.ts.hist, ax=ax)\n _check_plot_works(self.ts.hist, ax=ax, figure=fig)\n _check_plot_works(self.ts.hist, figure=fig)\n tm.close()\n\n fig, (ax1, ax2) = self.plt.subplots(1, 2)\n _check_plot_works(self.ts.hist, figure=fig, ax=ax1)\n _check_plot_works(self.ts.hist, figure=fig, ax=ax2)\n\n with pytest.raises(ValueError):\n self.ts.hist(by=self.ts.index, figure=fig)\n\n def test_hist_bins_legacy(self):\n df = DataFrame(np.random.randn(10, 2))\n ax = df.hist(bins=2)[0][0]\n assert len(ax.patches) == 2\n\n def test_hist_layout(self):\n df = self.hist_df\n with pytest.raises(ValueError):\n df.height.hist(layout=(1, 1))\n\n with pytest.raises(ValueError):\n df.height.hist(layout=[1, 1])\n\n def test_hist_layout_with_by(self):\n df = self.hist_df\n\n # _check_plot_works adds an `ax` kwarg to the method call\n # so we get a warning about an axis being cleared, even\n # though we don't explicing pass one, see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))\n self._check_axes_shape(axes, axes_num=2, layout=(3, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))\n self._check_axes_shape(axes, axes_num=4, layout=(3, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))\n self._check_axes_shape(axes, axes_num=4, layout=(1, 4))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))\n\n def test_hist_no_overlap(self):\n from matplotlib.pyplot import gcf, subplot\n\n x = Series(np.random.randn(2))\n y = 
Series(np.random.randn(2))\n subplot(121)\n x.hist()\n subplot(122)\n y.hist()\n fig = gcf()\n axes = fig.axes\n assert len(axes) == 2\n\n def test_hist_by_no_extra_plots(self):\n df = self.hist_df\n axes = df.height.hist(by=df.gender) # noqa\n assert len(self.plt.get_fignums()) == 1\n\n def test_plot_fails_when_ax_differs_from_figure(self):\n from pylab import figure\n\n fig1 = figure()\n fig2 = figure()\n ax1 = fig1.add_subplot(111)\n with pytest.raises(AssertionError):\n self.ts.hist(ax=ax1, figure=fig2)\n\n @pytest.mark.parametrize(\n \"histtype, expected\",\n [\n (\"bar\", True),\n (\"barstacked\", True),\n (\"step\", False),\n (\"stepfilled\", True),\n ],\n )\n def test_histtype_argument(self, histtype, expected):\n # GH23992 Verify functioning of histtype argument\n ser = Series(np.random.randint(1, 10))\n ax = ser.hist(histtype=histtype)\n self._check_patches_all_filled(ax, filled=expected)\n\n @pytest.mark.parametrize(\n \"by, expected_axes_num, expected_layout\", [(None, 1, (1, 1)), (\"b\", 2, (1, 2))]\n )\n def test_hist_with_legend(self, by, expected_axes_num, expected_layout):\n # GH 6279 - Series histogram can have a legend\n index = 15 * [\"1\"] + 15 * [\"2\"]\n s = Series(np.random.randn(30), index=index, name=\"a\")\n s.index.name = \"b\"\n\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by)\n self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)\n self._check_legend_labels(axes, \"a\")\n\n @pytest.mark.parametrize(\"by\", [None, \"b\"])\n def test_hist_with_legend_raises(self, by):\n # GH 6279 - Series histogram with legend and label raises\n index = 15 * [\"1\"] + 15 * [\"2\"]\n s = Series(np.random.randn(30), index=index, name=\"a\")\n s.index.name = \"b\"\n\n with pytest.raises(ValueError, match=\"Cannot use both legend and label\"):\n s.hist(legend=True, by=by, label=\"c\")\n\n\n@td.skip_if_no_mpl\nclass TestDataFramePlots(TestPlotBase):\n def test_hist_df_legacy(self):\n from matplotlib.patches import Rectangle\n\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.hist_df.hist)\n\n # make sure layout is handled\n df = DataFrame(np.random.randn(100, 2))\n df[2] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, grid=False)\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert not axes[1, 1].get_visible()\n\n _check_plot_works(df[[2]].hist)\n df = DataFrame(np.random.randn(100, 1))\n _check_plot_works(df.hist)\n\n # make sure layout is handled\n df = DataFrame(np.random.randn(100, 5))\n df[5] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, layout=(4, 2))\n self._check_axes_shape(axes, axes_num=6, layout=(4, 2))\n\n # make sure sharex, sharey is handled\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, sharex=True, sharey=True)\n\n # handle figsize arg\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, figsize=(8, 10))\n\n # check bins argument\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, bins=5)\n\n # make sure xlabelsize and xrot are handled\n ser = df[0]\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n axes = 
ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)\n self._check_ticks_props(\n axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot\n )\n\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)\n self._check_ticks_props(\n axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot\n )\n\n tm.close()\n\n ax = ser.hist(cumulative=True, bins=4, density=True)\n # height of last bin (index 5) must be 1.0\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n tm.assert_almost_equal(rects[-1].get_height(), 1.0)\n\n tm.close()\n ax = ser.hist(log=True)\n # scale of y must be 'log'\n self._check_ax_scales(ax, yaxis=\"log\")\n\n tm.close()\n\n # propagate attr exception from matplotlib.Axes.hist\n with pytest.raises(AttributeError):\n ser.hist(foo=\"bar\")\n\n def test_hist_non_numerical_or_datetime_raises(self):\n # gh-10444, GH32590\n df = DataFrame(\n {\n \"a\": np.random.rand(10),\n \"b\": np.random.randint(0, 10, 10),\n \"c\": to_datetime(\n np.random.randint(\n 1582800000000000000, 1583500000000000000, 10, dtype=np.int64\n )\n ),\n \"d\": to_datetime(\n np.random.randint(\n 1582800000000000000, 1583500000000000000, 10, dtype=np.int64\n ),\n utc=True,\n ),\n }\n )\n df_o = df.astype(object)\n\n msg = \"hist method requires numerical or datetime columns, nothing to plot.\"\n with pytest.raises(ValueError, match=msg):\n df_o.hist()\n\n def test_hist_layout(self):\n df = DataFrame(np.random.randn(100, 2))\n df[2] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n\n layout_to_expected_size = (\n {\"layout\": None, \"expected_size\": (2, 2)}, # default is 2x2\n {\"layout\": (2, 2), \"expected_size\": (2, 2)},\n {\"layout\": (4, 1), \"expected_size\": (4, 1)},\n {\"layout\": (1, 4), \"expected_size\": (1, 4)},\n {\"layout\": (3, 3), \"expected_size\": (3, 3)},\n {\"layout\": (-1, 4), \"expected_size\": (1, 4)},\n {\"layout\": (4, -1), \"expected_size\": (4, 1)},\n {\"layout\": (-1, 2), \"expected_size\": (2, 2)},\n {\"layout\": (2, -1), \"expected_size\": (2, 2)},\n )\n\n for layout_test in layout_to_expected_size:\n axes = df.hist(layout=layout_test[\"layout\"])\n expected = layout_test[\"expected_size\"]\n self._check_axes_shape(axes, axes_num=3, layout=expected)\n\n # layout too small for all 4 plots\n with pytest.raises(ValueError):\n df.hist(layout=(1, 1))\n\n # invalid format for layout\n with pytest.raises(ValueError):\n df.hist(layout=(1,))\n with pytest.raises(ValueError):\n df.hist(layout=(-1, -1))\n\n # GH 9351\n def test_tight_layout(self):\n df = DataFrame(np.random.randn(100, 2))\n df[2] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n # Use default_axes=True when plotting method generate subplots itself\n _check_plot_works(df.hist, default_axes=True)\n self.plt.tight_layout()\n\n tm.close()\n\n def test_hist_subplot_xrot(self):\n # GH 30288\n df = DataFrame(\n {\n \"length\": [1.5, 0.5, 1.2, 0.9, 3],\n \"animal\": [\"pig\", \"rabbit\", \"pig\", \"pig\", \"rabbit\"],\n }\n )\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(\n df.hist,\n default_axes=True,\n filterwarnings=\"always\",\n column=\"length\",\n by=\"animal\",\n bins=5,\n xrot=0,\n )\n self._check_ticks_props(axes, xrot=0)\n\n @pytest.mark.parametrize(\n \"column, expected\",\n [\n (None, [\"width\", \"length\", \"height\"]),\n 
([\"length\", \"width\", \"height\"], [\"length\", \"width\", \"height\"]),\n ],\n )\n def test_hist_column_order_unchanged(self, column, expected):\n # GH29235\n\n df = DataFrame(\n {\n \"width\": [0.7, 0.2, 0.15, 0.2, 1.1],\n \"length\": [1.5, 0.5, 1.2, 0.9, 3],\n \"height\": [3, 0.5, 3.4, 2, 1],\n },\n index=[\"pig\", \"rabbit\", \"duck\", \"chicken\", \"horse\"],\n )\n\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(\n df.hist,\n default_axes=True,\n column=column,\n layout=(1, 3),\n )\n result = [axes[0, i].get_title() for i in range(3)]\n assert result == expected\n\n @pytest.mark.parametrize(\n \"histtype, expected\",\n [\n (\"bar\", True),\n (\"barstacked\", True),\n (\"step\", False),\n (\"stepfilled\", True),\n ],\n )\n def test_histtype_argument(self, histtype, expected):\n # GH23992 Verify functioning of histtype argument\n df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=[\"a\", \"b\"])\n ax = df.hist(histtype=histtype)\n self._check_patches_all_filled(ax, filled=expected)\n\n @pytest.mark.parametrize(\"by\", [None, \"c\"])\n @pytest.mark.parametrize(\"column\", [None, \"b\"])\n def test_hist_with_legend(self, by, column):\n # GH 6279 - DataFrame histogram can have a legend\n expected_axes_num = 1 if by is None and column is not None else 2\n expected_layout = (1, expected_axes_num)\n expected_labels = column or [\"a\", \"b\"]\n if by is not None:\n expected_labels = [expected_labels] * 2\n\n index = Index(15 * [\"1\"] + 15 * [\"2\"], name=\"c\")\n df = DataFrame(np.random.randn(30, 2), index=index, columns=[\"a\", \"b\"])\n\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(\n df.hist,\n default_axes=True,\n legend=True,\n by=by,\n column=column,\n )\n\n self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)\n if by is None and column is None:\n axes = axes[0]\n for expected_label, ax in zip(expected_labels, axes):\n self._check_legend_labels(ax, expected_label)\n\n @pytest.mark.parametrize(\"by\", [None, \"c\"])\n @pytest.mark.parametrize(\"column\", [None, \"b\"])\n def test_hist_with_legend_raises(self, by, column):\n # GH 6279 - DataFrame histogram with legend and label raises\n index = Index(15 * [\"1\"] + 15 * [\"2\"], name=\"c\")\n df = DataFrame(np.random.randn(30, 2), index=index, columns=[\"a\", \"b\"])\n\n with pytest.raises(ValueError, match=\"Cannot use both legend and label\"):\n df.hist(legend=True, by=by, column=column, label=\"d\")\n\n\n@td.skip_if_no_mpl\nclass TestDataFrameGroupByPlots(TestPlotBase):\n def test_grouped_hist_legacy(self):\n from matplotlib.patches import Rectangle\n\n from pandas.plotting._matplotlib.hist import _grouped_hist\n\n df = DataFrame(np.random.randn(500, 1), columns=[\"A\"])\n df[\"B\"] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=500,\n dtype=np.int64,\n )\n )\n df[\"C\"] = np.random.randint(0, 4, 500)\n df[\"D\"] = [\"X\"] * 500\n\n axes = _grouped_hist(df.A, by=df.C)\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n tm.close()\n axes = df.hist(by=df.C)\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n tm.close()\n # group by a key with single value\n axes = df.hist(by=\"D\", rot=30)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n self._check_ticks_props(axes, xrot=30)\n\n tm.close()\n # make sure kwargs to hist are handled\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n\n axes = _grouped_hist(\n df.A,\n 
by=df.C,\n cumulative=True,\n bins=4,\n xlabelsize=xf,\n xrot=xrot,\n ylabelsize=yf,\n yrot=yrot,\n density=True,\n )\n # height of last bin (index 5) must be 1.0\n for ax in axes.ravel():\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n height = rects[-1].get_height()\n tm.assert_almost_equal(height, 1.0)\n self._check_ticks_props(\n axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot\n )\n\n tm.close()\n axes = _grouped_hist(df.A, by=df.C, log=True)\n # scale of y must be 'log'\n self._check_ax_scales(axes, yaxis=\"log\")\n\n tm.close()\n # propagate attr exception from matplotlib.Axes.hist\n with pytest.raises(AttributeError):\n _grouped_hist(df.A, by=df.C, foo=\"bar\")\n\n msg = \"Specify figure size by tuple instead\"\n with pytest.raises(ValueError, match=msg):\n df.hist(by=\"C\", figsize=\"default\")\n\n def test_grouped_hist_legacy2(self):\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender_int = np.random.choice([0, 1], size=n)\n df_int = DataFrame({\"height\": height, \"weight\": weight, \"gender\": gender_int})\n gb = df_int.groupby(\"gender\")\n axes = gb.hist()\n assert len(axes) == 2\n assert len(self.plt.get_fignums()) == 2\n tm.close()\n\n def test_grouped_hist_layout(self):\n df = self.hist_df\n msg = \"Layout of 1x1 must be larger than required size 2\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column=\"weight\", by=df.gender, layout=(1, 1))\n\n msg = \"Layout of 1x3 must be larger than required size 4\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column=\"height\", by=df.category, layout=(1, 3))\n\n msg = \"At least one dimension of layout must be positive\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column=\"height\", by=df.category, layout=(-1, -1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column=\"height\", by=df.gender, layout=(2, 1)\n )\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column=\"height\", by=df.gender, layout=(2, -1)\n )\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n axes = df.hist(column=\"height\", by=df.category, layout=(4, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.hist(column=\"height\", by=df.category, layout=(-1, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.hist(column=\"height\", by=df.category, layout=(4, 2), figsize=(12, 8))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))\n tm.close()\n\n # GH 6769\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column=\"height\", by=\"classroom\", layout=(2, 2)\n )\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n # without column\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, by=\"classroom\")\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n axes = df.hist(by=\"gender\", layout=(3, 5))\n self._check_axes_shape(axes, axes_num=2, layout=(3, 5))\n\n axes = df.hist(column=[\"height\", \"weight\", \"category\"])\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n def test_grouped_hist_multiple_axes(self):\n # GH 6970, GH 7069\n df = self.hist_df\n\n fig, axes = self.plt.subplots(2, 3)\n returned = df.hist(column=[\"height\", \"weight\", \"category\"], ax=axes[0])\n self._check_axes_shape(returned, 
axes_num=3, layout=(1, 3))\n tm.assert_numpy_array_equal(returned, axes[0])\n assert returned[0].figure is fig\n returned = df.hist(by=\"classroom\", ax=axes[1])\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n tm.assert_numpy_array_equal(returned, axes[1])\n assert returned[0].figure is fig\n\n with pytest.raises(ValueError):\n fig, axes = self.plt.subplots(2, 3)\n # pass different number of axes from required\n axes = df.hist(column=\"height\", ax=axes)\n\n def test_axis_share_x(self):\n df = self.hist_df\n # GH4089\n ax1, ax2 = df.hist(column=\"height\", by=df.gender, sharex=True)\n\n # share x\n assert ax1._shared_x_axes.joined(ax1, ax2)\n assert ax2._shared_x_axes.joined(ax1, ax2)\n\n # don't share y\n assert not ax1._shared_y_axes.joined(ax1, ax2)\n assert not ax2._shared_y_axes.joined(ax1, ax2)\n\n def test_axis_share_y(self):\n df = self.hist_df\n ax1, ax2 = df.hist(column=\"height\", by=df.gender, sharey=True)\n\n # share y\n assert ax1._shared_y_axes.joined(ax1, ax2)\n assert ax2._shared_y_axes.joined(ax1, ax2)\n\n # don't share x\n assert not ax1._shared_x_axes.joined(ax1, ax2)\n assert not ax2._shared_x_axes.joined(ax1, ax2)\n\n def test_axis_share_xy(self):\n df = self.hist_df\n ax1, ax2 = df.hist(column=\"height\", by=df.gender, sharex=True, sharey=True)\n\n # share both x and y\n assert ax1._shared_x_axes.joined(ax1, ax2)\n assert ax2._shared_x_axes.joined(ax1, ax2)\n\n assert ax1._shared_y_axes.joined(ax1, ax2)\n assert ax2._shared_y_axes.joined(ax1, ax2)\n\n @pytest.mark.parametrize(\n \"histtype, expected\",\n [\n (\"bar\", True),\n (\"barstacked\", True),\n (\"step\", False),\n (\"stepfilled\", True),\n ],\n )\n def test_histtype_argument(self, histtype, expected):\n # GH23992 Verify functioning of histtype argument\n df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=[\"a\", \"b\"])\n ax = df.hist(by=\"a\", histtype=histtype)\n self._check_patches_all_filled(ax, filled=expected)\n",
"from statsmodels.compat.pandas import assert_frame_equal, make_dataframe\n\nfrom datetime import datetime\nimport numpy as np\nfrom numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,\n assert_raises, assert_)\nfrom numpy import array, column_stack\n\nfrom statsmodels.tsa.filters._utils import pandas_wrapper\nfrom statsmodels.datasets import macrodata\nfrom pandas import DataFrame, date_range, concat\nfrom statsmodels.tsa.filters.api import (bkfilter, hpfilter, cffilter,\n convolution_filter, recursive_filter)\n\n\ndef test_bking1d():\n # Test Baxter King band-pass filter. Results are taken from Stata\n bking_results = array([\n 7.320813, 2.886914, -6.818976, -13.49436,\n -13.27936, -9.405913, -5.691091, -5.133076, -7.273468,\n -9.243364, -8.482916, -4.447764, 2.406559, 10.68433,\n 19.46414, 28.09749, 34.11066, 33.48468, 24.64598, 9.952399,\n -4.265528, -12.59471, -13.46714, -9.049501, -3.011248,\n .5655082, 2.897976, 7.406077, 14.67959, 18.651, 13.05891,\n -2.945415, -24.08659, -41.86147, -48.68383, -43.32689,\n -31.66654, -20.38356, -13.76411, -9.978693, -3.7704, 10.27108,\n 31.02847, 51.87613, 66.93117, 73.51951, 73.4053, 69.17468,\n 59.8543, 38.23899, -.2604809, -49.0107, -91.1128, -112.1574,\n -108.3227, -86.51453, -59.91258, -40.01185, -29.70265,\n -22.76396, -13.08037, 1.913622, 20.44045, 37.32873, 46.79802,\n 51.95937, 59.67393, 70.50803, 81.27311, 83.53191, 67.72536,\n 33.78039, -6.509092, -37.31579, -46.05207, -29.81496, 1.416417,\n 28.31503,\n 32.90134, 8.949259, -35.41895, -84.65775, -124.4288, -144.6036,\n -140.2204, -109.2624, -53.6901, 15.07415, 74.44268, 104.0403,\n 101.0725, 76.58291, 49.27925, 36.15751, 36.48799, 37.60897,\n 27.75998, 4.216643, -23.20579, -39.33292, -36.6134, -20.90161,\n -4.143123, 5.48432, 9.270075, 13.69573, 22.16675, 33.01987,\n 41.93186, 47.12222, 48.62164, 47.30701, 40.20537, 22.37898,\n -7.133002, -43.3339, -78.51229, -101.3684, -105.2179,\n -90.97147,\n -68.30824, -48.10113, -35.60709, -31.15775, -31.82346,\n -32.49278, -28.22499, -14.42852, 10.1827, 36.64189, 49.43468,\n 38.75517, 6.447761, -33.15883, -62.60446, -72.87829, -66.54629,\n -52.61205, -38.06676, -26.19963, -16.51492, -7.007577,\n .6125674,\n 7.866972, 14.8123, 22.52388, 30.65265, 39.47801, 49.05027,\n 59.02925,\n 72.88999, 95.08865, 125.8983, 154.4283, 160.7638, 130.6092,\n 67.84406, -7.070272, -68.08128, -99.39944, -104.911,\n -100.2372, -98.11596, -104.2051, -114.0125, -113.3475,\n -92.98669, -51.91707, -.7313812, 43.22938, 64.62762, 64.07226,\n 59.35707, 67.06026, 91.87247, 124.4591, 151.2402, 163.0648,\n 154.6432])\n X = macrodata.load_pandas().data['realinv'].values\n Y = bkfilter(X, 6, 32, 12)\n assert_almost_equal(Y, bking_results, 4)\n\n\ndef test_bking2d():\n # Test Baxter-King band-pass filter with 2d input\n bking_results = array([\n [7.320813, -.0374475], [2.886914, -.0430094],\n [-6.818976, -.053456], [-13.49436, -.0620739], [-13.27936, -.0626929],\n [-9.405913, -.0603022], [-5.691091, -.0630016], [-5.133076, -.0832268],\n [-7.273468, -.1186448], [-9.243364, -.1619868], [-8.482916, -.2116604],\n [-4.447764, -.2670747], [2.406559, -.3209931], [10.68433, -.3583075],\n [19.46414, -.3626742], [28.09749, -.3294618], [34.11066, -.2773388],\n [33.48468, -.2436127], [24.64598, -.2605531], [9.952399, -.3305166],\n [-4.265528, -.4275561], [-12.59471, -.5076068], [-13.46714, -.537573],\n [-9.049501, -.5205845], [-3.011248, -.481673], [.5655082, -.4403994],\n [2.897976, -.4039957], [7.406077, -.3537394], [14.67959, -.2687359],\n [18.651, -.1459743], [13.05891, 
.0014926], [-2.945415, .1424277],\n [-24.08659, .2451936], [-41.86147, .288541], [-48.68383, .2727282],\n [-43.32689, .1959127], [-31.66654, .0644874], [-20.38356, -.1158372],\n [-13.76411, -.3518627], [-9.978693, -.6557535], [-3.7704, -1.003754],\n [10.27108, -1.341632], [31.02847, -1.614486], [51.87613, -1.779089],\n [66.93117, -1.807459], [73.51951, -1.679688], [73.4053, -1.401012],\n [69.17468, -.9954996], [59.8543, -.511261], [38.23899, -.0146745],\n [-.2604809, .4261311], [-49.0107, .7452514], [-91.1128, .8879492],\n [-112.1574, .8282748], [-108.3227, .5851508], [-86.51453, .2351699],\n [-59.91258, -.1208998], [-40.01185, -.4297895], [-29.70265, -.6821963],\n [-22.76396, -.9234254], [-13.08037, -1.217539], [1.913622, -1.57367],\n [20.44045, -1.927008], [37.32873, -2.229565], [46.79802, -2.463154],\n [51.95937, -2.614697], [59.67393, -2.681357], [70.50803, -2.609654],\n [81.27311, -2.301618], [83.53191, -1.720974], [67.72536, -.9837123],\n [33.78039, -.2261613], [-6.509092, .4546985], [-37.31579, 1.005751],\n [-46.05207, 1.457224], [-29.81496, 1.870815], [1.416417, 2.263313],\n [28.31503, 2.599906], [32.90134, 2.812282], [8.949259, 2.83358],\n [-35.41895, 2.632667], [-84.65775, 2.201077], [-124.4288, 1.598951],\n [-144.6036, .9504762], [-140.2204, .4187932], [-109.2624, .1646726],\n [-53.6901, .2034265], [15.07415, .398165], [74.44268, .5427476],\n [104.0403, .5454975], [101.0725, .4723354], [76.58291, .4626823],\n [49.27925, .5840143], [36.15751, .7187981], [36.48799, .6058422],\n [37.60897, .1221227], [27.75998, -.5891272], [4.216643, -1.249841],\n [-23.20579, -1.594972], [-39.33292, -1.545968], [-36.6134, -1.275494],\n [-20.90161, -1.035783], [-4.143123, -.9971732], [5.48432, -1.154264],\n [9.270075, -1.29987], [13.69573, -1.240559], [22.16675, -.9662656],\n [33.01987, -.6420301], [41.93186, -.4698712], [47.12222, -.4527797],\n [48.62164, -.4407153], [47.30701, -.2416076], [40.20537, .2317583],\n [22.37898, .8710276], [-7.133002, 1.426177], [-43.3339, 1.652785],\n [-78.51229, 1.488021], [-101.3684, 1.072096], [-105.2179, .6496446],\n [-90.97147, .4193682], [-68.30824, .41847], [-48.10113, .5253419],\n [-35.60709, .595076], [-31.15775, .5509905], [-31.82346, .3755519],\n [-32.49278, .1297979], [-28.22499, -.0916165], [-14.42852, -.2531037],\n [10.1827, -.3220784], [36.64189, -.2660561], [49.43468, -.1358522],\n [38.75517, -.0279508], [6.447761, .0168735], [-33.15883, .0315687],\n [-62.60446, .0819507], [-72.87829, .2274033], [-66.54629, .4641401],\n [-52.61205, .7211093], [-38.06676, .907773], [-26.19963, .9387103],\n [-16.51492, .7940786], [-7.007577, .5026631], [.6125674, .1224996],\n [7.866972, -.2714422], [14.8123, -.6273921], [22.52388, -.9124271],\n [30.65265, -1.108861], [39.47801, -1.199206], [49.05027, -1.19908],\n [59.02925, -1.139046], [72.88999, -.9775021], [95.08865, -.6592603],\n [125.8983, -.1609712], [154.4283, .4796201], [160.7638, 1.100565],\n [130.6092, 1.447148], [67.84406, 1.359608], [-7.070272, .8931825],\n [-68.08128, .2619787], [-99.39944, -.252208], [-104.911, -.4703874],\n [-100.2372, -.4430657], [-98.11596, -.390683], [-104.2051, -.5647846],\n [-114.0125, -.9397582], [-113.3475, -1.341633], [-92.98669, -1.567337],\n [-51.91707, -1.504943], [-.7313812, -1.30576], [43.22938, -1.17151],\n [64.62762, -1.136151], [64.07226, -1.050555], [59.35707, -.7308369],\n [67.06026, -.1766731], [91.87247, .3898467], [124.4591, .8135461],\n [151.2402, .9644226], [163.0648, .6865934], [154.6432, .0115685]])\n\n mdata = macrodata.load_pandas()\n X = mdata.data[['realinv', 
'cpi']].values.astype(float)\n Y = bkfilter(X, 6, 32, 12)\n assert_almost_equal(Y, bking_results, 4)\n\n\ndef test_hpfilter():\n # Test Hodrick-Prescott Filter. Results taken from Stata.\n hpfilt_res = array([\n [3.951191484487844718e+01, 2.670837085155121713e+03],\n [8.008853245681075350e+01, 2.698712467543189177e+03],\n [4.887545512195401898e+01, 2.726612544878045810e+03],\n [3.059193256079834100e+01, 2.754612067439201837e+03],\n [6.488266733421960453e+01, 2.782816332665780465e+03],\n [2.304024204546703913e+01, 2.811349757954532834e+03],\n [-1.355312369487364776e+00, 2.840377312369487299e+03],\n [-6.746236512580753697e+01, 2.870078365125807522e+03],\n [-8.136743836853429457e+01, 2.900631438368534418e+03],\n [-6.016789026443257171e+01, 2.932172890264432681e+03],\n [-4.636922433138215638e+01, 2.964788224331382025e+03],\n [-2.069533915570400495e+01, 2.998525339155703932e+03],\n [-2.162152558595607843e+00, 3.033403152558595593e+03],\n [-4.718647774311648391e+00, 3.069427647774311481e+03],\n [-1.355645669169007306e+01, 3.106603456691690099e+03],\n [-4.436926204475639679e+01, 3.144932262044756499e+03],\n [-4.332027378211660107e+01, 3.184407273782116590e+03],\n [-4.454697106352068658e+01, 3.224993971063520803e+03],\n [-2.629875787765286077e+01, 3.266630757877652741e+03],\n [-4.426119635629265758e+01, 3.309228196356292756e+03],\n [-1.443441190762496262e+01, 3.352680411907625057e+03],\n [-2.026686669186437939e+01, 3.396853866691864368e+03],\n [-1.913700136208899494e+01, 3.441606001362089046e+03],\n [-5.482458977940950717e+01, 3.486781589779409387e+03],\n [-1.596244517937793717e+01, 3.532213445179378141e+03],\n [-1.374011542874541192e+01, 3.577700115428745448e+03],\n [1.325482813403914406e+01, 3.623030171865960710e+03],\n [5.603040174253828809e+01, 3.667983598257461836e+03],\n [1.030743373627105939e+02, 3.712348662637289181e+03],\n [7.217534795943993231e+01, 3.755948652040559864e+03],\n [5.462972503693208637e+01, 3.798671274963067845e+03],\n [4.407065050666142270e+01, 3.840449349493338559e+03],\n [3.749016270204992907e+01, 3.881249837297949853e+03],\n [-1.511244199923112319e+00, 3.921067244199923152e+03],\n [-9.093507374079763395e+00, 3.959919507374079785e+03],\n [-1.685361946760258434e+01, 3.997823619467602384e+03],\n [2.822211031434289907e+01, 4.034790889685657021e+03],\n [6.117590627896424849e+01, 4.070822093721035344e+03],\n [5.433135391434370831e+01, 4.105935646085656117e+03],\n [3.810480376716623141e+01, 4.140188196232833434e+03],\n [7.042964928802848590e+01, 4.173670350711971878e+03],\n [4.996346842507591646e+01, 4.206496531574924120e+03],\n [4.455282059571254649e+01, 4.238825179404287155e+03],\n [-7.584961950576143863e+00, 4.270845961950576566e+03],\n [-4.620339247697120300e+01, 4.302776392476971523e+03],\n [-7.054024364552969928e+01, 4.334829243645529459e+03],\n [-6.492941099801464588e+01, 4.367188410998014660e+03],\n [-1.433567024239555394e+02, 4.399993702423955256e+03],\n [-5.932834493089012540e+01, 4.433344344930889747e+03],\n [-6.842096758743628016e+01, 4.467249967587436004e+03],\n [-6.774011924654860195e+01, 4.501683119246548813e+03],\n [-9.030958565658056614e+01, 4.536573585656580690e+03],\n [-4.603981499136807543e+01, 4.571808814991368308e+03],\n [2.588118806672991923e+01, 4.607219811933269739e+03],\n [3.489419371912299539e+01, 4.642608806280876706e+03],\n [7.675179642495095322e+01, 4.677794203575049323e+03],\n [1.635497817724171910e+02, 4.712616218227582976e+03],\n [1.856079654765617306e+02, 4.746963034523438182e+03],\n [1.254269446392718237e+02, 4.780825055360728584e+03],\n 
[1.387413113837174024e+02, 4.814308688616282780e+03],\n [6.201826599282230745e+01, 4.847598734007177882e+03],\n [4.122129542972197669e+01, 4.880966704570278125e+03],\n [-4.120287475842360436e+01, 4.914722874758424041e+03],\n [-9.486328233441963675e+01, 4.949203282334419782e+03],\n [-1.894232132641573116e+02, 4.984718213264157384e+03],\n [-1.895766639620087517e+02, 5.021518663962008759e+03],\n [-1.464092413342650616e+02, 5.059737241334265491e+03],\n [-1.218770668721217589e+02, 5.099388066872122181e+03],\n [-4.973075629078175552e+01, 5.140393756290781312e+03],\n [-5.365375213897277717e+01, 5.182600752138972894e+03],\n [-7.175241524251214287e+01, 5.225824415242512259e+03],\n [-7.834757283225462743e+01, 5.269846572832254424e+03],\n [-6.264220687943907251e+01, 5.314404206879438789e+03],\n [-3.054332122210325906e+00, 5.359185332122210639e+03],\n [4.808218808024685131e+01, 5.403838811919753425e+03],\n [2.781399326736391231e+00, 5.448011600673263274e+03],\n [-2.197570415173231595e+01, 5.491380704151732061e+03],\n [1.509441335012807031e+02, 5.533624866498719712e+03],\n [1.658909029574851957e+02, 5.574409097042514986e+03],\n [2.027292548049981633e+02, 5.613492745195001589e+03],\n [1.752101578176061594e+02, 5.650738842182393455e+03],\n [1.452808749847536092e+02, 5.686137125015246056e+03],\n [1.535481629475025329e+02, 5.719786837052497503e+03],\n [1.376169777998875361e+02, 5.751878022200112355e+03],\n [1.257703080340770612e+02, 5.782696691965922582e+03],\n [-2.524186846895645431e+01, 5.812614868468956047e+03],\n [-6.546618027042404719e+01, 5.842083180270424236e+03],\n [1.192352023580315290e+01, 5.871536479764196883e+03],\n [1.043482970188742911e+02, 5.901368702981125352e+03],\n [2.581376184768396342e+01, 5.931981238152316109e+03],\n [6.634330880534071184e+01, 5.963840691194659485e+03],\n [-4.236780162594641297e+01, 5.997429801625946311e+03],\n [-1.759397735321817891e+02, 6.033272773532181418e+03],\n [-1.827933311233055065e+02, 6.071867331123305121e+03],\n [-2.472312362505917918e+02, 6.113601236250591683e+03],\n [-2.877470049336488955e+02, 6.158748004933649099e+03],\n [-2.634066336693540507e+02, 6.207426633669354487e+03],\n [-1.819572770763625158e+02, 6.259576277076362203e+03],\n [-1.175034606274621183e+02, 6.314971460627461965e+03],\n [-4.769898649718379602e+01, 6.373272986497183410e+03],\n [1.419578280287896632e+01, 6.434068217197121157e+03],\n [6.267929662760798237e+01, 6.496914703372392069e+03],\n [6.196413196753746888e+01, 6.561378868032462378e+03],\n [5.019769125317907310e+01, 6.627066308746821051e+03],\n [4.665364933213822951e+01, 6.693621350667861407e+03],\n [3.662430749527266016e+01, 6.760719692504727391e+03],\n [7.545680850246480986e+01, 6.828066191497535328e+03],\n [6.052940492147536133e+01, 6.895388595078524304e+03],\n [6.029518881462354329e+01, 6.962461811185376064e+03],\n [2.187042136652689805e+01, 7.029098578633473153e+03],\n [2.380067926824722235e+01, 7.095149320731752596e+03],\n [-7.119129802169481991e+00, 7.160478129802169860e+03],\n [-3.194497359120850888e+01, 7.224963973591208742e+03],\n [-1.897137038934124575e+01, 7.288481370389341464e+03],\n [-1.832687287845146784e+01, 7.350884872878451461e+03],\n [4.600482336597542599e+01, 7.412017176634024509e+03],\n [2.489047706403016491e+01, 7.471709522935970199e+03],\n [6.305909392127250612e+01, 7.529821906078727807e+03],\n [4.585212309498183458e+01, 7.586229876905018500e+03],\n [9.314260180878318351e+01, 7.640848398191216802e+03],\n [1.129819097095369216e+02, 7.693621090290463144e+03],\n [1.204662123176703972e+02, 
7.744549787682329224e+03],\n [1.336860614601246198e+02, 7.793706938539875409e+03],\n [1.034567175813735957e+02, 7.841240282418626521e+03],\n [1.403118873372050075e+02, 7.887381112662795204e+03],\n [1.271726169351004501e+02, 7.932425383064899506e+03],\n [8.271925765282139764e+01, 7.976756742347178260e+03],\n [-3.197432211752584408e+01, 8.020838322117525422e+03],\n [-1.150209535194062482e+02, 8.065184953519406008e+03],\n [-1.064694837456772802e+02, 8.110291483745677397e+03],\n [-1.190428718925368230e+02, 8.156580871892536379e+03],\n [-1.353635336292991269e+02, 8.204409533629299403e+03],\n [-9.644348283027102298e+01, 8.254059482830271008e+03],\n [-6.143413116116607853e+01, 8.305728131161165948e+03],\n [-3.019161311097923317e+01, 8.359552613110980019e+03],\n [1.384333163552582846e+00, 8.415631666836447039e+03],\n [-4.156016073666614830e+01, 8.474045160736666730e+03],\n [-4.843882841860977351e+01, 8.534873828418609264e+03],\n [-6.706442838867042155e+01, 8.598172428388670596e+03],\n [-2.019644488579979225e+01, 8.663965444885800025e+03],\n [-4.316446881084630149e+00, 8.732235446881084499e+03],\n [4.435061943264736328e+01, 8.802952380567352520e+03],\n [2.820550564155564643e+01, 8.876083494358445023e+03],\n [5.155624419490777655e+01, 8.951623755805092514e+03],\n [-4.318760899315748247e+00, 9.029585760899315574e+03],\n [-6.534632828542271454e+01, 9.110014328285422380e+03],\n [-7.226757738268497633e+01, 9.192951577382684263e+03],\n [-9.412378615444868046e+01, 9.278398786154448317e+03],\n [-1.191240653288368776e+02, 9.366312065328836979e+03],\n [-4.953669826751865912e+01, 9.456588698267518339e+03],\n [-6.017251579067487910e+01, 9.549051515790675694e+03],\n [-5.103438828313483100e+01, 9.643492388283135369e+03],\n [-7.343057830678117170e+01, 9.739665578306781754e+03],\n [-2.774245193054957781e+01, 9.837293451930549054e+03],\n [-3.380481112519191811e+00, 9.936052481112519672e+03],\n [-2.672779877794346248e+01, 1.003560179877794326e+04],\n [-3.217342505148371856e+01, 1.013559842505148299e+04],\n [-4.140567518359966925e+01, 1.023568267518359971e+04],\n [-6.687756033938057953e+00, 1.033547475603393832e+04],\n [7.300600408459467872e+01, 1.043456899591540605e+04],\n [6.862345670680042531e+01, 1.053255554329319966e+04],\n [5.497882461487461114e+01, 1.062907017538512628e+04],\n [9.612244093055960548e+01, 1.072379155906944106e+04],\n [1.978212770103891671e+02, 1.081643272298961165e+04],\n [1.362772276848754700e+02, 1.090676677231512440e+04],\n [2.637635494867263333e+02, 1.099469045051327339e+04],\n [1.876813256815166824e+02, 1.108018567431848351e+04],\n [1.711447873158413131e+02, 1.116339921268415856e+04],\n [5.257586460826678376e+01, 1.124459513539173349e+04],\n [4.710652228531762375e+01, 1.132414447771468258e+04],\n [-6.237613484241046535e+01, 1.140245113484241119e+04],\n [-9.982044354035315337e+01, 1.147994844354035376e+04],\n [-7.916275548997509759e+01, 1.155703075548997549e+04],\n [-9.526003459472303803e+01, 1.163403003459472347e+04],\n [-1.147987680369169539e+02, 1.171122876803691724e+04],\n [-1.900259054765901965e+02, 1.178884990547659072e+04],\n [-2.212256473439556430e+02, 1.186704464734395515e+04],\n [-2.071394278781845060e+02, 1.194584542787818464e+04],\n [-8.968541528904825100e+01, 1.202514641528904758e+04],\n [-6.189531564415665343e+01, 1.210471231564415575e+04],\n [-5.662878162551714922e+01, 1.218425178162551674e+04],\n [-4.961678134413705266e+01, 1.226343478134413635e+04],\n [-3.836288992144181975e+01, 1.234189588992144127e+04],\n [-8.956671991456460091e+00, 1.241923867199145570e+04],\n 
[3.907028461866866564e+01, 1.249504271538133071e+04],\n [1.865299000184495526e+01, 1.256888200999815490e+04],\n [4.279803532226833340e+01, 1.264035496467773191e+04],\n [3.962735362631610769e+01, 1.270907164637368442e+04],\n [1.412691291877854383e+02, 1.277466887081221466e+04],\n [1.256537791844366438e+02, 1.283680822081556289e+04],\n [7.067642758858892194e+01, 1.289523957241141034e+04],\n [1.108876647603192396e+02, 1.294979133523968085e+04],\n [9.956490829291760747e+01, 1.300033609170708223e+04],\n [1.571612709880937473e+02, 1.304681572901190702e+04],\n [2.318746375812715996e+02, 1.308923436241872878e+04],\n [2.635546670125277160e+02, 1.312769433298747208e+04],\n [2.044220965739259555e+02, 1.316244290342607383e+04],\n [2.213739418903714977e+02, 1.319389205810962812e+04],\n [1.020184547767112235e+02, 1.322258154522328914e+04],\n [-1.072694716663390864e+02, 1.324918947166633916e+04],\n [-3.490477058718843182e+02, 1.327445770587188417e+04],\n [-3.975570728533530200e+02, 1.329906107285335383e+04],\n [-3.331152428080622485e+02, 1.332345624280806260e+04]])\n dta = macrodata.load_pandas().data['realgdp'].values\n res = column_stack((hpfilter(dta, 1600)))\n assert_almost_equal(res, hpfilt_res, 6)\n\n\ndef test_cfitz_filter():\n # Test Christiano-Fitzgerald Filter. Results taken from R.\n # NOTE: The Stata mata code and the matlab code it's based on are wrong.\n cfilt_res = array([\n [0.712599537179426, 0.439563468233128],\n [1.06824041304411, 0.352886666575907],\n [1.19422467791128, 0.257297004260607],\n [0.970845473140327, 0.114504692143872],\n [0.467026976628563, -0.070734782329146],\n [-0.089153511514031, -0.238609685132605],\n [-0.452339254128573, -0.32376584042956],\n [-0.513231214461187, -0.314288554228112],\n [-0.352372578720063, -0.258815055101336],\n [-0.160282602521333, -0.215076844089567],\n [-0.0918782593827686, -0.194120745417214],\n [-0.168083823205437, -0.158327420072693],\n [-0.291595204965808, -0.0742727139742986],\n [-0.348638756841307, 0.037008291163602],\n [-0.304328040874631, 0.108196527328748],\n [-0.215933150969686, 0.0869231107437175],\n [-0.165632621390694, -0.0130556619786275],\n [-0.182326839507151, -0.126570926191824],\n [-0.223737786804725, -0.205535321806185],\n [-0.228939291453403, -0.269110078201836],\n [-0.185518327227038, -0.375976507132174],\n [-0.143900152461529, -0.53760115656157],\n [-0.162749541550174, -0.660065018626038],\n [-0.236263634756884, -0.588542352053736],\n [-0.275785854309211, -0.236867929421996],\n [-0.173666515108109, 0.303436335579219],\n [0.0963135720251639, 0.779772338801993],\n [0.427070069032285, 0.929108075350647],\n [0.629034743259998, 0.658330841002647],\n [0.557941248993624, 0.118500049361018],\n [0.227866624051603, -0.385048321099911],\n [-0.179878859883227, -0.582223992561493],\n [-0.428263000051965, -0.394053702908091],\n [-0.381640684645912, 0.0445437406977307],\n [-0.0942745548364887, 0.493997792757968],\n [0.238132391504895, 0.764519811304315],\n [0.431293754256291, 0.814755206427316],\n [0.455010435813661, 0.745567043101108],\n [0.452800768971269, 0.709401694610443],\n [0.615754619329312, 0.798293251119636],\n [1.00256335412457, 0.975856845059388],\n [1.44841039351691, 1.09097252730799],\n [1.64651971120370, 0.967823457118036],\n [1.35534532901802, 0.522397724737059],\n [0.580492790312048, -0.16941343361609],\n [-0.410746188031773, -0.90760401289056],\n [-1.26148406066881, -1.49592867122591],\n [-1.75784179124566, -1.87404167409849],\n [-1.94478553960064, -2.14586210891112],\n [-2.03751202708559, -2.465855239868],\n 
[-2.20376059354166, -2.86294187189049],\n [-2.39722338315852, -3.15004697654831],\n [-2.38032366161537, -3.01390466643222],\n [-1.91798022532025, -2.23395210271226],\n [-0.982318490353716, -0.861346053067472],\n [0.199047030343412, 0.790266582335616],\n [1.28582776574786, 2.33731327460104],\n [2.03565905376430, 3.54085486821911],\n [2.41201557412526, 4.36519456268955],\n [2.52011070482927, 4.84810517685452],\n [2.45618479815452, 4.92906708807477],\n [2.22272146945388, 4.42591058990048],\n [1.78307567169034, 3.20962906108388],\n [1.18234431860844, 1.42568060336985],\n [0.590069172333348, -0.461896808688991],\n [0.19662302949837, -1.89020992539465],\n [0.048307034171166, -2.53490571941987],\n [-0.0141956981899000, -2.50020338531674],\n [-0.230505187108187, -2.20625973569823],\n [-0.700947410386801, -2.06643697511048],\n [-1.27085123163060, -2.21536883679783],\n [-1.64082547897928, -2.49016921117735],\n [-1.62286182971254, -2.63948740221362],\n [-1.31609762181362, -2.54685250637904],\n [-1.03085567704873, -2.27157435428923],\n [-1.01100120380112, -1.90404507430561],\n [-1.19823958399826, -1.4123209792214],\n [-1.26398933608383, -0.654000086153317],\n [-0.904710628949692, 0.447960016248203],\n [-0.151340093679588, 1.73970411237156],\n [0.592926881165989, 2.85741581650685],\n [0.851660587507523, 3.4410446351716],\n [0.480324393352127, 3.36870271362297],\n [-0.165153230782417, 2.82003806696544],\n [-0.459235919375844, 2.12858991660866],\n [0.0271158842479935, 1.55840980891556],\n [1.18759188180671, 1.17980298478623],\n [2.43238266962309, 0.904011534980672],\n [3.08277213720132, 0.595286911949837],\n [2.79953663720953, 0.148014782859571],\n [1.73694442845833, -0.496297332023011],\n [0.357638079951977, -1.33108149877570],\n [-0.891418825216945, -2.22650083183366],\n [-1.77646467793627, -2.89359299718574],\n [-2.24614790863088, -2.97921619243347],\n [-2.29048879096607, -2.30003092779280],\n [-1.87929656465888, -1.05298381273274],\n [-1.04510101454788, 0.215837488618531],\n [0.00413338508394524, 0.937866257924888],\n [0.906870625251025, 0.92664365343019],\n [1.33869057593416, 0.518564571494679],\n [1.22659678454440, 0.288096869652890],\n [0.79380139656044, 0.541053084632774],\n [0.38029431865832, 1.01905199983437],\n [0.183929413600038, 1.10529586616777],\n [0.140045425897033, 0.393618564826736],\n [0.0337313182352219, -0.86431819007665],\n [-0.269208622829813, -1.85638085246792],\n [-0.687276639992166, -1.82275359004533],\n [-1.00161592325614, -0.692695765071617],\n [-1.06320089194036, 0.803577361347341],\n [-0.927152307196776, 1.67366338751788],\n [-0.786802101366614, 1.42564362251793],\n [-0.772970884572502, 0.426446388877964],\n [-0.81275662801789, -0.437721213831647],\n [-0.686831250382476, -0.504255468075149],\n [-0.237936463020255, 0.148656301898438],\n [0.459631879129522, 0.832925905720478],\n [1.12717379822508, 0.889455302576383],\n [1.48640453200855, 0.268042676202216],\n [1.46515245776211, -0.446505038539178],\n [1.22993484959115, -0.563868578181134],\n [1.0272100765927, 0.0996849952196907],\n [0.979191212438404, 1.05053652824665],\n [1.00733490030391, 1.51658415000556],\n [0.932192535457706, 1.06262774912638],\n [0.643374300839414, -0.0865180803476065],\n [0.186885168954461, -1.24799408923277],\n [-0.290842337365465, -1.80035611156538],\n [-0.669446735516495, -1.58847333561510],\n [-0.928915624595538, -0.932116966867929],\n [-1.11758635926997, -0.307879396807850],\n [-1.26832454569756, -0.00856199983957032],\n [-1.35755577149251, -0.0303537516690989],\n [-1.34244112665546, 
-0.196807620887435],\n [-1.22227976023299, -0.342062643495923],\n [-1.04601473486818, -0.390474392372016],\n [-0.85158508717846, -0.322164402093596],\n [-0.605033439160543, -0.126930141915954],\n [-0.218304303942818, 0.179551077808122],\n [0.352173017779006, 0.512327303000081],\n [1.01389600097229, 0.733397490572755],\n [1.55149778750607, 0.748740387440165],\n [1.75499674757591, 0.601759717901009],\n [1.56636057468633, 0.457705308377562],\n [1.12239792537274, 0.470849913286519],\n [0.655802600286141, 0.646142040378738],\n [0.335285115340180, 0.824103600255079],\n [0.173454596506888, 0.808068498175582],\n [0.0666753011315252, 0.521488214487996],\n [-0.0842367474816212, 0.0583493276173476],\n [-0.285604762631464, -0.405958418332253],\n [-0.465735422869919, -0.747800086512926],\n [-0.563586691231348, -0.94982272350799],\n [-0.598110322024572, -1.04736894794361],\n [-0.65216025756061, -1.04858365218822],\n [-0.789663117801624, -0.924145633093637],\n [-0.984704045337959, -0.670740724179446],\n [-1.12449565589348, -0.359476803003931],\n [-1.07878318723543, -0.092290938944355],\n [-0.775555435407062, 0.102132527529259],\n [-0.231610677329856, 0.314409560305622],\n [0.463192794235131, 0.663523546243286],\n [1.17416973448423, 1.13156902460931],\n [1.74112278814906, 1.48967153067024],\n [2.00320855757084, 1.42571085941843],\n [1.8529912317336, 0.802460519079555],\n [1.30747261947211, -0.169219078629572],\n [0.540237070403222, -1.01621539672694],\n [-0.177136817092375, -1.3130784867977],\n [-0.611981468823591, -0.982477824460773],\n [-0.700240028737747, -0.344919609255406],\n [-0.572396497740112, 0.125083535035390],\n [-0.450934466600975, 0.142553112732280],\n [-0.494020014254326, -0.211429053871656],\n [-0.701707589094918, -0.599602868825992],\n [-0.94721339346157, -0.710669870591623],\n [-1.09297139748946, -0.47846194092245],\n [-1.08850658866583, -0.082258450179988],\n [-0.976082880696692, 0.235758921309309],\n [-0.81885695346771, 0.365298185204303],\n [-0.63165529525553, 0.384725179378064],\n [-0.37983149226421, 0.460240196164378],\n [-0.0375551354277652, 0.68580913832794],\n [0.361996927427804, 0.984470835955107],\n [0.739920615366072, 1.13195975020298],\n [1.03583478061534, 0.88812510421667],\n [1.25614938962160, 0.172561520611839],\n [1.45295030231799, -0.804979390544485],\n [1.64887158748426, -1.55662011197859],\n [1.78022721495313, -1.52921975346218],\n [1.71945683859668, -0.462240366424548],\n [1.36728880239190, 1.31213774341268],\n [0.740173894315912, 2.88362740582926],\n [-0.0205364331835904, 3.20319080963167],\n [-0.725643970956428, 1.75222466531151],\n [-1.23900506689782, -0.998432917440275],\n [-1.52651897508678, -3.72752870885448],\n [-1.62857516631435, -5.00551707196292],\n [-1.59657420180451, -4.18499132634584],\n [-1.45489013276495, -1.81759097305637],\n [-1.21309542313047, 0.722029457352468]])\n dta = macrodata.load_pandas().data[['tbilrate', 'infl']].values[1:]\n cyc, trend = cffilter(dta)\n assert_almost_equal(cyc, cfilt_res, 8)\n # do 1d\n cyc, trend = cffilter(dta[:, 1])\n assert_almost_equal(cyc, cfilt_res[:, 1], 8)\n\n\ndef test_bking_pandas():\n # 1d\n dta = macrodata.load_pandas().data\n index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')\n dta.index = index\n filtered = bkfilter(dta[\"infl\"])\n nd_filtered = bkfilter(dta['infl'].values)\n assert_equal(filtered.values, nd_filtered)\n assert_equal(filtered.index[0], datetime(1962, 3, 31))\n assert_equal(filtered.index[-1], datetime(2006, 9, 30))\n assert_equal(filtered.name, \"infl_cycle\")\n\n # 2d\n 
filtered = bkfilter(dta[[\"infl\", \"unemp\"]])\n nd_filtered = bkfilter(dta[['infl', 'unemp']].values)\n assert_equal(filtered.values, nd_filtered)\n assert_equal(filtered.index[0], datetime(1962, 3, 31))\n assert_equal(filtered.index[-1], datetime(2006, 9, 30))\n assert_equal(filtered.columns.values, [\"infl_cycle\", \"unemp_cycle\"])\n\n\ndef test_cfitz_pandas():\n # 1d\n dta = macrodata.load_pandas().data\n index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')\n dta.index = index\n cycle, trend = cffilter(dta[\"infl\"])\n ndcycle, ndtrend = cffilter(dta['infl'].values)\n assert_allclose(cycle.values, ndcycle, rtol=1e-14)\n assert_equal(cycle.index[0], datetime(1959, 3, 31))\n assert_equal(cycle.index[-1], datetime(2009, 9, 30))\n assert_equal(cycle.name, \"infl_cycle\")\n\n # 2d\n cycle, trend = cffilter(dta[[\"infl\", \"unemp\"]])\n ndcycle, ndtrend = cffilter(dta[['infl', 'unemp']].values)\n assert_allclose(cycle.values, ndcycle, rtol=1e-14)\n assert_equal(cycle.index[0], datetime(1959, 3, 31))\n assert_equal(cycle.index[-1], datetime(2009, 9, 30))\n assert_equal(cycle.columns.values, [\"infl_cycle\", \"unemp_cycle\"])\n\n\ndef test_hpfilter_pandas():\n dta = macrodata.load_pandas().data\n index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')\n dta.index = index\n cycle, trend = hpfilter(dta[\"realgdp\"])\n ndcycle, ndtrend = hpfilter(dta['realgdp'].values)\n assert_equal(cycle.values, ndcycle)\n assert_equal(cycle.index[0], datetime(1959, 3, 31))\n assert_equal(cycle.index[-1], datetime(2009, 9, 30))\n assert_equal(cycle.name, \"realgdp_cycle\")\n\n\nclass TestFilters(object):\n @classmethod\n def setup_class(cls):\n # even\n data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,\n 530, 489, 540, 457, 195, 176, 337, 239, 128, 102,\n 232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]\n cls.data = DataFrame(data, date_range(start='1/1/1951',\n periods=len(data),\n freq='Q'))\n data[9] = np.nan\n cls.datana = DataFrame(data, date_range(start='1/1/1951',\n periods=len(data),\n freq='Q'))\n from .results import filter_results\n cls.expected = filter_results\n\n def test_convolution(self):\n x = self.data.values.squeeze()\n res = convolution_filter(x, [.75, .25])\n expected = self.expected.conv2\n np.testing.assert_almost_equal(res, expected)\n\n res = convolution_filter(x, [.75, .25], nsides=1)\n expected = self.expected.conv1\n np.testing.assert_almost_equal(res, expected)\n\n x = self.datana.values.squeeze()\n res = convolution_filter(x, [.75, .25])\n expected = self.expected.conv2_na\n np.testing.assert_almost_equal(res, expected)\n\n res = convolution_filter(x, [.75, .25], nsides=1)\n expected = self.expected.conv1_na\n np.testing.assert_almost_equal(res, expected)\n\n def test_convolution2d(self):\n x = self.data.values\n res = convolution_filter(x, [[.75], [.25]])\n expected = self.expected.conv2\n np.testing.assert_almost_equal(res, expected[:, None])\n res = convolution_filter(np.c_[x, x], [[.75, .75], [.25, .25]])\n np.testing.assert_almost_equal(res, np.c_[expected, expected])\n\n res = convolution_filter(x, [[.75], [.25]], nsides=1)\n expected = self.expected.conv1\n np.testing.assert_almost_equal(res, expected[:, None])\n\n x = self.datana.values\n res = convolution_filter(x, [[.75], [.25]])\n expected = self.expected.conv2_na\n np.testing.assert_almost_equal(res, expected[:, None])\n\n res = convolution_filter(x, [[.75], [.25]], nsides=1)\n expected = self.expected.conv1_na\n np.testing.assert_almost_equal(res, expected[:, None])\n\n def 
test_recursive(self):\n x = self.data.values.squeeze()\n res = recursive_filter(x, [.75, .25])\n expected = self.expected.recurse\n np.testing.assert_almost_equal(res, expected)\n\n res = recursive_filter(x, [.75, .25], init=[150, 100])\n expected = self.expected.recurse_init\n np.testing.assert_almost_equal(res, expected)\n\n x = self.datana.values.squeeze()\n res = recursive_filter(x, [.75, .25])\n expected = self.expected.recurse_na\n np.testing.assert_almost_equal(res, expected)\n\n res = recursive_filter(x, [.75, .25], init=[150, 100])\n expected = self.expected.recurse_init_na\n np.testing.assert_almost_equal(res, expected)\n\n assert_raises(ValueError, recursive_filter, x,\n [.75, .25, .5], [150, 100])\n\n def test_pandas(self):\n start = datetime(1951, 3, 31)\n end = datetime(1958, 12, 31)\n x = self.data[0]\n res = convolution_filter(x, [.75, .25])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n res = convolution_filter(x, [.75, .25], nsides=1)\n assert_(res.index[0] == start)\n # with no nan-padding q1 if not\n assert_(res.index[-1] == end)\n\n res = recursive_filter(x, [.75, .25])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n x = self.datana\n res = recursive_filter(x, [.75, .25])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n def test_pandas2d(self):\n start = datetime(1951, 3, 31)\n end = datetime(1958, 12, 31)\n x = concat((self.data[0], self.data[0]), axis=1)\n res = convolution_filter(x, [[.75, .75], [.25, .25]])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n def test_odd_length_filter(self):\n start = datetime(1951, 3, 31)\n end = datetime(1958, 12, 31)\n x = self.data[0]\n res = convolution_filter(x, [.75, .5, .3, .2, .1])\n expected = self.expected.conv2_odd\n np.testing.assert_almost_equal(res.values.squeeze(), expected)\n np.testing.assert_(res.index[0] == start)\n np.testing.assert_(res.index[-1] == end)\n\n res = convolution_filter(x, [.75, .5, .3, .2, .1], nsides=1)\n expected = self.expected.conv1_odd\n np.testing.assert_almost_equal(res.values.squeeze(), expected)\n np.testing.assert_(res.index[0] == start)\n np.testing.assert_(res.index[-1] == end)\n # with no NAs\n\n # not a stable filter\n res = recursive_filter(x, [.75, .5, .3, .2, .1], init=[150, 100,\n 125, 135,\n 145])\n expected = self.expected.recurse_odd\n # only have 12 characters in R and this blows up and gets big\n np.testing.assert_almost_equal(res.values.squeeze(), expected, 4)\n np.testing.assert_(res.index[0] == start)\n np.testing.assert_(res.index[-1] == end)\n\n\ndef dummy_func(x):\n return x\n\n\ndef dummy_func_array(x):\n return x.values\n\n\ndef dummy_func_pandas_columns(x):\n return x.values\n\n\ndef dummy_func_pandas_series(x):\n return x['A']\n\n\ndef test_pandas_freq_decorator():\n x = make_dataframe()\n # in x, get a function back that returns an x with the same columns\n func = pandas_wrapper(dummy_func)\n\n np.testing.assert_equal(func(x.values), x)\n\n func = pandas_wrapper(dummy_func_array)\n assert_frame_equal(func(x), x)\n\n expected = x.rename(columns=dict(zip('ABCD', 'EFGH')))\n func = pandas_wrapper(dummy_func_array, names=list('EFGH'))\n assert_frame_equal(func(x), expected)\n",
"from warnings import warn\n\nimport numpy as np\nfrom mizani.palettes import rescale_pal\n\nfrom ..doctools import document\nfrom ..exceptions import PlotnineWarning\nfrom ..utils import alias\nfrom .scale import scale_discrete, scale_continuous\n\n\n@document\nclass scale_stroke_continuous(scale_continuous):\n \"\"\"\n Continuous Stroke Scale\n\n Parameters\n ----------\n range : array_like\n Range ([Minimum, Maximum]) of output stroke values.\n Should be between 0 and 1. Default is ``(1, 6)``\n {superclass_parameters}\n \"\"\"\n _aesthetics = ['stroke']\n\n def __init__(self, range=(1, 6), **kwargs):\n self.palette = rescale_pal(range)\n scale_continuous.__init__(self, **kwargs)\n\n\n@document\nclass scale_stroke_ordinal(scale_discrete):\n \"\"\"\n Discrete Stroke Scale\n\n Parameters\n ----------\n range : array_like\n Range ([Minimum, Maximum]) of output stroke values.\n Should be between 0 and 1. Default is ``(1, 6)``\n {superclass_parameters}\n \"\"\"\n _aesthetics = ['stroke']\n\n def __init__(self, range=(1, 6), **kwargs):\n def palette(n):\n return np.linspace(range[0], range[1], n)\n\n self.palette = palette\n scale_discrete.__init__(self, **kwargs)\n\n\n@document\nclass scale_stroke_discrete(scale_stroke_ordinal):\n \"\"\"\n Discrete Stroke Scale\n\n Parameters\n ----------\n {superclass_parameters}\n \"\"\"\n _aesthetics = ['stroke']\n\n def __init__(self, **kwargs):\n warn(\n \"Using stroke for a ordinal variable is not advised.\",\n PlotnineWarning\n )\n super().__init__(self, **kwargs)\n\n\nalias('scale_stroke', scale_stroke_continuous)\n",
"#!/usr/bin/env python\n\"\"\"Tests for the linalg.isolve.gcrotmk module\n\"\"\"\n\nfrom numpy.testing import (assert_, assert_allclose, assert_equal,\n suppress_warnings)\n\nimport numpy as np\nfrom numpy import zeros, array, allclose\nfrom scipy.linalg import norm\nfrom scipy.sparse import csr_matrix, eye, rand\n\nfrom scipy.sparse.linalg.interface import LinearOperator\nfrom scipy.sparse.linalg import splu\nfrom scipy.sparse.linalg.isolve import gcrotmk, gmres\n\n\nAm = csr_matrix(array([[-2,1,0,0,0,9],\n [1,-2,1,0,5,0],\n [0,1,-2,1,0,0],\n [0,0,1,-2,1,0],\n [0,3,0,1,-2,1],\n [1,0,0,0,1,-2]]))\nb = array([1,2,3,4,5,6])\ncount = [0]\n\n\ndef matvec(v):\n count[0] += 1\n return Am*v\n\n\nA = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)\n\n\ndef do_solve(**kw):\n count[0] = 0\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)\n count_0 = count[0]\n assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))\n return x0, count_0\n\n\nclass TestGCROTMK(object):\n def test_preconditioner(self):\n # Check that preconditioning works\n pc = splu(Am.tocsc())\n M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)\n\n x0, count_0 = do_solve()\n x1, count_1 = do_solve(M=M)\n\n assert_equal(count_1, 3)\n assert_(count_1 < count_0/2)\n assert_(allclose(x1, x0, rtol=1e-14))\n\n def test_arnoldi(self):\n np.random.seed(1)\n\n A = eye(2000) + rand(2000, 2000, density=5e-4)\n b = np.random.rand(2000)\n\n # The inner arnoldi should be equivalent to gmres\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)\n x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)\n\n assert_equal(flag0, 1)\n assert_equal(flag1, 1)\n assert np.linalg.norm(A.dot(x0) - b) > 1e-3\n\n assert_allclose(x0, x1)\n\n def test_cornercase(self):\n np.random.seed(1234)\n\n # Rounding error may prevent convergence with tol=0 --- ensure\n # that the return values in this case are correct, and no\n # exceptions are raised\n\n for n in [3, 5, 10, 100]:\n A = 2*eye(n)\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n b = np.ones(n)\n x, info = gcrotmk(A, b, maxiter=10)\n assert_equal(info, 0)\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n x, info = gcrotmk(A, b, tol=0, maxiter=10)\n if info == 0:\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n b = np.random.rand(n)\n x, info = gcrotmk(A, b, maxiter=10)\n assert_equal(info, 0)\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n x, info = gcrotmk(A, b, tol=0, maxiter=10)\n if info == 0:\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n def test_nans(self):\n A = eye(3, format='lil')\n A[1,1] = np.nan\n b = np.ones(3)\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x, info = gcrotmk(A, b, tol=0, maxiter=10)\n assert_equal(info, 1)\n\n def test_truncate(self):\n np.random.seed(1234)\n A = np.random.rand(30, 30) + np.eye(30)\n b = np.random.rand(30)\n\n for truncate in ['oldest', 'smallest']:\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,\n maxiter=200)\n assert_equal(info, 0)\n assert_allclose(A.dot(x) - b, 0, atol=1e-3)\n\n def test_CU(self):\n for discard_C in (True, 
False):\n # Check that C,U behave as expected\n CU = []\n x0, count_0 = do_solve(CU=CU, discard_C=discard_C)\n assert_(len(CU) > 0)\n assert_(len(CU) <= 6)\n\n if discard_C:\n for c, u in CU:\n assert_(c is None)\n\n # should converge immediately\n x1, count_1 = do_solve(CU=CU, discard_C=discard_C)\n if discard_C:\n assert_equal(count_1, 2 + len(CU))\n else:\n assert_equal(count_1, 3)\n assert_(count_1 <= count_0/2)\n assert_allclose(x1, x0, atol=1e-14)\n\n def test_denormals(self):\n # Check that no warnings are emitted if the matrix contains\n # numbers for which 1/x has no float representation, and that\n # the solver behaves properly.\n A = np.array([[1, 2], [3, 4]], dtype=float)\n A *= 100 * np.nextafter(0, 1)\n\n b = np.array([1, 1])\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n xp, info = gcrotmk(A, b)\n\n if info == 0:\n assert_allclose(A.dot(xp), b)\n",
"\"\"\"Test functions for the sparse.linalg.interface module\n\"\"\"\n\nfrom functools import partial\nfrom itertools import product\nimport operator\nimport pytest\nfrom pytest import raises as assert_raises, warns\nfrom numpy.testing import assert_, assert_equal\n\nimport numpy as np\nimport scipy.sparse as sparse\n\nfrom scipy.sparse.linalg import interface\nfrom scipy.sparse.sputils import matrix\n\n\nclass TestLinearOperator(object):\n def setup_method(self):\n self.A = np.array([[1,2,3],\n [4,5,6]])\n self.B = np.array([[1,2],\n [3,4],\n [5,6]])\n self.C = np.array([[1,2],\n [3,4]])\n\n def test_matvec(self):\n def get_matvecs(A):\n return [{\n 'shape': A.shape,\n 'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),\n 'rmatvec': lambda x: np.dot(A.T.conj(),\n x).reshape(A.shape[1])\n },\n {\n 'shape': A.shape,\n 'matvec': lambda x: np.dot(A, x),\n 'rmatvec': lambda x: np.dot(A.T.conj(), x),\n 'rmatmat': lambda x: np.dot(A.T.conj(), x),\n 'matmat': lambda x: np.dot(A, x)\n }]\n\n for matvecs in get_matvecs(self.A):\n A = interface.LinearOperator(**matvecs)\n\n assert_(A.args == ())\n\n assert_equal(A.matvec(np.array([1,2,3])), [14,32])\n assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])\n assert_equal(A * np.array([1,2,3]), [14,32])\n assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])\n assert_equal(A.dot(np.array([1,2,3])), [14,32])\n assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])\n\n assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]])\n assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]])\n assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]])\n\n assert_equal((2*A)*[1,1,1], [12,30])\n assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18])\n assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])\n assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])\n assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]])\n assert_equal((A*2)*[1,1,1], [12,30])\n assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])\n assert_equal((2j*A)*[1,1,1], [12j,30j])\n assert_equal((A+A)*[1,1,1], [12, 30])\n assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18])\n assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])\n assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])\n assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])\n assert_equal((-A)*[1,1,1], [-6,-15])\n assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])\n assert_equal((A-A)*[1,1,1], [0,0])\n assert_equal((A - A) * [[1], [1], [1]], [[0], [0]])\n\n X = np.array([[1, 2], [3, 4]])\n # A_asarray = np.array([[1, 2, 3], [4, 5, 6]])\n assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X))\n assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X))\n assert_equal((2j * A).rmatmat(X),\n np.dot((2j * self.A).T.conj(), X))\n assert_equal((A * 2j).rmatmat(X),\n np.dot((self.A * 2j).T.conj(), X))\n assert_equal((A + A).rmatmat(X),\n np.dot((self.A + self.A).T, X))\n assert_equal((A + 2j * A).rmatmat(X),\n np.dot((self.A + 2j * self.A).T.conj(), X))\n assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X))\n assert_equal((A - A).rmatmat(X),\n np.dot((self.A - self.A).T, X))\n assert_equal((2j * A).rmatmat(2j * X),\n np.dot((2j * self.A).T.conj(), 2j * X))\n\n z = A+A\n assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)\n z = 2*A\n assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)\n\n assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))\n assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))\n assert_(isinstance(A * np.array([1,2,3]), np.ndarray))\n assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))\n 
assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))\n assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))\n\n assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray))\n assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray))\n assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray))\n\n assert_(isinstance(2*A, interface._ScaledLinearOperator))\n assert_(isinstance(2j*A, interface._ScaledLinearOperator))\n assert_(isinstance(A+A, interface._SumLinearOperator))\n assert_(isinstance(-A, interface._ScaledLinearOperator))\n assert_(isinstance(A-A, interface._SumLinearOperator))\n\n assert_((2j*A).dtype == np.complex_)\n\n assert_raises(ValueError, A.matvec, np.array([1,2]))\n assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))\n assert_raises(ValueError, A.matvec, np.array([[1],[2]]))\n assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))\n\n assert_raises(ValueError, lambda: A*A)\n assert_raises(ValueError, lambda: A**2)\n\n for matvecsA, matvecsB in product(get_matvecs(self.A),\n get_matvecs(self.B)):\n A = interface.LinearOperator(**matvecsA)\n B = interface.LinearOperator(**matvecsB)\n # AtimesB = np.array([[22, 28], [49, 64]])\n AtimesB = self.A.dot(self.B)\n X = np.array([[1, 2], [3, 4]])\n\n assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X))\n assert_equal((2j * A * B).rmatmat(X),\n np.dot((2j * AtimesB).T.conj(), X))\n\n assert_equal((A*B)*[1,1], [50,113])\n assert_equal((A*B)*[[1],[1]], [[50],[113]])\n assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])\n\n assert_equal((A * B).rmatvec([1, 1]), [71, 92])\n assert_equal((A * B).H.matvec([1, 1]), [71, 92])\n\n assert_(isinstance(A*B, interface._ProductLinearOperator))\n\n assert_raises(ValueError, lambda: A+B)\n assert_raises(ValueError, lambda: A**2)\n\n z = A*B\n assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)\n\n for matvecsC in get_matvecs(self.C):\n C = interface.LinearOperator(**matvecsC)\n X = np.array([[1, 2], [3, 4]])\n\n assert_equal(C.rmatmat(X), np.dot((self.C).T, X))\n assert_equal((C**2).rmatmat(X),\n np.dot((np.dot(self.C, self.C)).T, X))\n\n assert_equal((C**2)*[1,1], [17,37])\n assert_equal((C**2).rmatvec([1, 1]), [22, 32])\n assert_equal((C**2).H.matvec([1, 1]), [22, 32])\n assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])\n\n assert_(isinstance(C**2, interface._PowerLinearOperator))\n\n def test_matmul(self):\n D = {'shape': self.A.shape,\n 'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),\n 'rmatvec': lambda x: np.dot(self.A.T.conj(),\n x).reshape(self.A.shape[1]),\n 'rmatmat': lambda x: np.dot(self.A.T.conj(), x),\n 'matmat': lambda x: np.dot(self.A, x)}\n A = interface.LinearOperator(**D)\n B = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n b = B[0]\n\n assert_equal(operator.matmul(A, b), A * b)\n assert_equal(operator.matmul(A, B), A * B)\n assert_raises(ValueError, operator.matmul, A, 2)\n assert_raises(ValueError, operator.matmul, 2, A)\n\n\nclass TestAsLinearOperator(object):\n def setup_method(self):\n self.cases = []\n\n def make_cases(original, dtype):\n cases = []\n\n cases.append((matrix(original, dtype=dtype), original))\n cases.append((np.array(original, dtype=dtype), original))\n cases.append((sparse.csr_matrix(original, dtype=dtype), original))\n\n # Test default implementations of _adjoint and _rmatvec, which\n # refer to each other.\n def mv(x, dtype):\n y = original.dot(x)\n if len(x.shape) == 2:\n y = y.reshape(-1, 1)\n return y\n\n def rmv(x, dtype):\n return original.T.conj().dot(x)\n\n class 
BaseMatlike(interface.LinearOperator):\n args = ()\n\n def __init__(self, dtype):\n self.dtype = np.dtype(dtype)\n self.shape = original.shape\n\n def _matvec(self, x):\n return mv(x, self.dtype)\n\n class HasRmatvec(BaseMatlike):\n args = ()\n\n def _rmatvec(self,x):\n return rmv(x, self.dtype)\n\n class HasAdjoint(BaseMatlike):\n args = ()\n\n def _adjoint(self):\n shape = self.shape[1], self.shape[0]\n matvec = partial(rmv, dtype=self.dtype)\n rmatvec = partial(mv, dtype=self.dtype)\n return interface.LinearOperator(matvec=matvec,\n rmatvec=rmatvec,\n dtype=self.dtype,\n shape=shape)\n\n class HasRmatmat(HasRmatvec):\n def _matmat(self, x):\n return original.dot(x)\n\n def _rmatmat(self, x):\n return original.T.conj().dot(x)\n\n cases.append((HasRmatvec(dtype), original))\n cases.append((HasAdjoint(dtype), original))\n cases.append((HasRmatmat(dtype), original))\n return cases\n\n original = np.array([[1,2,3], [4,5,6]])\n self.cases += make_cases(original, np.int32)\n self.cases += make_cases(original, np.float32)\n self.cases += make_cases(original, np.float64)\n self.cases += [(interface.aslinearoperator(M).T, A.T)\n for M, A in make_cases(original.T, np.float64)]\n self.cases += [(interface.aslinearoperator(M).H, A.T.conj())\n for M, A in make_cases(original.T, np.float64)]\n\n original = np.array([[1, 2j, 3j], [4j, 5j, 6]])\n self.cases += make_cases(original, np.complex_)\n self.cases += [(interface.aslinearoperator(M).T, A.T)\n for M, A in make_cases(original.T, np.complex_)]\n self.cases += [(interface.aslinearoperator(M).H, A.T.conj())\n for M, A in make_cases(original.T, np.complex_)]\n\n def test_basic(self):\n\n for M, A_array in self.cases:\n A = interface.aslinearoperator(M)\n M,N = A.shape\n\n xs = [np.array([1, 2, 3]),\n np.array([[1], [2], [3]])]\n ys = [np.array([1, 2]), np.array([[1], [2]])]\n\n if A.dtype == np.complex_:\n xs += [np.array([1, 2j, 3j]),\n np.array([[1], [2j], [3j]])]\n ys += [np.array([1, 2j]), np.array([[1], [2j]])]\n\n x2 = np.array([[1, 4], [2, 5], [3, 6]])\n\n for x in xs:\n assert_equal(A.matvec(x), A_array.dot(x))\n assert_equal(A * x, A_array.dot(x))\n\n assert_equal(A.matmat(x2), A_array.dot(x2))\n assert_equal(A * x2, A_array.dot(x2))\n\n for y in ys:\n assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))\n assert_equal(A.T.matvec(y), A_array.T.dot(y))\n assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))\n\n for y in ys:\n if y.ndim < 2:\n continue\n assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))\n assert_equal(A.T.matmat(y), A_array.T.dot(y))\n assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))\n\n if hasattr(M,'dtype'):\n assert_equal(A.dtype, M.dtype)\n\n assert_(hasattr(A, 'args'))\n\n def test_dot(self):\n\n for M, A_array in self.cases:\n A = interface.aslinearoperator(M)\n M,N = A.shape\n\n x0 = np.array([1, 2, 3])\n x1 = np.array([[1], [2], [3]])\n x2 = np.array([[1, 4], [2, 5], [3, 6]])\n\n assert_equal(A.dot(x0), A_array.dot(x0))\n assert_equal(A.dot(x1), A_array.dot(x1))\n assert_equal(A.dot(x2), A_array.dot(x2))\n\n\ndef test_repr():\n A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)\n repr_A = repr(A)\n assert_('unspecified dtype' not in repr_A, repr_A)\n\n\ndef test_identity():\n ident = interface.IdentityOperator((3, 3))\n assert_equal(ident * [1, 2, 3], [1, 2, 3])\n assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))\n\n assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])\n\n\ndef test_attributes():\n A = interface.aslinearoperator(np.arange(16).reshape(4, 4))\n\n def 
always_four_ones(x):\n x = np.asarray(x)\n assert_(x.shape == (3,) or x.shape == (3, 1))\n return np.ones(4)\n\n B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)\n\n for op in [A, B, A * B, A.H, A + A, B + B, A**4]:\n assert_(hasattr(op, \"dtype\"))\n assert_(hasattr(op, \"shape\"))\n assert_(hasattr(op, \"_matvec\"))\n\ndef matvec(x):\n \"\"\" Needed for test_pickle as local functions are not pickleable \"\"\"\n return np.zeros(3)\n\ndef test_pickle():\n import pickle\n\n for protocol in range(pickle.HIGHEST_PROTOCOL + 1):\n A = interface.LinearOperator((3, 3), matvec)\n s = pickle.dumps(A, protocol=protocol)\n B = pickle.loads(s)\n\n for k in A.__dict__:\n assert_equal(getattr(A, k), getattr(B, k))\n\ndef test_inheritance():\n class Empty(interface.LinearOperator):\n pass\n\n with warns(RuntimeWarning, match=\"should implement at least\"):\n assert_raises(TypeError, Empty)\n\n class Identity(interface.LinearOperator):\n def __init__(self, n):\n super(Identity, self).__init__(dtype=None, shape=(n, n))\n\n def _matvec(self, x):\n return x\n\n id3 = Identity(3)\n assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])\n assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])\n\n class MatmatOnly(interface.LinearOperator):\n def __init__(self, A):\n super(MatmatOnly, self).__init__(A.dtype, A.shape)\n self.A = A\n\n def _matmat(self, x):\n return self.A.dot(x)\n\n mm = MatmatOnly(np.random.randn(5, 3))\n assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))\n\ndef test_dtypes_of_operator_sum():\n # gh-6078\n\n mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)\n mat_real = np.random.rand(2,2)\n\n complex_operator = interface.aslinearoperator(mat_complex)\n real_operator = interface.aslinearoperator(mat_real)\n\n sum_complex = complex_operator + complex_operator\n sum_real = real_operator + real_operator\n\n assert_equal(sum_real.dtype, np.float64)\n assert_equal(sum_complex.dtype, np.complex128)\n\ndef test_no_double_init():\n call_count = [0]\n\n def matvec(v):\n call_count[0] += 1\n return v\n\n # It should call matvec exactly once (in order to determine the\n # operator dtype)\n interface.LinearOperator((2, 2), matvec=matvec)\n assert_equal(call_count[0], 1)\n\ndef test_adjoint_conjugate():\n X = np.array([[1j]])\n A = interface.aslinearoperator(X)\n\n B = 1j * A\n Y = 1j * X\n\n v = np.array([1])\n\n assert_equal(B.dot(v), Y.dot(v))\n assert_equal(B.H.dot(v), Y.T.conj().dot(v))\n\ndef test_ndim():\n X = np.array([[1]])\n A = interface.aslinearoperator(X)\n assert_equal(A.ndim, 2)\n\ndef test_transpose_noconjugate():\n X = np.array([[1j]])\n A = interface.aslinearoperator(X)\n\n B = 1j * A\n Y = 1j * X\n\n v = np.array([1])\n\n assert_equal(B.dot(v), Y.dot(v))\n assert_equal(B.T.dot(v), Y.T.dot(v))\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 3 21:08:49 2017\n\nAuthor: Josef Perktold\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom statsmodels.discrete.discrete_model import (Poisson, NegativeBinomial,\n NegativeBinomialP)\nfrom statsmodels.tools.tools import add_constant\n\nimport statsmodels.discrete.tests.results.results_count_margins as res_stata\n\n# load data into module namespace\nfrom statsmodels.datasets.cpunish import load\ncpunish_data = load(as_pandas=False)\ncpunish_data.exog[:,3] = np.log(cpunish_data.exog[:,3])\nexog = add_constant(cpunish_data.exog, prepend=False)\nendog = cpunish_data.endog - 1 # avoid zero-truncation\nexog /= np.round(exog.max(0), 3)\n\nclass CheckMarginMixin(object):\n rtol_fac = 1\n\n def test_margins_table(self):\n res1 = self.res1\n sl = self.res1_slice\n rf = self.rtol_fac\n assert_allclose(self.margeff.margeff, self.res1.params[sl], rtol=1e-5 * rf)\n assert_allclose(self.margeff.margeff_se, self.res1.bse[sl], rtol=1e-6 * rf)\n assert_allclose(self.margeff.pvalues, self.res1.pvalues[sl], rtol=5e-6 * rf)\n assert_allclose(self.margeff.conf_int(), res1.margins_table[sl, 4:6],\n rtol=1e-6 * rf)\n\n\nclass TestPoissonMargin(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,\n -5.0529]\n mod_poi = Poisson(endog, exog)\n res_poi = mod_poi.fit(start_params=start_params)\n #res_poi = mod_poi.fit(maxiter=100)\n marge_poi = res_poi.get_margeff()\n cls.res = res_poi\n cls.margeff = marge_poi\n\n cls.rtol_fac = 1\n cls.res1_slice = slice(None, None, None)\n cls.res1 = res_stata.results_poisson_margins_cont\n\n\nclass TestPoissonMarginDummy(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,\n -5.0529]\n mod_poi = Poisson(endog, exog)\n res_poi = mod_poi.fit(start_params=start_params)\n marge_poi = res_poi.get_margeff(dummy=True)\n cls.res = res_poi\n cls.margeff = marge_poi\n\n cls.res1_slice = [0, 1, 2, 3, 5, 6]\n cls.res1 = res_stata.results_poisson_margins_dummy\n\n\nclass TestNegBinMargin(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,\n -2.88, 1.14]\n mod = NegativeBinomial(endog, exog)\n res = mod.fit(start_params=start_params, method='nm', maxiter=2000)\n marge = res.get_margeff()\n cls.res = res\n cls.margeff = marge\n\n cls.res1_slice = slice(None, None, None)\n cls.res1 = res_stata.results_negbin_margins_cont\n cls.rtol_fac = 5e1\n # negbin has lower agreement with Stata in this case\n\n\nclass TestNegBinMarginDummy(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,\n -2.88, 1.14]\n mod = NegativeBinomial(endog, exog)\n res = mod.fit(start_params=start_params, method='nm', maxiter=2000)\n marge = res.get_margeff(dummy=True)\n cls.res = res\n cls.margeff = marge\n\n cls.res1_slice = cls.res1_slice = [0, 1, 2, 3, 5, 6]\n cls.res1 = res_stata.results_negbin_margins_dummy\n cls.rtol_fac = 5e1\n\n\nclass TestNegBinPMargin(CheckMarginMixin):\n # this is the same as the nb2 version above for NB-P, p=2\n\n @classmethod\n def 
setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,\n -2.88, 1.14]\n mod = NegativeBinomialP(endog, exog) # checks also that default p=2\n res = mod.fit(start_params=start_params, method='nm', maxiter=2000)\n marge = res.get_margeff()\n cls.res = res\n cls.margeff = marge\n\n cls.res1_slice = slice(None, None, None)\n cls.res1 = res_stata.results_negbin_margins_cont\n cls.rtol_fac = 5e1\n # negbin has lower agreement with Stata in this case\n",
"from typing import List\n\nfrom pandas._typing import FilePathOrBuffer, Scalar, StorageOptions\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.io.excel._base import BaseExcelReader\n\n\nclass PyxlsbReader(BaseExcelReader):\n def __init__(\n self,\n filepath_or_buffer: FilePathOrBuffer,\n storage_options: StorageOptions = None,\n ):\n \"\"\"\n Reader using pyxlsb engine.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object, or Workbook\n Object to be parsed.\n storage_options : dict, optional\n passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)\n \"\"\"\n import_optional_dependency(\"pyxlsb\")\n # This will call load_workbook on the filepath or buffer\n # And set the result to the book-attribute\n super().__init__(filepath_or_buffer, storage_options=storage_options)\n\n @property\n def _workbook_class(self):\n from pyxlsb import Workbook\n\n return Workbook\n\n def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):\n from pyxlsb import open_workbook\n\n # TODO: hack in buffer capability\n # This might need some modifications to the Pyxlsb library\n # Actual work for opening it is in xlsbpackage.py, line 20-ish\n\n return open_workbook(filepath_or_buffer)\n\n @property\n def sheet_names(self) -> List[str]:\n return self.book.sheets\n\n def get_sheet_by_name(self, name: str):\n self.raise_if_bad_sheet_by_name(name)\n return self.book.get_sheet(name)\n\n def get_sheet_by_index(self, index: int):\n self.raise_if_bad_sheet_by_index(index)\n # pyxlsb sheets are indexed from 1 onwards\n # There's a fix for this in the source, but the pypi package doesn't have it\n return self.book.get_sheet(index + 1)\n\n def _convert_cell(self, cell, convert_float: bool) -> Scalar:\n # TODO: there is no way to distinguish between floats and datetimes in pyxlsb\n # This means that there is no way to read datetime types from an xlsb file yet\n if cell.v is None:\n return \"\" # Prevents non-named columns from not showing up as Unnamed: i\n if isinstance(cell.v, float) and convert_float:\n val = int(cell.v)\n if val == cell.v:\n return val\n else:\n return float(cell.v)\n\n return cell.v\n\n def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:\n return [\n [self._convert_cell(c, convert_float) for c in r]\n for r in sheet.rows(sparse=False)\n ]\n",
"import numpy as np\n\nimport matplotlib as mpl\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\n\n\n# The original version of this test relied on mpl_toolkits's slightly different\n# colorbar implementation; moving to matplotlib's own colorbar implementation\n# caused the small image comparison error.\n@image_comparison(['imagegrid_cbar_mode.png'],\n remove_text=True, style='mpl20', tol=0.3)\ndef test_imagegrid_cbar_mode_edge():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))\n arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))\n\n fig = plt.figure(figsize=(18, 9))\n\n positions = (241, 242, 243, 244, 245, 246, 247, 248)\n directions = ['row']*4 + ['column']*4\n cbar_locations = ['left', 'right', 'top', 'bottom']*2\n\n for position, direction, location in zip(\n positions, directions, cbar_locations):\n grid = ImageGrid(fig, position,\n nrows_ncols=(2, 2),\n direction=direction,\n cbar_location=location,\n cbar_size='20%',\n cbar_mode='edge')\n ax1, ax2, ax3, ax4, = grid\n\n ax1.imshow(arr.real, cmap='nipy_spectral')\n ax2.imshow(arr.imag, cmap='hot')\n ax3.imshow(np.abs(arr), cmap='jet')\n ax4.imshow(np.arctan2(arr.imag, arr.real), cmap='hsv')\n\n # In each row/column, the \"first\" colorbars must be overwritten by the\n # \"second\" ones. To achieve this, clear out the axes first.\n for ax in grid:\n ax.cax.cla()\n cb = ax.cax.colorbar(\n ax.images[0],\n ticks=mpl.ticker.MaxNLocator(5)) # old default locator.\n\n\ndef test_imagegrid():\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=(1, 1))\n ax = grid[0]\n im = ax.imshow([[1, 2]], norm=mpl.colors.LogNorm())\n cb = ax.cax.colorbar(im)\n assert isinstance(cb.locator, mpl.colorbar._ColorbarLogLocator)\n"
] | [
[
"pandas._testing.assert_numpy_array_equal",
"pandas.tests.plotting.common.TestPlotBase.setup_method",
"matplotlib.pyplot.gcf",
"pandas._testing.assert_produces_warning",
"numpy.random.choice",
"pandas._testing.close",
"pandas._testing.assert_almost_equal",
"numpy.random.rand",
"pandas.tests.plotting.common._check_plot_works",
"pandas._testing.RNGContext",
"pandas.Index",
"matplotlib.rcdefaults",
"pandas.plotting._matplotlib.hist._grouped_hist",
"pandas._testing.makeTimeSeries",
"pandas.DataFrame",
"numpy.random.randn",
"matplotlib.pyplot.subplot",
"numpy.random.normal",
"numpy.random.randint"
],
[
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_raises",
"pandas.date_range",
"numpy.testing.assert_equal",
"pandas.concat",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.assert_"
],
[
"numpy.linspace"
],
[
"numpy.allclose",
"numpy.ones",
"numpy.eye",
"scipy.linalg.norm",
"numpy.zeros",
"numpy.testing.assert_equal",
"scipy.sparse.rand",
"numpy.random.seed",
"scipy.sparse.eye",
"numpy.nextafter",
"scipy.sparse.linalg.isolve.gcrotmk",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.suppress_warnings",
"scipy.sparse.linalg.interface.LinearOperator",
"numpy.testing.assert_"
],
[
"numpy.ones",
"numpy.zeros",
"numpy.testing.assert_equal",
"scipy.sparse.linalg.interface.IdentityOperator",
"numpy.dtype",
"numpy.random.randn",
"numpy.asarray",
"scipy.sparse.csr_matrix",
"numpy.arange",
"scipy.sparse.linalg.interface.aslinearoperator",
"numpy.random.rand",
"scipy.sparse.sputils.matrix",
"numpy.array",
"numpy.dot",
"scipy.sparse.linalg.interface.LinearOperator",
"numpy.testing.assert_"
],
[
"numpy.log",
"numpy.testing.assert_allclose"
],
[
"pandas.compat._optional.import_optional_dependency"
],
[
"matplotlib.ticker.MaxNLocator",
"numpy.arctan2",
"matplotlib.pyplot.figure",
"numpy.abs",
"numpy.cos",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.colors.LogNorm",
"numpy.sin",
"numpy.linspace"
]
] |
RussellM2020/maml_gps | [
"631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5"
] | [
"rllab/optimizers/conjugate_gradient_optimizer.py"
] | [
"import numpy as np\nimport theano\nimport theano.tensor as TT\n\nfrom rllab.core.serializable import Serializable\nfrom rllab.misc import ext\nfrom rllab.misc import krylov\nfrom rllab.misc import logger\nfrom rllab.misc.ext import sliced_fun\n\n\nclass PerlmutterHvp(Serializable):\n\n def __init__(self, num_slices=1):\n Serializable.quick_init(self, locals())\n self.target = None\n self.reg_coeff = None\n self.opt_fun = None\n self._num_slices = num_slices\n\n def update_opt(self, f, target, inputs, reg_coeff):\n self.target = target\n self.reg_coeff = reg_coeff\n params = target.get_params(trainable=True)\n\n constraint_grads = theano.grad(\n f, wrt=params, disconnected_inputs='warn')\n xs = tuple([ext.new_tensor_like(\"%s x\" % p.name, p) for p in params])\n\n def Hx_plain():\n Hx_plain_splits = TT.grad(\n TT.sum([TT.sum(g * x)\n for g, x in zip(constraint_grads, xs)]),\n wrt=params,\n disconnected_inputs='warn'\n )\n return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])\n\n self.opt_fun = ext.lazydict(\n f_Hx_plain=lambda: ext.compile_function(\n inputs=inputs + xs,\n outputs=Hx_plain(),\n log_name=\"f_Hx_plain\",\n ),\n )\n\n def build_eval(self, inputs):\n def eval(x):\n xs = tuple(self.target.flat_to_params(x, trainable=True))\n ret = sliced_fun(self.opt_fun[\"f_Hx_plain\"], self._num_slices)(\n inputs, xs) + self.reg_coeff * x\n return ret\n\n return eval\n\n\nclass FiniteDifferenceHvp(Serializable):\n\n def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):\n Serializable.quick_init(self, locals())\n self.base_eps = base_eps\n self.symmetric = symmetric\n self.grad_clip = grad_clip\n self._num_slices = num_slices\n\n def update_opt(self, f, target, inputs, reg_coeff):\n self.target = target\n self.reg_coeff = reg_coeff\n\n params = target.get_params(trainable=True)\n\n constraint_grads = theano.grad(\n f, wrt=params, disconnected_inputs='warn')\n flat_grad = ext.flatten_tensor_variables(constraint_grads)\n\n def f_Hx_plain(*args):\n inputs_ = args[:len(inputs)]\n xs = args[len(inputs):]\n flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])\n param_val = self.target.get_param_values(trainable=True)\n eps = np.cast['float32'](\n self.base_eps / (np.linalg.norm(param_val) + 1e-8))\n self.target.set_param_values(\n param_val + eps * flat_xs, trainable=True)\n flat_grad_dvplus = self.opt_fun[\"f_grad\"](*inputs_)\n if self.symmetric:\n self.target.set_param_values(\n param_val - eps * flat_xs, trainable=True)\n flat_grad_dvminus = self.opt_fun[\"f_grad\"](*inputs_)\n hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)\n self.target.set_param_values(param_val, trainable=True)\n else:\n self.target.set_param_values(param_val, trainable=True)\n flat_grad = self.opt_fun[\"f_grad\"](*inputs_)\n hx = (flat_grad_dvplus - flat_grad) / eps\n return hx\n\n self.opt_fun = ext.lazydict(\n f_grad=lambda: ext.compile_function(\n inputs=inputs,\n outputs=flat_grad,\n log_name=\"f_grad\",\n ),\n f_Hx_plain=lambda: f_Hx_plain,\n )\n\n def build_eval(self, inputs):\n def eval(x):\n xs = tuple(self.target.flat_to_params(x, trainable=True))\n ret = sliced_fun(self.opt_fun[\"f_Hx_plain\"], self._num_slices)(\n inputs, xs) + self.reg_coeff * x\n return ret\n\n return eval\n\n\nclass ConjugateGradientOptimizer(Serializable):\n \"\"\"\n Performs constrained optimization via line search. 
The search direction is computed using a conjugate gradient\n algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient\n of the loss function.\n \"\"\"\n\n def __init__(\n self,\n cg_iters=10,\n reg_coeff=1e-5,\n subsample_factor=1.,\n backtrack_ratio=0.8,\n max_backtracks=15,\n accept_violation=False,\n hvp_approach=None,\n num_slices=1):\n \"\"\"\n\n :param cg_iters: The number of CG iterations used to calculate A^-1 g\n :param reg_coeff: A small value so that A -> A + reg*I\n :param subsample_factor: Subsampling factor to reduce samples when using \"conjugate gradient. Since the\n computation time for the descent direction dominates, this can greatly reduce the overall computation time.\n :param accept_violation: whether to accept the descent step if it violates the line search condition after\n exhausting all backtracking budgets\n :return:\n \"\"\"\n Serializable.quick_init(self, locals())\n self._cg_iters = cg_iters\n self._reg_coeff = reg_coeff\n self._subsample_factor = subsample_factor\n self._backtrack_ratio = backtrack_ratio\n self._max_backtracks = max_backtracks\n self._num_slices = num_slices\n\n self._opt_fun = None\n self._target = None\n self._max_constraint_val = None\n self._constraint_name = None\n self._accept_violation = accept_violation\n if hvp_approach is None:\n hvp_approach = PerlmutterHvp(num_slices)\n self._hvp_approach = hvp_approach\n\n def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name=\"constraint\", *args,\n **kwargs):\n \"\"\"\n :param loss: Symbolic expression for the loss function.\n :param target: A parameterized object to optimize over. It should implement methods of the\n :class:`rllab.core.paramerized.Parameterized` class.\n :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.\n :param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. 
It is assumed\n that the first dimension of these inputs should correspond to the number of data points\n :param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled\n :return: No return value.\n \"\"\"\n\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n else:\n extra_inputs = tuple(extra_inputs)\n\n constraint_term, constraint_value = leq_constraint\n\n params = target.get_params(trainable=True)\n grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')\n flat_grad = ext.flatten_tensor_variables(grads)\n\n self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,\n reg_coeff=self._reg_coeff)\n\n self._target = target\n self._max_constraint_val = constraint_value\n self._constraint_name = constraint_name\n\n self._opt_fun = ext.lazydict(\n f_loss=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=loss,\n log_name=\"f_loss\",\n ),\n f_grad=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=flat_grad,\n log_name=\"f_grad\",\n ),\n f_constraint=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=constraint_term,\n log_name=\"constraint\",\n ),\n f_loss_constraint=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=[loss, constraint_term],\n log_name=\"f_loss_constraint\",\n ),\n )\n\n def loss(self, inputs, extra_inputs=None):\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n return sliced_fun(self._opt_fun[\"f_loss\"], self._num_slices)(inputs, extra_inputs)\n\n def constraint_val(self, inputs, extra_inputs=None):\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n return sliced_fun(self._opt_fun[\"f_constraint\"], self._num_slices)(inputs, extra_inputs)\n\n def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):\n\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n\n if self._subsample_factor < 1:\n if subsample_grouped_inputs is None:\n subsample_grouped_inputs = [inputs]\n subsample_inputs = tuple()\n for inputs_grouped in subsample_grouped_inputs:\n n_samples = len(inputs_grouped[0])\n inds = np.random.choice(\n n_samples, int(n_samples * self._subsample_factor), replace=False)\n subsample_inputs += tuple([x[inds] for x in inputs_grouped])\n else:\n subsample_inputs = inputs\n\n logger.log(\"computing loss before\")\n loss_before = sliced_fun(self._opt_fun[\"f_loss\"], self._num_slices)(\n inputs, extra_inputs)\n logger.log(\"performing update\")\n logger.log(\"computing descent direction\")\n\n flat_g = sliced_fun(self._opt_fun[\"f_grad\"], self._num_slices)(\n inputs, extra_inputs)\n\n Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)\n\n descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)\n\n initial_step_size = np.sqrt(\n 2.0 * self._max_constraint_val *\n (1. 
/ (descent_direction.dot(Hx(descent_direction)) + 1e-8))\n )\n if np.isnan(initial_step_size):\n initial_step_size = 1.\n flat_descent_step = initial_step_size * descent_direction\n\n logger.log(\"descent direction computed\")\n\n prev_param = np.copy(self._target.get_param_values(trainable=True))\n n_iter = 0\n for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):\n cur_step = ratio * flat_descent_step\n cur_param = prev_param - cur_step\n self._target.set_param_values(cur_param, trainable=True)\n loss, constraint_val = sliced_fun(\n self._opt_fun[\"f_loss_constraint\"], self._num_slices)(inputs, extra_inputs)\n if loss < loss_before and constraint_val <= self._max_constraint_val:\n break\n if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=\n self._max_constraint_val) and not self._accept_violation:\n logger.log(\"Line search condition violated. Rejecting the step!\")\n if np.isnan(loss):\n logger.log(\"Violated because loss is NaN\")\n if np.isnan(constraint_val):\n logger.log(\"Violated because constraint %s is NaN\" %\n self._constraint_name)\n if loss >= loss_before:\n logger.log(\"Violated because loss not improving\")\n if constraint_val >= self._max_constraint_val:\n logger.log(\n \"Violated because constraint %s is violated\" % self._constraint_name)\n self._target.set_param_values(prev_param, trainable=True)\n logger.log(\"backtrack iters: %d\" % n_iter)\n logger.log(\"computing loss after\")\n logger.log(\"optimization finished\")\n"
] | [
[
"numpy.arange",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.isnan"
]
] |
ngwenbin/ExpenseTracker | [
"f50793c9a4c6efc4f58cc7d759b45f2e16b7832e"
] | [
"app.py"
] | [
"from flask import Flask, render_template, redirect, url_for, flash, request, abort\nfrom functions import UserLogin, UserRegistration, NewExpense\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom datetime import datetime, timedelta, date\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager, UserMixin, login_user, current_user, logout_user, login_required\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom itertools import zip_longest\nimport os\nimport io\nimport base64\nimport numpy as np\n\napp = Flask(__name__)\nSECRET_KEY = os.urandom(16)\napp.config['SECRET_KEY'] = SECRET_KEY\napp.config['SQLALCHEMY_DATABASE_URI'] = ' '\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass User(db.Model, UserMixin):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(30), unique=True, nullable=False)\n username = db.Column(db.String(10), unique=True, nullable=False)\n password = db.Column(db.String(128), nullable=False)\n expense_id = db.relationship('UserExpense', backref='expensedate', lazy='dynamic')\n\n def __repr__(self):\n return f\"User('{self.username}', '{self.email}')\"\n\nclass UserExpense(db.Model):\n __tablename__ = 'user_expenses'\n id = db.Column(db.Integer, primary_key=True)\n userid = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n category = db.Column(db.String(30))\n description = db.Column(db.String(50))\n expense = db.Column(db.Numeric(scale=2, asdecimal=True))\n expense_date = db.Column(db.Date, default=date.today())\n\n def __repr__(self):\n return f\"UserExpense('{self.category}', '{self.description}', '{self.expense}', '{self.expense_date}')\"\n\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n form = UserLogin()\n if current_user.is_authenticated:\n return redirect(url_for('overview'))\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n return redirect(url_for('overview'))\n else:\n flash('Invalid login', 'danger')\n return render_template('login.html', form=form)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('overview'))\n form = UserRegistration()\n if form.validate_on_submit():\n password_hashed = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=password_hashed)\n db.session.add(user)\n db.session.commit()\n flash('Account created!', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n flash('Logged out!', 'success')\n return redirect(url_for('login'))\n\n@app.route('/overview', methods=['GET','POST'])\n@login_required\ndef overview():\n form = NewExpense()\n userids = current_user.id\n name = current_user.username\n\n # Forms\n if form.validate_on_submit():\n expenses = UserExpense(category=form.category.data, description=form.description.data,\n expense=form.expense.data, 
expensedate=current_user)\n db.session.add(expenses)\n db.session.commit()\n\n # Queries\n filters = db.session.query(UserExpense.expense_date).filter(UserExpense.userid==userids).distinct()\n\n date_list=[] #List of distinct dates\n for u in filters:\n date_list.append(f'{u.expense_date}')\n\n date_expense_list=[] #List of expenses for that specific date\n for item in date_list:\n date_expense = db.session.query(func.sum(UserExpense.expense)).filter(UserExpense.userid==userids, UserExpense.expense_date==item).scalar()\n date_expense_list.append(f'{date_expense}')\n\n item = list(zip_longest(date_list,date_expense_list,date_list, fillvalue=\"\"))\n\n # Matplotlib\n fig, ax = plt.subplots(figsize=(11, 5))\n ax.plot(date_list, [float(g) for g in date_expense_list], label=\"Expenses\")\n ax.legend()\n fig.suptitle('Expense pattern')\n\n patternpngImage = io.BytesIO()\n FigureCanvas(fig).print_png(patternpngImage)\n\n patternpngImageString = \"data:image/png;base64,\"\n patternpngImageString += base64.b64encode(patternpngImage.getvalue()).decode('utf8')\n\n\n return render_template('overview.html', normal='normal', title='Expenses',image=patternpngImageString,\n form=form, name=name, item=item)\n\n\n@app.route('/expense/<string:wkex_id>', methods=['GET','POST'])\n@login_required\ndef userexpenses(wkex_id):\n form = NewExpense()\n userids = current_user.id\n name = current_user.username\n\n # Queries\n items = db.session.query(UserExpense).filter(UserExpense.userid==userids, UserExpense.expense_date==wkex_id)\n\n todays = str(date.today())\n state=\"not\"\n if (wkex_id == todays) is True:\n state=\"today\"\n if (wkex_id > todays) is True:\n abort(404)\n\n # Forms\n if form.validate_on_submit():\n expenses = UserExpense(category=form.category.data, description=form.description.data,\n expense=form.expense.data, expensedate=current_user)\n db.session.add(expenses)\n db.session.commit()\n flash('Expense added!', 'success')\n return redirect(url_for('userexpenses', wkex_id=wkex_id))\n\n return render_template('expenses.html', normal='normal', title='Expenses',\n form=form, items=items, name=name, ids=wkex_id, state=state)\n\n@app.route('/expense/<string:wkex_id>/<int:ex_id>/delete', methods=['GET','POST'])\n@login_required\ndef delete_expense(wkex_id, ex_id):\n expenses = db.session.query(UserExpense).get_or_404(ex_id) # Query for valid access\n if expenses.expensedate != current_user:\n abort(403)\n db.session.delete(expenses)\n db.session.commit()\n flash('Expense deleted', 'success')\n return redirect(url_for('overview'))\n\n@app.route(\"/expense/<string:wkex_id>/<int:ex_id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_expense(wkex_id, ex_id):\n name = current_user.username\n expenses = db.session.query(UserExpense).get_or_404(ex_id) # Query for valid access\n if expenses.expensedate != current_user:\n abort(403)\n form = NewExpense()\n\n if form.validate_on_submit():\n expenses.category = form.category.data\n expenses.description = form.description.data\n expenses.expense = form.expense.data\n db.session.commit()\n flash('Expense updated', 'success')\n return redirect(url_for('overview'))\n\n elif request.method=='GET':\n form.category.data = expenses.category\n form.description.data =expenses.description\n form.expense.data = expenses.expense\n return render_template('expenses.html', title='Expenses',form=form, name=name, wkex_id=wkex_id, state='today')\n\n@app.route(\"/expense/<string:day_id>/charts\", methods=['GET', 'POST'])\n@login_required\ndef charts(day_id):\n userids 
= current_user.id\n name = current_user.username\n # Queries\n categories = db.session.query(UserExpense.category).filter(UserExpense.userid==userids,\n UserExpense.expense_date==day_id).distinct()\n cat_list=[]\n for u in categories:\n cat_list.append(f'{u.category}')\n\n counts_list=[]\n for item in cat_list:\n counts = db.session.query(UserExpense.category).filter(UserExpense.userid==userids,\n UserExpense.expense_date==day_id,\n UserExpense.category==item).count()\n counts_list.append(counts)\n\n sum_list=[]\n for item in cat_list:\n Sums = db.session.query(func.sum(UserExpense.expense)).filter(UserExpense.userid==userids,\n UserExpense.expense_date==day_id,\n UserExpense.category==item).scalar()\n sum_list.append(f'{Sums}')\n\n # Highest expenditure graph\n fig, axs = plt.subplots(figsize=(10, 5))\n axs.bar(cat_list, [float(g) for g in sum_list])\n fig.suptitle('Expenditure breakdown')\n\n # Frequency graph\n fig1, ax1 = plt.subplots(figsize=(10, 5), subplot_kw=dict(aspect=\"equal\"))\n\n wedges, texts = ax1.pie(counts_list, wedgeprops=dict(width=0.5), startangle=-40)\n\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props, zorder=0, va=\"top\")\n\n for i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1)/2. + p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n ax1.annotate(cat_list[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),\n horizontalalignment=horizontalalignment, **kw)\n\n ax1.set_title(\"Expenses category frequency\")\n\n # Convert plot to PNG image\n highpngImage = io.BytesIO()\n freqpngImage = io.BytesIO()\n FigureCanvas(fig).print_png(highpngImage)\n FigureCanvas(fig1).print_png(freqpngImage)\n\n # Encode PNG image to base64 string\n highpngImageString = \"data:image/png;base64,\"\n highpngImageString += base64.b64encode(highpngImage.getvalue()).decode('utf8')\n\n freqpngImageString = \"data:image/png;base64,\"\n freqpngImageString += base64.b64encode(freqpngImage.getvalue()).decode('utf8')\n\n return render_template('charts.html',title ='History', name=name,\n image1=highpngImageString, image2=freqpngImageString, day_id=day_id)\n\n if __name__ == '__main__':\n app.run()\n"
] | [
[
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.sign",
"matplotlib.pyplot.subplots",
"numpy.deg2rad"
]
] |
comp5331-Xtimeseries/mWDN | [
"3805f90230b93d04f86201079358ec1f6dd6bb2d"
] | [
"utils.py"
] | [
"import torch\nimport numpy as np;\nfrom torch.autograd import Variable\n\n\ndef normal_std(x):\n return x.std() * np.sqrt((len(x) - 1.)/(len(x)))\n\nclass Data_utility(object):\n # train and valid is the ratio of training set and validation set. test = 1 - train - valid\n def __init__(self, dSet, train, valid, cuda, horizon, window, normalize = 2):\n self.cuda = cuda;\n self.P = window;\n self.h = horizon\n self.rawdat = dSet\n self.dat = np.zeros(self.rawdat.shape);\n self.n, self.m = self.dat.shape;\n self.normalize = 2\n self.scale = np.ones(self.m);\n self._normalized(normalize);\n self._split(int(train * self.n), int((train+valid) * self.n), self.n);\n \n self.scale = torch.from_numpy(self.scale).float();\n tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m);\n \n if self.cuda:\n self.scale = self.scale.cuda();\n self.scale = Variable(self.scale);\n \n self.rse = normal_std(tmp);\n self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)));\n \n def _normalized(self, normalize):\n #normalized by the maximum value of entire matrix.\n \n if (normalize == 0):\n self.dat = self.rawdat\n \n if (normalize == 1):\n self.dat = self.rawdat / np.max(self.rawdat);\n \n #normlized by the maximum value of each row(sensor).\n if (normalize == 2):\n for i in range(self.m):\n self.scale[i] = np.max(np.abs(self.rawdat[:,i]));\n self.dat[:,i] = self.rawdat[:,i] / np.max(np.abs(self.rawdat[:,i]));\n \n \n def _split(self, train, valid, test):\n \n train_set = range(self.P+self.h-1, train);\n valid_set = range(train, valid);\n test_set = range(valid, self.n);\n self.train = self._batchify(train_set, self.h);\n self.valid = self._batchify(valid_set, self.h);\n self.test = self._batchify(test_set, self.h);\n \n \n def _batchify(self, idx_set, horizon):\n \n n = len(idx_set);\n X = torch.zeros((n,self.P,self.m));\n Y = torch.zeros((n,self.m));\n \n for i in range(n):\n end = idx_set[i] - self.h + 1;\n start = end - self.P;\n X[i,:,:] = torch.from_numpy(self.dat[start:end, :]);\n Y[i,:] = torch.from_numpy(self.dat[idx_set[i], :]);\n\n return [X, Y];\n\n def get_batches(self, inputs, targets, batch_size, shuffle=True):\n length = len(inputs)\n if shuffle:\n index = torch.randperm(length)\n else:\n index = torch.LongTensor(range(length))\n start_idx = 0\n while (start_idx < length):\n end_idx = min(length, start_idx + batch_size)\n excerpt = index[start_idx:end_idx]\n X = inputs[excerpt]; Y = targets[excerpt];\n # if (self.cuda):\n # X = X.cuda();\n # Y = Y.cuda();\n yield Variable(X), Variable(Y);\n start_idx += batch_size\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"torch.autograd.Variable",
"numpy.abs",
"torch.from_numpy",
"numpy.max",
"torch.randperm",
"torch.zeros",
"torch.mean"
]
] |
timmyzhao/ptstat | [
"0401203e5b6053df6d62b2af9ab4b831f1b41660"
] | [
"ptstat/dist/categorical.py"
] | [
"import torch\nfrom ptstat.core import RandomVariable, _to_v\n\n\nclass Categorical(RandomVariable):\n \"\"\"\n Categorical over 0,...,N-1 with arbitrary probabilities, 1-dimensional rv, long type.\n \"\"\"\n def __init__(self, p=None, p_min=1E-6, size=None, cuda=False):\n super(Categorical, self).__init__()\n if size:\n assert len(size) == 2, str(size)\n p = _to_v(1 / size[1], size, cuda)\n else:\n assert len(p.size()) == 2, str(p.size())\n assert torch.min(p.data) >= 0, str(torch.min(p.data))\n assert torch.max(torch.abs(torch.sum(p.data, 1) - 1)) <= 1E-5\n self._p = torch.clamp(p, p_min)\n\n def _size(self):\n return self._p.size()[0], 1 # Type is Long.\n\n def _log_pdf(self, x):\n return torch.log(self._p.gather(1, x)).squeeze()\n\n def _sample(self):\n return self._p.multinomial(1, True)\n\n def _entropy(self):\n return - torch.sum(self._p * torch.log(self._p), 1).squeeze()\n"
] | [
[
"torch.sum",
"torch.min",
"torch.log",
"torch.clamp"
]
] |
SX-Aurora/orchespy | [
"6b85a78831c8e3e05df7143101ca3418817fcbbd"
] | [
"tests/device_tests/test_device_args_numpy_module.py"
] | [
"from orchespy import device\nfrom orchespy.devicetype import CUDAGPU, Host, VE\nimport sys\nimport pytest\n\nimport numpy as np\n\nif \"cupy\" in sys.modules:\n import cupy as cp\nif \"nlcpy\" in sys.modules:\n import nlcpy as vp\n\nno_nlcpy = pytest.mark.skipif(\n \"nlcpy\" not in sys.modules, reason=' test require nlcpy. ')\nno_cupy = pytest.mark.skipif(\n \"cupy\" not in sys.modules, reason=' test require cupy. ')\n\n\n# for tests with an argument\n@device(Host, numpy_module_arg='xp')\ndef create_array_init_5_at_host(shape, dtype, order, xp):\n return xp.full(shape, 5, dtype=dtype, order=order)\n\n\n@device(CUDAGPU, numpy_module_arg='xp')\ndef create_array_init_5_at_gpu(shape, dtype, order, xp):\n return xp.full(shape, 5, dtype=dtype, order=order)\n\n\n@device(VE, numpy_module_arg='xp')\ndef create_array_init_5_at_ve(shape, dtype, order, xp):\n return xp.full(shape, 5, dtype=dtype, order=order)\n\n\n@pytest.mark.parametrize('shape', [(2), (2, 2), (2, 2, 2), (2, 3), (2, 3, 4)])\n@pytest.mark.parametrize('dtype', [\n 'i4', 'i8', 'u4', 'u8', 'f4', 'f8', 'c8', 'c16'\n ])\n@pytest.mark.parametrize('order', ['C', 'F'])\nclass TestDeviceArgs:\n def test_device_args_host(self, shape, dtype, order):\n y = create_array_init_5_at_host(shape, dtype, order)\n assert(isinstance(y, np.ndarray))\n expected = np.full(shape, 5, dtype=dtype, order=order)\n assert((y == expected).all())\n\n @no_cupy\n def test_device_args_gpu(self, shape, dtype, order):\n y = create_array_init_5_at_gpu(shape, dtype, order)\n assert(isinstance(y, cp.ndarray))\n expected = cp.full(shape, 5, dtype=dtype, order=order)\n assert((y == expected).all())\n\n @no_nlcpy\n def test_device_args_ve(self, shape, dtype, order):\n y = create_array_init_5_at_ve(shape, dtype, order)\n assert(isinstance(y, vp.ndarray))\n expected = vp.full(shape, 5, dtype=dtype, order=order)\n assert((y == expected).all())\n"
] | [
[
"numpy.full"
]
] |
ncfrey/mlmsynth | [
"99fc8fabba511aefd6f0a0be4e85c78c54dd3648"
] | [
"pumml/learners.py"
] | [
"\"\"\"\nDeploy semi-supervised PU machine learning models.\n\nThis module provides classes for training, testing, and deploying a PU\nlearning model for predicting material synthesizability. Utility functions\nfor plotting aid in visualizing and analyzing results.\n\nReferences:\n [1] DOI: 10.1021/acsnano.8b08014\n [2] DOI: 10.1145/1401890.1401920\n [3] DOI: 10.1016/j.patrec.2013.06.010\n\"\"\"\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture, BayesianGaussianMixture\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.utils import resample\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom monty.serialization import dumpfn\n\nimport pandas as pd\nimport seaborn as sns\nimport os\nimport pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\n\n__author__ = \"Nathan C. Frey, Jin Wang\"\n__copyright__ = \"MIT License\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Nathan C. Frey\"\n__email__ = \"n.frey@seas.upenn.edu\"\n__status__ = \"Development\"\n__date__ = \"Aug 2017\"\n\n\nclass PULearner:\n def __init__(self):\n \"\"\"A machine learning model that predicts material synthesizability.\n\n Positive samples are experimentally synthesized materials. Unlabeled\n samples are not-yet synthesized materials.\n\n Features for training data might be generated by first-principles \n (density functional theory) calculations, or structural or chemical\n data looked up from a table.\n\n Hyperparameters are initialized with sensible defaults, but any newly\n trained model should have hyperparams carefully converged.\n\n Attributes:\n pu_stats (dict): Outputs of cv_baggingDT\n df_U (DataFrame): Unlabeled data.\n df_P (DataFrame): Positive data.\n\n synth_scores (list): Synthesizability scores (between 0 and 1) of\n unlabeled samples.\n labels (list): Likely synthesizable (1) or not (0)\n feat_importances (DataFrame): Feature importances from trained\n decision tree classifiers. Index corresponds to feature index\n in original data. \n\n \"\"\"\n\n self.pu_stats = None\n self.df_U = None\n self.df_P = None\n self.synth_scores = None\n self.labels = None\n self.feat_importances = None\n\n def cv_baggingDT(self, pu_data, splits=10, repeats=10, bags=100, filename=\"\"):\n \"\"\"\n Train bagged decision tree base classifiers and do repeated \n k-fold CV.\n\n Synthesizability scores (0 = not synthesizable, 1 = already\n synthesized) are generated for an unlabeled sample by averaging\n the scores from the ensemble of decision tree classifiers that\n have not been trained on that sample. 
\n\n Args:\n pu_data (json): A file where each row describes a material.\n There MUST be a column called \"PU_label\" where a 1 value\n indicates a synthesized (positive) compound and a 0 value\n indicates an unlabeled compound.\n\n splits (int): Number of splits in k-fold CV.\n repeats (int): Number of repeated k-fold CV.\n bags (int): Number of bags in bootstrap aggregation.\n filename (string): Save model training results to file with\n filename ending in .json or .pkl.\n\n Returns:\n pu_stats (dict): Metrics and outputs of PU learning model\n training.\n\n \"\"\"\n \n print(\"Start PU Learning.\")\n\n # Preprocess data and set attributes\n df = pd.read_json(pu_data)\n df_P, df_U, X_P, X_U = self._process_pu_data(df)\n self.df_P = df_P\n self.df_U = df_U\n\n # Split data into training and test splits for k-fold CV\n kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=42)\n\n # Scores for PU learning (tpr = True Positive Rate)\n scores = []\n tprs = []\n\n # Predicted synthesis probability of CVed P and U sets\n prob_P = np.ones(shape=(X_P.shape[0], splits * repeats))\n prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats))\n\n # Feature importance\n feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats))\n\n idsp = 0 # index of repeated k splits\n\n # Loop over P and U training/test samples\n for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)):\n\n # Number of P and U training samples\n N_ptrain = X_P[ptrain].shape[0]\n N_utrain = X_U[utrain].shape[0]\n\n d = X_P.shape[1]\n K = N_ptrain\n train_label = np.zeros(shape=(N_ptrain + K,))\n train_label[:N_ptrain] = 1.0 # Synthesized (positive)\n\n # Out of bag samples\n n_oob = np.zeros(shape=(N_utrain,))\n f_oob = np.zeros(shape=(N_utrain, 2))\n\n # Sums of probabilities of test sets\n f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2))\n f_utest = np.zeros(shape=(X_U[utest].shape[0], 2))\n\n # Bootstrap resampling for each bag\n for i in range(bags):\n bootstrap_sample = np.random.choice(\n np.arange(N_utrain), replace=True, size=K\n )\n\n # Positive samples and bootstrapped unlabeled samples\n data_bootstrap = np.concatenate(\n (X_P[ptrain], X_U[bootstrap_sample, :]), axis=0\n )\n\n # Train decision tree classifier\n model = DecisionTreeClassifier(\n max_depth=None,\n max_features=None,\n criterion=\"gini\",\n class_weight=\"balanced\",\n )\n\n model.fit(data_bootstrap, train_label)\n\n # Index for the oob samples\n idx_oob = sorted(\n set(range(N_utrain)) - set(np.unique(bootstrap_sample))\n )\n\n # Transductive learning on oob samples\n f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob])\n n_oob[idx_oob] += 1\n f_ptest += model.predict_proba(X_P[ptest])\n f_utest += model.predict_proba(X_U[utest])\n feat_rank[:, idsp] = model.feature_importances_\n\n # Predicted synthesis probabilities of unlabeled samples\n predict_utrain = f_oob[:, 1] / n_oob\n\n # Predicted probabilities for P and U test sets\n predict_ptest = f_ptest[:, 1] / bags\n predict_utest = f_utest[:, 1] / bags\n\n # Find predicted positives\n true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0]\n u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0]\n\n N_ptest = X_P[ptest].shape[0]\n N_utest = X_U[utest].shape[0]\n\n # Predicted positive ratio in test set\n p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001\n\n # Compute PU recall (TPR) and score metrics\n recall = true_pos / N_ptest\n score = recall ** 2 / p_pred_pos\n scores.append(score)\n tprs.append(recall)\n\n # Predicted 
probabilities\n prob_P[ptest, idsp] = predict_ptest\n prob_U[utrain, idsp] = predict_utrain\n prob_U[utest, idsp] = predict_utest\n idsp += 1\n\n # Progress update\n if (idsp + 1) % splits == 0:\n tpr_tmp = np.asarray(tprs[-splits - 1 : -1])\n print(\n \"Performed Repeated \"\n + str(splits)\n + \"-fold: \"\n + str(idsp // splits + 1)\n + \" out of \"\n + str(repeats)\n )\n print(\n \"True Positive Rate: %0.2f (+/- %0.2f)\"\n % (tpr_tmp.mean(), tpr_tmp.std() * 2)\n )\n\n # Predicted labels from k-fold CV\n label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int)\n label_U[:, : splits * repeats][np.where(prob_U > 0.5)] = 1\n label_U[:, splits * repeats] = np.sum(\n label_U[:, : splits * repeats + 1], axis=1\n )\n\n tprs = np.asarray(tprs)\n scores = np.asarray(scores)\n\n # Metrics for each model in the k-folds\n label_U_rp = np.zeros(shape=(X_U.shape[0], repeats), dtype=int)\n prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats))\n feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats))\n tpr_rp = np.zeros(shape=(repeats,))\n scores_rp = np.zeros(shape=(repeats,))\n labels = np.zeros(shape=(X_U.shape[0],))\n\n for i in range(repeats):\n prob_U_rp[:, i] = prob_U[:, i * splits : (i + 1) * splits].mean(axis=1)\n feat_rank_rp[:, i] = feat_rank[:, i * splits : (i + 1) * splits].mean(\n axis=1\n )\n tpr_rp[i] = tprs[i * splits : (i + 1) * splits].mean()\n scores_rp[i] = scores[i * splits : (i + 1) * splits].mean()\n\n label_U_rp[np.where(prob_U_rp > 0.5)] = 1\n prob = prob_U_rp.mean(axis=1)\n labels[np.where(prob > 0.5)] = 1\n\n # Get confidence interval of TPR for each kfold\n tpr_low, tpr_up = self.bootstrapCI(tpr_rp)\n scores_low, scores_up = self.bootstrapCI(scores_rp)\n\n # PU learning metrics\n metrics = np.asarray(\n [tpr_rp.mean(), tpr_low, tpr_up, scores_rp.mean(), scores_low, scores_up]\n )\n\n print(\"Accuracy: %0.2f\" % (tpr_rp.mean()))\n print(\"95%% confidence interval: [%0.2f, %0.2f]\" % (tpr_low, tpr_up))\n\n # Metrics and results from training / testing\n pu_stats = {\n \"prob\": prob,\n \"labels\": labels,\n \"metrics\": metrics,\n \"prob_rp\": prob_U_rp,\n \"label_rp\": label_U_rp,\n \"tpr_rp\": tpr_rp,\n \"scores_rp\": scores_rp,\n \"feat_rank_rp\": feat_rank_rp,\n }\n\n # Save results\n if filename:\n if filename.endswith(\".json\"):\n dumpfn(pu_stats, filename)\n if filename.endswith(\".pkl\"):\n with open(filename, \"wb\") as file:\n pickle.dump(pu_stats, file, protocol=pickle.HIGHEST_PROTOCOL)\n\n self.pu_stats = pu_stats\n return pu_stats\n\n def bootstrapCI(self, data, ci=95, ns=10000):\n \"\"\"Compute confidence interval of the TPR.\n\n Args:\n data (array): Array of TPRs for each kfold.\n ci (int): Confidence interval.\n ns (int): Number of bootstrap resamplings.\n\n Returns:\n lower (float): Lower endpoint of CI.\n upper (float): Upper endpoint of CI.\n \n \"\"\"\n\n bs_rsample = []\n for _ in range(ns):\n rsample = resample(data, n_samples=len(data))\n bs_rsample.append(np.mean(rsample))\n\n bs_rsample = np.asarray(bs_rsample)\n lower = np.percentile(bs_rsample, (100 - ci) / 2)\n upper = np.percentile(bs_rsample, ci + (100 - ci) / 2)\n\n return lower, upper\n\n def corr_heatmap(self, num_feats=10, fname=\"\"):\n \"\"\"Plot correlation matrix between synthesizability and features.\n\n cv_baggingDT must be run first.\n\n Args:\n num_feats (int): How many features to consider.\n fname (str): Filename if correlation plot should be saved.\n\n Returns:\n None (generates plots)\n\n \"\"\"\n\n pu_stats = self.pu_stats\n df_U = self.df_U\n df_U_copy = 
df_U.drop(columns=[\"PU_label\"])\n\n # Get normalized, sorted & ranked list of most important features\n synth_scores = pu_stats[\"prob\"]\n df_U_copy[\"synth_score\"] = synth_scores\n\n # Make correlation matrix of top \"num_feats\" features\n corrmat = df_U_copy.corr()\n cols = corrmat.nlargest(num_feats, \"synth_score\")[\"synth_score\"].index\n cm = np.corrcoef(df_U_copy[cols].values.T)\n\n sns.set(style='ticks')\n rcParams['figure.dpi'] = 300\n\n fig, ax = plt.subplots(1, 1)\n hm = sns.heatmap(\n cm,\n ax=ax,\n cbar=True,\n annot=True,\n square=True,\n fmt=\".2f\",\n annot_kws={\"size\": 7},\n yticklabels=cols.values,\n xticklabels=cols.values,\n )\n\n if fname:\n self.save_plot(fname + \".png\", fig, ax)\n\n def get_feat_importances(self, plot_format=\"\"):\n \"\"\"Process output from PU learning k-fold cross validation.\n\n cv_baggingDT must be run first.\n\n If plot_format is specified, a feature importance plot will\n be saved.\n\n Args:\n plot_format (str): svg, png, or pdf file format for saving simple\n visualizations of feature importance and correlation. \n\n \"\"\"\n\n pu_stats = self.pu_stats\n\n # Feature importances for individual repetitions of kfold CV\n feat_rank_rp = pu_stats[\"feat_rank_rp\"]\n feat_importances = np.sum(feat_rank_rp, axis=1)\n\n df_U = self.df_U\n df_U = df_U._get_numeric_data()\n df_U_copy = df_U.drop(columns=[\"PU_label\"])\n feat_names = df_U_copy.columns\n\n # Index corresponds to feature in original data\n df_feat = pd.DataFrame(columns=[\"feature\", \"importance\"])\n df_feat[\"feature\"] = feat_names\n df_feat[\"importance\"] = feat_importances\n\n # Sort by importance\n df_feat_sort = df_feat.sort_values(by=\"importance\", ascending=False)\n max_value = df_feat[\"importance\"].max()\n\n # Normalize to 1\n df_feat_sort[\"importance\"] = df_feat_sort[\"importance\"] / max_value\n\n # Set feature importance attribute\n self.feat_importances = df_feat\n\n if plot_format in [\"svg\", \"pdf\", \"png\"]:\n\n # Feature importance plot\n fig, ax = plt.subplots(figsize=(10, 4))\n with sns.axes_style(style=\"ticks\"):\n sns.barplot(x=\"feature\", y=\"importance\", data=df_feat_sort)\n ax.set_xticklabels(\n ax.get_xticklabels(), rotation=45, ha=\"right\", fontsize=7\n )\n filename = \"feat_importance.\" + plot_format\n self.save_plot(filename, fig, ax)\n\n @staticmethod\n def _process_pu_data(data):\n \"\"\"Utility method for processing input data.\n\n Args:\n data (DataFrame): Data with positive and unlabeled samples.\n\n Returns:\n X_P (array): Positive sample set.\n X_U (array): Unlabeled sample set.\n\n \"\"\"\n\n df_P = data.query(\"PU_label == 1\") # Positive value is 1\n df_U = data.query(\"PU_label == 0\") # Unlabeled value is 0\n\n # Chop off PU label and drop non-numeric columns for sklearn\n X_P = np.asarray(df_P.drop(columns=[\"PU_label\"])._get_numeric_data())\n X_U = np.asarray(df_U.drop(columns=[\"PU_label\"])._get_numeric_data())\n\n return df_P, df_U, X_P, X_U\n\n @staticmethod\n def save_plot(filename, fig, ax):\n \"\"\"Utility method for saving simple visualizations.\n\n Args:\n filename (str): Name ending in .svg, .png, or .pdf\n fig, ax (objects): Matplotlib objects.\n\n Returns:\n None\n\n \"\"\"\n\n sns.set_style(\"ticks\")\n fig.tight_layout()\n fig.savefig(filename)\n\n\nclass PUInteract:\n def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()):\n \"\"\"Consider parent and child phase PU learning scores.\n\n This class looks at PU learning scores for parent bulk\n compounds (e.g. 
layered h-BN) and scores of the child phases\n along with descriptors like exfoliation energy and changes\n in structural/electronic properties to predict (parent, child)\n pairs that can be synthesized.\n\n Parent and child must be linked by a column that allows the\n dataframes to be merged. There should also be additional features\n that characterize the structural and chemical differences between\n parents and children, e.g. changes in bond lengths, etc.\n\n Unsupervised clustering models are used to identify synthesizable \n (parent/child) pairs.\n\n Args:\n df_parent (str): Parent data filename.\n pu_parent (dict): Output from PULearner.cv_baggingDT.\n df_child (str): Child data filename.\n pu_child (dict): Output from PULearner.cv_baggingDT.\n merge_on (tuple): Column name(s) on which to merge.\n feats (tuple): Column names to use as features. If empty, use all\n possible columns. \n\n Attributes:\n merged_df (DataFrame): (Parent, child) pair data.\n X (array): Array representation of merged_df.\n\n Returns:\n None\n\n \"\"\"\n\n df_parent = pd.read_json(df_parent)\n df_child = pd.read_json(df_child)\n\n # Set scores from PULearner\n df_parent[\"synth_score\"] = 1\n df_child[\"synth_score\"] = 1\n\n df_parent.loc[df_parent.eval(\"PU_label == 0\"), \"synth_score\"] = pu_parent[\n \"prob\"\n ]\n df_child.loc[df_child.eval(\"PU_label == 0\"), \"synth_score\"] = pu_child[\"prob\"]\n\n # Merge parent and child dfs\n merge_on = list(merge_on)\n df = pd.merge(\n df_parent, df_child, on=merge_on, how=\"outer\", suffixes=[\"_p\", \"_c\"]\n )\n df.drop(columns=[\"PU_label_p\", \"PU_label_c\"], inplace=True, axis=1)\n\n if feats:\n feat_names = [f + \"_p\" for f in feats] + [f + \"_c\" for f in feats]\n df = df[feat_names]\n\n self.merged_df = df\n self.X = np.array(df)\n\n def do_kmeans(self, n_clusters=2, seed=42):\n \"\"\"Do k-means clustering on (parent, child) pairs.\n\n Args:\n n_clusters (int): Number of clusters.\n seed (int): Fix random seed for kmeans reproducibility.\n\n Returns:\n kmeans_output (dict): kmeans cluster centers, cluster labels for\n each (parent, child)\n\n \"\"\"\n\n np.random.seed(seed)\n km = KMeans(n_clusters=n_clusters, random_state=seed)\n\n km.fit(self.X)\n kmeans_output = {\n \"cluster_centers\": km.cluster_centers_,\n \"cluster_labels\": km.labels_,\n }\n\n return kmeans_output\n\n def do_gmixture(self, n_components=2, seed=42):\n \"\"\"\n Estimate parameters of a Gaussian mixture distribution of (parent,\n child) data.\n\n Args:\n n_components (int): Number of components in GMM.\n seed (int): Random seed.\n\n Returns:\n gmm_output (dict): Predicted labels of (parent, child) pairs and\n predicted posterior probabilities of each component.\n\n \"\"\"\n\n np.random.seed(seed)\n gmm = GaussianMixture(\n n_components=n_components, random_state=seed, covariance_type=\"full\"\n )\n\n gmm.fit(self.X)\n gmm_labels = gmm.predict(self.X)\n gmm_prob = gmm.predict_proba(self.X)[:, 0]\n gmm_output = {\"gmm_labels\": gmm_labels, \"gmm_prob\": gmm_prob}\n\n return gmm_output\n\n def do_bgm(self, n_components=6, seed=42):\n \"\"\"Bayesian Gaussian Mixture.\n\n Infer the effective number of components in a Gaussian Mixture Model\n via variational Bayesian estimation.\n\n n_effective_componenents < n_components if the model sets some\n weights close to 0.\n\n Args:\n n_components (int): Number of components in GMM.\n seed (int): Random seed.\n\n Returns:\n bgm_output (dict): Labels and probabilities.\n\n \"\"\"\n\n np.random.seed(seed)\n bgm = BayesianGaussianMixture(\n 
n_components=n_components,\n covariance_type=\"full\",\n weight_concentration_prior=1e-2,\n weight_concentration_prior_type=\"dirichlet_process\",\n mean_precision_prior=1e-2,\n init_params=\"random\",\n max_iter=100,\n random_state=seed,\n )\n\n bgm.fit(self.X)\n bgm_labels = bgm.predict(self.X)\n bgm_prob = bgm.predict_proba(self.X)[:, 0]\n\n bgm_output = {\"bgm_labels\": bgm_labels, \"bgm_prob\": bgm_prob}\n\n return bgm_output\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.random.seed",
"numpy.asarray",
"sklearn.cluster.KMeans",
"sklearn.tree.DecisionTreeClassifier",
"pandas.read_json",
"numpy.where",
"numpy.unique",
"numpy.mean",
"numpy.corrcoef",
"numpy.zeros",
"sklearn.model_selection.RepeatedKFold",
"matplotlib.pyplot.subplots",
"numpy.arange",
"pandas.merge",
"sklearn.mixture.GaussianMixture",
"sklearn.mixture.BayesianGaussianMixture",
"numpy.percentile",
"pandas.DataFrame",
"numpy.array",
"numpy.concatenate"
]
] |
CitrineInformatics/smlb | [
"28a3689bd36aa8d51031b4faf7e2331bbd8148a9"
] | [
"tests/learners/scikit_learn/test_gpr_skl.py"
] | [
"\"\"\"GaussianProcessRegressionSklearn tests.\n\nScientific Machine Learning Benchmark:\nA benchmark of regression models in chem- and materials informatics.\n\"\"\"\n\nimport pytest\n\nimport numpy as np\n\nskl = pytest.importorskip(\"sklearn\")\n\nimport smlb\nfrom smlb.learners.scikit_learn.gaussian_process_regression_sklearn import GaussianProcessRegressionSklearn\n\n\ndef test_GaussianProcessRegressionSklearn_1():\n \"\"\"Simple examples.\"\"\"\n\n # linear function with linear kernel\n kernel = skl.gaussian_process.kernels.DotProduct(sigma_0=0, sigma_0_bounds=\"fixed\")\n gpr = GaussianProcessRegressionSklearn(kernel=kernel, optimizer=None, rng=1)\n train_data = smlb.TabularData(data=np.array([[-1], [1]]), labels=np.array([-1, 1]))\n valid_data = smlb.TabularData(data=np.array([[-2], [-1], [0], [1], [2]]))\n preds = gpr.fit(train_data).apply(valid_data)\n mean, stddev = preds.mean, preds.stddev\n\n assert np.allclose(mean, [-2, -1, 0, 1, 2])\n assert stddev[0] > stddev[1] > stddev[2] < stddev[3] < stddev[4]\n\n\ndef test_GaussianProcessRegressionSklearn_2():\n \"\"\"All predictive distributions.\n\n Linear noise-free function, linear kernel + white noise kernel.\n The optimized noise level is expected to go to its lower bound.\n \"\"\"\n\n kernel = skl.gaussian_process.kernels.DotProduct(\n sigma_0=0, sigma_0_bounds=\"fixed\"\n ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=0.1, noise_level_bounds=(1e-5, 1e-5))\n gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1)\n n = 100\n train_data = smlb.TabularData(\n data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3\n )\n valid_data = smlb.TabularData(data=train_data.samples())\n preds = gpr.fit(train_data).apply(valid_data)\n\n assert preds.has_signal_part and preds.has_noise_part\n conf, noise = preds.signal_part, preds.noise_part\n\n assert np.allclose(conf.mean, train_data.labels())\n assert np.allclose(conf.stddev, np.ones(n) * np.sqrt(1e-5), atol=1e-3)\n\n assert (preds.mean == conf.mean).all()\n assert np.allclose(preds.stddev, np.ones(n) * np.sqrt(np.square(conf.stddev) + 1e-5))\n\n assert np.allclose(noise.mean, np.zeros(shape=n))\n assert np.allclose(noise.stddev, np.sqrt(1e-5))\n\n\ndef test_GaussianProcessRegressionSklearn_3():\n \"\"\"All predictive distributions.\n\n Linear noisy function, linear kernel + white noise kernel.\n The optimized noise level is expected to go to its true value.\n \"\"\"\n\n kernel = skl.gaussian_process.kernels.DotProduct(\n sigma_0=0, sigma_0_bounds=\"fixed\"\n ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-5, 1e5))\n gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1)\n n, nlsd = 100, 0.5\n data = smlb.TabularData(data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3)\n data = smlb.LabelNoise(noise=smlb.NormalNoise(stddev=nlsd, rng=1)).fit(data).apply(data)\n preds = gpr.fit(data).apply(data)\n\n assert preds.has_signal_part and preds.has_noise_part\n conf, noise = preds.signal_part, preds.noise_part\n\n assert np.allclose(conf.mean, np.ones(n) * 3, atol=1e-1)\n assert np.allclose(conf.stddev, np.ones(n) * nlsd, atol=1e-1)\n\n assert (preds.mean == conf.mean).all()\n assert np.allclose(preds.stddev, np.sqrt(np.square(conf.stddev) + np.square(nlsd)), atol=1e-1)\n\n assert np.allclose(noise.mean, np.zeros(shape=n))\n assert np.allclose(noise.stddev, nlsd, atol=1e-1)\n"
] | [
[
"numpy.allclose",
"numpy.ones",
"numpy.zeros",
"numpy.sqrt",
"numpy.square",
"numpy.array"
]
] |
lmarti/pandas | [
"fdfd66cdf3f357fb52831eb644897e144a0d7f30"
] | [
"pandas/core/frame.py"
] | [
"\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import division\n# pylint: disable=E1101,E1103\n# pylint: disable=W0212,W0231,W0703,W0622\n\nimport functools\nimport collections\nimport itertools\nimport sys\nimport types\nimport warnings\n\nfrom numpy import nan as NA\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas.core.common import (isnull, notnull, PandasError, _try_sort,\n _default_index, _maybe_upcast, is_sequence,\n _infer_dtype_from_scalar, _values_from_object,\n is_list_like, _get_dtype, _maybe_box_datetimelike,\n is_categorical_dtype, is_object_dtype, _possibly_infer_to_datetimelike)\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.index import Index, MultiIndex, _ensure_index\nfrom pandas.core.indexing import (maybe_droplevels,\n convert_to_index_sliceable,\n check_bool_indexer)\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.series import Series\nfrom pandas.core.categorical import Categorical\nimport pandas.computation.expressions as expressions\nfrom pandas.computation.eval import eval as _eval\nfrom numpy import percentile as _quantile\nfrom pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u,\n OrderedDict, raise_with_traceback)\nfrom pandas import compat\nfrom pandas.sparse.array import SparseArray\nfrom pandas.util.decorators import deprecate, Appender, Substitution, \\\n deprecate_kwarg\n\nfrom pandas.tseries.period import PeriodIndex\nfrom pandas.tseries.index import DatetimeIndex\n\nimport pandas.core.algorithms as algos\nimport pandas.core.common as com\nimport pandas.core.format as fmt\nimport pandas.core.nanops as nanops\nimport pandas.core.ops as ops\n\nimport pandas.lib as lib\nimport pandas.algos as _algos\n\nfrom pandas.core.config import get_option\n\n#----------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame',\n axes_single_arg=\"{0,1,'index','columns'}\")\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame objects by performing a database-style join operation by\ncolumns or indexes.\n\nIf joining columns on columns, the DataFrame indexes *will be\nignored*. Otherwise if joining indexes on indexes or indexes on a column or\ncolumns, the index will be passed on.\n\nParameters\n----------%s\nright : DataFrame\nhow : {'left', 'right', 'outer', 'inner'}, default 'inner'\n * left: use only keys from left frame (SQL: left outer join)\n * right: use only keys from right frame (SQL: right outer join)\n * outer: use union of keys from both frames (SQL: full outer join)\n * inner: use intersection of keys from both frames (SQL: inner join)\non : label or list\n Field names to join on. Must be found in both DataFrames. If on is\n None and not merging on indexes, then it merges on the intersection of\n the columns by default.\nleft_on : label or list, or array-like\n Field names to join on in left DataFrame. 
Can be a vector or list of\n vectors of the length of the DataFrame to use a particular vector as\n the join key instead of columns\nright_on : label or list, or array-like\n Field names to join on in right DataFrame or vector/list of vectors per\n left_on docs\nleft_index : boolean, default False\n Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels\nright_index : boolean, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index\nsort : boolean, default False\n Sort the join keys lexicographically in the result DataFrame\nsuffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and right\n side, respectively\ncopy : boolean, default True\n If False, do not copy data unnecessarily\n\nExamples\n--------\n\n>>> A >>> B\n lkey value rkey value\n0 foo 1 0 foo 5\n1 bar 2 1 bar 6\n2 baz 3 2 qux 7\n3 foo 4 3 bar 8\n\n>>> merge(A, B, left_on='lkey', right_on='rkey', how='outer')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 4 foo 5\n2 bar 2 bar 6\n3 bar 2 bar 8\n4 baz 3 NaN NaN\n5 NaN NaN qux 7\n\nReturns\n-------\nmerged : DataFrame\n The output type will the be same as 'left', if it is a subclass\n of DataFrame.\n\"\"\"\n\n#----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame):\n\n \"\"\" Two-dimensional size-mutable, potentially heterogeneous tabular data\n structure with labeled axes (rows and columns). Arithmetic operations\n align on both row and column labels. Can be thought of as a dict-like\n container for Series objects. The primary pandas data structure\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, or DataFrame\n Dict can contain Series, arrays, constants, or list-like objects\n index : Index or array-like\n Index to use for resulting frame. Will default to np.arange(n) if\n no indexing information part of input data and no index provided\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n np.arange(n) if no column labels are provided\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n >>> d = {'col1': ts1, 'col2': ts2}\n >>> df = DataFrame(data=d, index=index)\n >>> df2 = DataFrame(np.random.randn(10, 5))\n >>> df3 = DataFrame(np.random.randn(10, 5),\n ... 
columns=['a', 'b', 'c', 'd', 'e'])\n\n See also\n --------\n DataFrame.from_records : constructor from tuples, also record arrays\n DataFrame.from_dict : from dicts of Series, arrays, or dicts\n DataFrame.from_csv : from CSV files\n DataFrame.from_items : from sequence of (key, value) pairs\n pandas.read_csv, pandas.read_table, pandas.read_clipboard\n \"\"\"\n _auto_consolidate = True\n\n @property\n def _constructor(self):\n return DataFrame\n\n _constructor_sliced = Series\n\n def __init__(self, data=None, index=None, columns=None, dtype=None,\n copy=False):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._data\n\n if isinstance(data, BlockManager):\n mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif isinstance(data, dict):\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,\n copy)\n\n # a masked array\n else:\n mask = ma.getmaskarray(data)\n if mask.any():\n data, fill_value = _maybe_upcast(data, copy=True)\n data[mask] = fill_value\n else:\n data = data.copy()\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = dict((k, data[k]) for k in data_columns)\n if columns is None:\n columns = data_columns\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, 'name', None):\n mgr = self._init_dict({data.name: data}, index, columns,\n dtype=dtype)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n elif isinstance(data, (list, types.GeneratorType)):\n if isinstance(data, types.GeneratorType):\n data = list(data)\n if len(data) > 0:\n if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:\n arrays, columns = _to_arrays(data, columns, dtype=dtype)\n columns = _ensure_index(columns)\n\n # set the index\n if index is None:\n if isinstance(data[0], Series):\n index = _get_names_from_index(data)\n elif isinstance(data[0], Categorical):\n index = _default_index(len(data[0]))\n else:\n index = _default_index(len(data))\n\n mgr = _arrays_to_mgr(arrays, columns, index, columns,\n dtype=dtype)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n elif isinstance(data, collections.Iterator):\n raise TypeError(\"data argument can't be an iterator\")\n else:\n try:\n arr = np.array(data, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as e:\n exc = TypeError('DataFrame constructor called with '\n 'incompatible data and dtype: %s' % e)\n raise_with_traceback(exc)\n\n if arr.ndim == 0 and index is not None and columns is not None:\n if isinstance(data, compat.string_types) and dtype is None:\n dtype = np.object_\n if dtype is None:\n dtype, data = _infer_dtype_from_scalar(data)\n\n values = np.empty((len(index), len(columns)), dtype=dtype)\n values.fill(data)\n mgr = self._init_ndarray(values, index, columns, dtype=dtype,\n copy=False)\n else:\n raise PandasError('DataFrame constructor not properly called!')\n\n NDFrame.__init__(self, mgr, fastpath=True)\n\n def _init_dict(self, data, index, columns, dtype=None):\n 
\"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a lot of exceptional cases.\n \"\"\"\n if columns is not None:\n columns = _ensure_index(columns)\n\n # prefilter if columns passed\n\n data = dict((k, v) for k, v in compat.iteritems(data)\n if k in columns)\n\n if index is None:\n index = extract_index(list(data.values()))\n else:\n index = _ensure_index(index)\n\n arrays = []\n data_names = []\n for k in columns:\n if k not in data:\n # no obvious \"empty\" int column\n if dtype is not None and issubclass(dtype.type,\n np.integer):\n continue\n\n if dtype is None:\n # 1783\n v = np.empty(len(index), dtype=object)\n else:\n v = np.empty(len(index), dtype=dtype)\n\n v.fill(NA)\n else:\n v = data[k]\n data_names.append(k)\n arrays.append(v)\n else:\n keys = list(data.keys())\n if not isinstance(data, OrderedDict):\n keys = _try_sort(keys)\n columns = data_names = Index(keys)\n arrays = [data[k] for k in keys]\n\n return _arrays_to_mgr(arrays, data_names, index, columns,\n dtype=dtype)\n\n def _init_ndarray(self, values, index, columns, dtype=None,\n copy=False):\n # input must be a ndarray, list, Series, index\n\n if isinstance(values, Series):\n if columns is None:\n if values.name is not None:\n columns = [values.name]\n if index is None:\n index = values.index\n else:\n values = values.reindex(index)\n\n # zero len case (GH #2234)\n if not len(values) and columns is not None and len(columns):\n values = np.empty((0, 1), dtype=object)\n\n # helper to create the axes as indexes\n def _get_axes(N, K, index=index, columns=columns):\n # return axes or defaults\n\n if index is None:\n index = _default_index(N)\n else:\n index = _ensure_index(index)\n\n if columns is None:\n columns = _default_index(K)\n else:\n columns = _ensure_index(columns)\n return index, columns\n\n # we could have a categorical type passed or coerced to 'category'\n # recast this to an _arrays_to_mgr\n if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype):\n\n if not hasattr(values,'dtype'):\n values = _prep_ndarray(values, copy=copy)\n values = values.ravel()\n elif copy:\n values = values.copy()\n\n index, columns = _get_axes(len(values),1)\n return _arrays_to_mgr([ values ], columns, index, columns,\n dtype=dtype)\n\n # by definition an array here\n # the dtypes will be coerced to a single dtype\n values = _prep_ndarray(values, copy=copy)\n\n if dtype is not None:\n\n if values.dtype != dtype:\n try:\n values = values.astype(dtype)\n except Exception as orig:\n e = ValueError(\"failed to cast to '%s' (Exception was: %s)\"\n % (dtype, orig))\n raise_with_traceback(e)\n\n index, columns = _get_axes(*values.shape)\n values = values.T\n\n # if we don't have a dtype specified, then try to convert objects\n # on the entire block; this is to convert if we have datetimelike's\n # embedded in an object type\n if dtype is None and is_object_dtype(values):\n values = _possibly_infer_to_datetimelike(values)\n\n return create_block_manager_from_blocks([values], [columns, index])\n\n @property\n def axes(self):\n return [self.index, self.columns]\n\n @property\n def shape(self):\n return (len(self.index), len(self.columns))\n\n def _repr_fits_vertical_(self):\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width=False):\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns. 
In case off non-interactive session, no\n boundaries apply.\n\n ignore_width is here so ipnb+HTML output can behave the way\n users expect. display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n\n width, height = fmt.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if ((max_columns and nb_columns > max_columns) or\n ((not ignore_width) and width and nb_columns > (width // 2))):\n return False\n\n if (ignore_width # used by repr_html under IPython notebook\n # scripts ignore terminal dims\n or not com.in_interactive_session()):\n return True\n\n if (get_option('display.width') is not None or\n com.in_ipython_frontend()):\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actualy checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[:min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max([len(l) for l in value.split('\\n')])\n\n return repr_width < width\n\n def _info_repr(self):\n \"\"\"True if the repr should show the info view.\"\"\"\n info_repr_option = (get_option(\"display.large_repr\") == \"info\")\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular DataFrame\n\n Invoked by unicode(df) in py2 only. 
Yields a Unicode String in both\n py2/py3.\n \"\"\"\n buf = StringIO(u(\"\"))\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = fmt.get_console_size()\n else:\n width = None\n self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,\n line_width=width, show_dimensions=show_dimensions)\n\n return buf.getvalue()\n\n def _repr_html_(self):\n \"\"\"\n Return a html representation for a particular DataFrame.\n Mainly for IPython notebook.\n \"\"\"\n # qtconsole doesn't report its line width, and also\n # behaves badly when outputting an HTML table\n # that doesn't fit the window, so disable it.\n # XXX: In IPython 3.x and above, the Qt console will not attempt to\n # display HTML, so this check can be removed when support for IPython 2.x\n # is no longer needed.\n if com.in_qtconsole():\n # 'HTML output is disabled in QtConsole'\n return None\n\n if self._info_repr():\n buf = StringIO(u(\"\"))\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace('<', r'<', 1).replace('>',\n r'>', 1)\n return '<pre>' + val + '</pre>'\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n return ('<div style=\"max-height:1000px;'\n 'max-width:1500px;overflow:auto;\">\\n' +\n self.to_html(max_rows=max_rows, max_cols=max_cols,\n show_dimensions=show_dimensions) + '\\n</div>')\n else:\n return None\n\n def iteritems(self):\n \"\"\"Iterator over (column, series) pairs\"\"\"\n if self.columns.is_unique and hasattr(self, '_item_cache'):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self.icol(i)\n\n def iterrows(self):\n \"\"\"\n Iterate over rows of DataFrame as (index, Series) pairs.\n\n Notes\n -----\n\n * ``iterrows`` does **not** preserve dtypes across the rows (dtypes\n are preserved across columns for DataFrames). 
For example,\n\n >>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])\n >>> row = next(df.iterrows())[1]\n >>> print(row['x'].dtype)\n float64\n >>> print(df['x'].dtype)\n int64\n\n Returns\n -------\n it : generator\n A generator that iterates over the rows of the frame.\n \"\"\"\n columns = self.columns\n for k, v in zip(self.index, self.values):\n s = Series(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index=True):\n \"\"\"\n Iterate over rows of DataFrame as tuples, with index value\n as first element of the tuple\n \"\"\"\n arrays = []\n if index:\n arrays.append(self.index)\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n return zip(*arrays)\n\n if compat.PY3: # pragma: no cover\n items = iteritems\n\n def __len__(self):\n \"\"\"Returns length of info axis, but here we use the index \"\"\"\n return len(self.index)\n\n def dot(self, other):\n \"\"\"\n Matrix multiplication with DataFrame or Series objects\n\n Parameters\n ----------\n other : DataFrame or Series\n\n Returns\n -------\n dot_product : DataFrame or Series\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if (len(common) > len(self.columns) or\n len(common) > len(other.index)):\n raise ValueError('matrices are not aligned')\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError('Dot product shape mismatch, %s vs %s' %\n (lvals.shape, rvals.shape))\n\n if isinstance(other, DataFrame):\n return self._constructor(np.dot(lvals, rvals),\n index=left.index,\n columns=other.columns)\n elif isinstance(other, Series):\n return Series(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, (np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return Series(result, index=left.index)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n #----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient='columns', dtype=None):\n \"\"\"\n Construct DataFrame from dict of array-like or dicts\n\n Parameters\n ----------\n data : dict\n {field : array-like} or {field : dict}\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). 
Otherwise if the keys should be rows, pass 'index'.\n\n Returns\n -------\n DataFrame\n \"\"\"\n index, columns = None, None\n orient = orient.lower()\n if orient == 'index':\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient != 'columns': # pragma: no cover\n raise ValueError('only recognize index or columns for orient')\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n @deprecate_kwarg(old_arg_name='outtype', new_arg_name='orient')\n def to_dict(self, orient='dict'):\n \"\"\"Convert DataFrame to dictionary.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records'}\n Determines the type of the values of the dictionary.\n\n - dict (default) : dict like {column -> {index -> value}}\n - list : dict like {column -> [values]}\n - series : dict like {column -> Series(values)}\n - split : dict like\n {index -> [index], columns -> [columns], data -> [values]}\n - records : list like\n [{column -> value}, ... , {column -> value}]\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n Returns\n -------\n result : dict like {column -> {index -> value}}\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\"DataFrame columns are not unique, some \"\n \"columns will be omitted.\", UserWarning)\n if orient.lower().startswith('d'):\n return dict((k, v.to_dict()) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('l'):\n return dict((k, v.tolist()) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('sp'):\n return {'index': self.index.tolist(),\n 'columns': self.columns.tolist(),\n 'data': self.values.tolist()}\n elif orient.lower().startswith('s'):\n return dict((k, v) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('r'):\n return [dict((k, v) for k, v in zip(self.columns, row))\n for row in self.values]\n else:\n raise ValueError(\"orient '%s' not understood\" % orient)\n\n def to_gbq(self, destination_table, project_id=None, chunksize=10000,\n verbose=True, reauth=False):\n \"\"\"Write a DataFrame to a Google BigQuery table.\n\n THIS IS AN EXPERIMENTAL LIBRARY\n\n If the table exists, the dataframe will be written to the table using\n the defined table schema and column types. For simplicity, this method\n uses the Google BigQuery streaming API. The to_gbq method chunks data\n into a default chunk size of 10,000. Failures return the complete error\n response which can be quite long depending on the size of the insert.\n There are several important limitations of the Google streaming API\n which are detailed at:\n https://developers.google.com/bigquery/streaming-data-into-bigquery.\n\n Parameters\n ----------\n dataframe : DataFrame\n DataFrame to be written\n destination_table : string\n Name of table to be written, in the form 'dataset.tablename'\n project_id : str\n Google BigQuery Account project ID.\n chunksize : int (default 10000)\n Number of rows to be inserted in each chunk from the dataframe.\n verbose : boolean (default True)\n Show percentage complete\n reauth : boolean (default False)\n Force Google BigQuery to reauthenticate the user. 
This is useful\n if multiple accounts are used.\n\n \"\"\"\n\n from pandas.io import gbq\n return gbq.to_gbq(self, destination_table, project_id=project_id,\n chunksize=chunksize, verbose=verbose,\n reauth=reauth)\n\n @classmethod\n def from_records(cls, data, index=None, exclude=None, columns=None,\n coerce_float=False, nrows=None):\n \"\"\"\n Convert structured or record ndarray to DataFrame\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns)\n coerce_float : boolean, default False\n Attempt to convert values to non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets\n\n Returns\n -------\n df : DataFrame\n \"\"\"\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = _ensure_index(columns)\n\n if com.is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n if compat.PY3:\n first_row = next(data)\n else:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, 'dtype') and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = _ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns = []\n for k, v in compat.iteritems(data):\n if k in columns:\n arr_columns.append(k)\n arrays.append(v)\n\n arrays, arr_columns = _reorder_arrays(arrays, arr_columns,\n columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = _to_arrays(data, columns)\n if columns is not None:\n columns = _ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = _to_arrays(data, columns,\n coerce_float=coerce_float)\n\n arr_columns = _ensure_index(arr_columns)\n if columns is not None:\n columns = _ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if (isinstance(index, compat.string_types) or\n not hasattr(index, \"__iter__\")):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n to_remove = [arr_columns.get_loc(field) for field in index]\n\n result_index = MultiIndex.from_arrays(\n [arrays[i] for i in to_remove], names=index)\n\n exclude.update(index)\n except Exception:\n result_index = index\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = 
_arrays_to_mgr(arrays, arr_columns, result_index,\n columns)\n\n return cls(mgr)\n\n def to_records(self, index=True, convert_datetime64=True):\n \"\"\"\n Convert DataFrame to record array. Index will be put in the\n 'index' field of the record array if requested\n\n Parameters\n ----------\n index : boolean, default True\n Include index in resulting record array, stored in 'index' field\n convert_datetime64 : boolean, default True\n Whether to convert the index to datetime.datetime if it is a\n DatetimeIndex\n\n Returns\n -------\n y : recarray\n \"\"\"\n if index:\n if com.is_datetime64_dtype(self.index) and convert_datetime64:\n ix_vals = [self.index.to_pydatetime()]\n else:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. copy copy copy\n ix_vals = lmap(np.array, zip(*self.index.values))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [self[c].get_values() for c in self.columns]\n\n count = 0\n index_names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = 'level_%d' % count\n count += 1\n elif index_names[0] is None:\n index_names = ['index']\n names = index_names + lmap(str, self.columns)\n else:\n arrays = [self[c].get_values() for c in self.columns]\n names = lmap(str, self.columns)\n\n dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])\n return np.rec.fromarrays(arrays, dtype=dtype, names=names)\n\n @classmethod\n def from_items(cls, items, columns=None, orient='columns'):\n \"\"\"\n Convert (key, value) pairs to DataFrame. The keys will be the axis\n index (usually the columns, but depends on the specified\n orientation). The values should be arrays or Series.\n\n Parameters\n ----------\n items : sequence of (key, value) pairs\n Values should be arrays or Series.\n columns : sequence of column labels, optional\n Must be passed if orient='index'.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the\n input correspond to column labels, pass 'columns'\n (default). 
Otherwise if the keys correspond to the index,\n pass 'index'.\n\n Returns\n -------\n frame : DataFrame\n \"\"\"\n keys, values = lzip(*items)\n\n if orient == 'columns':\n if columns is not None:\n columns = _ensure_index(columns)\n\n idict = dict(items)\n if len(idict) < len(items):\n if not columns.equals(_ensure_index(keys)):\n raise ValueError('With non-unique item names, passed '\n 'columns must be identical')\n arrays = values\n else:\n arrays = [idict[k] for k in columns if k in idict]\n else:\n columns = _ensure_index(keys)\n arrays = values\n\n return cls._from_arrays(arrays, columns, None)\n elif orient == 'index':\n if columns is None:\n raise TypeError(\"Must pass columns with orient='index'\")\n\n keys = _ensure_index(keys)\n\n arr = np.array(values, dtype=object).T\n data = [lib.maybe_convert_objects(v) for v in arr]\n return cls._from_arrays(data, columns, keys)\n else: # pragma: no cover\n raise ValueError(\"'orient' must be either 'columns' or 'index'\")\n\n @classmethod\n def _from_arrays(cls, arrays, columns, index, dtype=None):\n mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n return cls(mgr)\n\n @classmethod\n def from_csv(cls, path, header=0, sep=',', index_col=0,\n parse_dates=True, encoding=None, tupleize_cols=False,\n infer_datetime_format=False):\n \"\"\"\n Read delimited file into DataFrame\n\n Parameters\n ----------\n path : string file path or file handle / StringIO\n header : int, default 0\n Row to use at header (skip prior rows)\n sep : string, default ','\n Field delimiter\n index_col : int or sequence, default 0\n Column to use for index. If a sequence is given, a MultiIndex\n is used. Different default from read_table\n parse_dates : boolean, default True\n Parse dates. Different default from read_table\n tupleize_cols : boolean, default False\n write multi_index columns as a list of tuples (if True)\n or new (expanded format) if False)\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n\n Notes\n -----\n Preferable to use read_table for most general purposes but from_csv\n makes for an easy roundtrip to and from file, especially with a\n DataFrame of time series data\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n from pandas.io.parsers import read_table\n return read_table(path, header=header, sep=sep,\n parse_dates=parse_dates, index_col=index_col,\n encoding=encoding, tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format)\n\n def to_sparse(self, fill_value=None, kind='block'):\n \"\"\"\n Convert to SparseDataFrame\n\n Parameters\n ----------\n fill_value : float, default NaN\n kind : {'block', 'integer'}\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n from pandas.core.sparse import SparseDataFrame\n return SparseDataFrame(self._series, index=self.index,\n default_kind=kind,\n default_fill_value=fill_value)\n\n def to_panel(self):\n \"\"\"\n Transform long (stacked) format (DataFrame) into wide (3D, Panel)\n format.\n\n Currently the index of the DataFrame must be a 2-level MultiIndex. 
This\n may be generalized later\n\n Returns\n -------\n panel : Panel\n \"\"\"\n from pandas.core.panel import Panel\n\n # only support this kind for now\n if (not isinstance(self.index, MultiIndex) or # pragma: no cover\n len(self.index.levels) != 2):\n raise NotImplementedError('Only 2-level MultiIndex are supported.')\n\n if not self.index.is_unique:\n raise ValueError(\"Can't convert non-uniquely indexed \"\n \"DataFrame to Panel\")\n\n self._consolidate_inplace()\n\n # minor axis must be sorted\n if self.index.lexsort_depth < 2:\n selfsorted = self.sortlevel(0)\n else:\n selfsorted = self\n\n major_axis, minor_axis = selfsorted.index.levels\n major_labels, minor_labels = selfsorted.index.labels\n shape = len(major_axis), len(minor_axis)\n\n # preserve names, if any\n major_axis = major_axis.copy()\n major_axis.name = self.index.names[0]\n\n minor_axis = minor_axis.copy()\n minor_axis.name = self.index.names[1]\n\n # create new axes\n new_axes = [selfsorted.columns, major_axis, minor_axis]\n\n # create new manager\n new_mgr = selfsorted._data.reshape_nd(axes=new_axes,\n labels=[major_labels, minor_labels],\n shape=shape,\n ref_items=selfsorted.columns)\n\n return Panel(new_mgr)\n\n to_wide = deprecate('to_wide', to_panel)\n\n def to_csv(self, path_or_buf=None, sep=\",\", na_rep='', float_format=None,\n columns=None, header=True, index=True, index_label=None,\n mode='w', encoding=None, quoting=None,\n quotechar='\"', line_terminator='\\n', chunksize=None,\n tupleize_cols=False, date_format=None, doublequote=True,\n escapechar=None, decimal='.', **kwds):\n r\"\"\"Write DataFrame to a comma-separated values (csv) file\n\n Parameters\n ----------\n path_or_buf : string or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n sep : character, default \",\"\n Field delimiter for the output file.\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n columns : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is assumed\n to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex. If\n False do not print fields for index names. 
Use index_label=False\n for easier importing in R\n nanRep : None\n deprecated, use na_rep\n mode : str\n Python write mode, default 'w'\n encoding : string, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n line_terminator : string, default '\\\\n'\n The newline character or character sequence to use in the output\n file\n quoting : optional constant from csv module\n defaults to csv.QUOTE_MINIMAL\n quotechar : string (length 1), default '\"'\n character used to quote fields\n doublequote : boolean, default True\n Control quoting of `quotechar` inside a field\n escapechar : string (length 1), default None\n character used to escape `sep` and `quotechar` when appropriate\n chunksize : int or None\n rows to write at a time\n tupleize_cols : boolean, default False\n write multi_index columns as a list of tuples (if True)\n or new (expanded format) if False)\n date_format : string, default None\n Format string for datetime objects\n decimal: string, default '.'\n Character recognized as decimal separator. E.g. use ',' for European data\n \"\"\"\n\n formatter = fmt.CSVFormatter(self, path_or_buf,\n line_terminator=line_terminator,\n sep=sep, encoding=encoding,\n quoting=quoting, na_rep=na_rep,\n float_format=float_format, cols=columns,\n header=header, index=index,\n index_label=index_label, mode=mode,\n chunksize=chunksize, quotechar=quotechar,\n engine=kwds.get(\"engine\"),\n tupleize_cols=tupleize_cols,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal)\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',\n float_format=None, columns=None, header=True, index=True,\n index_label=None, startrow=0, startcol=0, engine=None,\n merge_cells=True, encoding=None, inf_rep='inf'):\n \"\"\"\n Write DataFrame to a excel sheet\n\n Parameters\n ----------\n excel_writer : string or ExcelWriter object\n File path or existing ExcelWriter\n sheet_name : string, default 'Sheet1'\n Name of sheet which will contain DataFrame\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n columns : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow :\n upper left cell row to dump data frame\n startcol :\n upper left cell column to dump data frame\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : boolean, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding: string, default None\n encoding of the resulting excel file. 
Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : string, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel)\n\n Notes\n -----\n If passing an existing ExcelWriter object, then the sheet will be added\n to the existing workbook. This can be used to save different\n DataFrames to one workbook:\n\n >>> writer = ExcelWriter('output.xlsx')\n >>> df1.to_excel(writer,'Sheet1')\n >>> df2.to_excel(writer,'Sheet2')\n >>> writer.save()\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n need_save = False\n if encoding == None:\n encoding = 'ascii'\n\n if isinstance(excel_writer, compat.string_types):\n excel_writer = ExcelWriter(excel_writer, engine=engine)\n need_save = True\n\n formatter = fmt.ExcelFormatter(self,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep)\n formatted_cells = formatter.get_formatted_cells()\n excel_writer.write_cells(formatted_cells, sheet_name,\n startrow=startrow, startcol=startcol)\n if need_save:\n excel_writer.save()\n\n def to_stata(\n self, fname, convert_dates=None, write_index=True, encoding=\"latin-1\",\n byteorder=None, time_stamp=None, data_label=None):\n \"\"\"\n A class for writing Stata binary dta files from array-like objects\n\n Parameters\n ----------\n fname : file path or buffer\n Where to save the dta file.\n convert_dates : dict\n Dictionary mapping column of datetime types to the stata internal\n format that you want to use for the dates. Options are\n 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a\n number or a name.\n encoding : str\n Default is latin-1. Note that Stata does not support unicode.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". 
The default is None which uses\n `sys.byteorder`\n\n Examples\n --------\n >>> writer = StataWriter('./data_file.dta', data)\n >>> writer.write_file()\n\n Or with dates\n\n >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})\n >>> writer.write_file()\n \"\"\"\n from pandas.io.stata import StataWriter\n writer = StataWriter(fname, self, convert_dates=convert_dates,\n encoding=encoding, byteorder=byteorder,\n time_stamp=time_stamp, data_label=data_label,\n write_index=write_index)\n writer.write_file()\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n justify=None, line_width=None, max_rows=None, max_cols=None,\n show_dimensions=False):\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header, index=index,\n line_width=line_width,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions)\n formatter.to_string()\n\n if buf is None:\n result = formatter.buf.getvalue()\n return result\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n justify=None, bold_rows=True, classes=None, escape=True,\n max_rows=None, max_cols=None, show_dimensions=False):\n \"\"\"\n Render a DataFrame as an HTML table.\n\n `to_html`-specific options:\n\n bold_rows : boolean, default True\n Make the row labels bold in the output\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table\n escape : boolean, default True\n Convert the characters <, >, and & to HTML-safe sequences.=\n max_rows : int, optional\n Maximum number of rows to show before truncating. If None, show\n all.\n max_cols : int, optional\n Maximum number of columns to show before truncating. If None, show\n all.\n\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header, index=index,\n bold_rows=bold_rows,\n escape=escape,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions)\n formatter.to_html(classes=classes)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n bold_rows=True, longtable=False, escape=True):\n \"\"\"\n Render a DataFrame to a tabular environment table. You can splice\n this into a LaTeX document. 
Requires \\\\usepackage{booktabs}.\n\n `to_latex`-specific options:\n\n bold_rows : boolean, default True\n Make the row labels bold in the output\n longtable : boolean, default False\n Use a longtable environment instead of tabular. Requires adding\n a \\\\usepackage{longtable} to your LaTeX preamble.\n escape : boolean, default True\n When set to False prevents from escaping latex special\n characters in column names.\n\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n header=header, index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape)\n formatter.to_latex(longtable=longtable)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):\n \"\"\"\n Concise summary of a DataFrame.\n\n Parameters\n ----------\n verbose : {None, True, False}, optional\n Whether to print the full summary.\n None follows the `display.max_info_columns` setting.\n True or False overrides the `display.max_info_columns` setting.\n buf : writable buffer, defaults to sys.stdout\n max_cols : int, default None\n Determines whether full summary or short summary is printed.\n None follows the `display.max_info_columns` setting.\n memory_usage : boolean, default None\n Specifies whether total memory usage of the DataFrame\n elements (including index) should be displayed. None follows\n the `display.memory_usage` setting. True or False overrides\n the `display.memory_usage` setting. 
Memory usage is shown in\n human-readable units (base-2 representation).\n null_counts : boolean, default None\n Whether to show the non-null counts\n If None, then only show if the frame is smaller than max_info_rows and max_info_columns.\n If True, always show counts.\n If False, never show counts.\n\n \"\"\"\n from pandas.core.format import _put_lines\n\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n\n lines.append(str(type(self)))\n lines.append(self.index.summary())\n\n if len(self.columns) == 0:\n lines.append('Empty %s' % type(self).__name__)\n _put_lines(buf, lines)\n return\n\n cols = self.columns\n\n # hack\n if max_cols is None:\n max_cols = get_option(\n 'display.max_info_columns', len(self.columns) + 1)\n\n max_rows = get_option('display.max_info_rows', len(self) + 1)\n\n if null_counts is None:\n show_counts = ((len(self.columns) <= max_cols) and\n (len(self) < max_rows))\n else:\n show_counts = null_counts\n exceeds_info_cols = len(self.columns) > max_cols\n\n def _verbose_repr():\n lines.append('Data columns (total %d columns):' %\n len(self.columns))\n space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4\n counts = None\n\n tmpl = \"%s%s\"\n if show_counts:\n counts = self.count()\n if len(cols) != len(counts): # pragma: no cover\n raise AssertionError('Columns must equal counts (%d != %d)' %\n (len(cols), len(counts)))\n tmpl = \"%s non-null %s\"\n\n dtypes = self.dtypes\n for i, col in enumerate(self.columns):\n dtype = dtypes[col]\n col = com.pprint_thing(col)\n\n count = \"\"\n if show_counts:\n count = counts.iloc[i]\n\n lines.append(_put_str(col, space) +\n tmpl % (count, dtype))\n\n def _non_verbose_repr():\n lines.append(self.columns.summary(name='Columns'))\n\n def _sizeof_fmt(num, size_qualifier):\n # returns size in human readable format\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f%s %s\" % (num, size_qualifier, x)\n num /= 1024.0\n return \"%3.1f%s %s\" % (num, size_qualifier, 'PB')\n\n if verbose:\n _verbose_repr()\n elif verbose is False: # specifically set to False, not nesc None\n _non_verbose_repr()\n else:\n if exceeds_info_cols:\n _non_verbose_repr()\n else:\n _verbose_repr()\n\n counts = self.get_dtype_counts()\n dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]\n lines.append('dtypes: %s' % ', '.join(dtypes))\n if memory_usage is None:\n memory_usage = get_option('display.memory_usage')\n if memory_usage: # append memory usage of df to display\n # size_qualifier is just a best effort; not guaranteed to catch all\n # cases (e.g., it misses categorical data even with object\n # categories)\n size_qualifier = ('+' if 'object' in counts\n or is_object_dtype(self.index) else '')\n mem_usage = self.memory_usage(index=True).sum()\n lines.append(\"memory usage: %s\\n\" %\n _sizeof_fmt(mem_usage, size_qualifier))\n _put_lines(buf, lines)\n\n def memory_usage(self, index=False):\n \"\"\"Memory usage of DataFrame columns.\n\n Parameters\n ----------\n index : bool\n Specifies whether to include memory usage of DataFrame's\n index in returned Series. 
If `index=True` (default is False)\n the first index of the Series is `Index`.\n\n Returns\n -------\n sizes : Series\n A series with column names as index and memory usage of\n columns with units of bytes.\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array.\n\n See Also\n --------\n numpy.ndarray.nbytes\n \"\"\"\n result = Series([ c.values.nbytes for col, c in self.iteritems() ],\n index=self.columns)\n if index:\n result = Series(self.index.nbytes,\n index=['Index']).append(result)\n return result\n\n def transpose(self):\n \"\"\"Transpose index and columns\"\"\"\n return super(DataFrame, self).transpose(1, 0)\n\n T = property(transpose)\n\n #----------------------------------------------------------------------\n # Picklability\n\n # legacy pickle formats\n def _unpickle_frame_compat(self, state): # pragma: no cover\n from pandas.core.common import _unpickle_array\n if len(state) == 2: # pragma: no cover\n series, idx = state\n columns = sorted(series)\n else:\n series, cols, idx = state\n columns = _unpickle_array(cols)\n\n index = _unpickle_array(idx)\n self._data = self._init_dict(series, index, columns, None)\n\n def _unpickle_matrix_compat(self, state): # pragma: no cover\n from pandas.core.common import _unpickle_array\n # old unpickling\n (vals, idx, cols), object_state = state\n\n index = _unpickle_array(idx)\n dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),\n copy=False)\n\n if object_state is not None:\n ovals, _, ocols = object_state\n objects = DataFrame(ovals, index=index,\n columns=_unpickle_array(ocols),\n copy=False)\n\n dm = dm.join(objects)\n\n self._data = dm._data\n\n #----------------------------------------------------------------------\n #----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, index, col, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed column and index\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n\n if takeable:\n series = self._iget_item_cache(col)\n return _maybe_box_datetimelike(series.values[index])\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n return engine.get_value(series.get_values(), index)\n\n def set_value(self, index, col, value, takeable=False):\n \"\"\"\n Put single value at passed column and index\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar value\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n frame : DataFrame\n If label pair is contained, will be reference to calling DataFrame,\n otherwise a new object\n \"\"\"\n try:\n if takeable is True:\n series = self._iget_item_cache(col)\n return series.set_value(index, value, takeable=True)\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n engine.set_value(series.values, index, value)\n return self\n except (KeyError, TypeError):\n\n # set using a non-recursive method & reset the cache\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n return self\n\n def irow(self, i, copy=False):\n return self._ixs(i, axis=0)\n\n def icol(self, i):\n return self._ixs(i, axis=1)\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n # irow\n if axis == 0:\n\n \"\"\"\n Notes\n -----\n If slice 
passed, the resulting data will be a view\n \"\"\"\n\n if isinstance(i, slice):\n return self[i]\n else:\n label = self.index[i]\n if isinstance(label, Index):\n # a location index by definition\n result = self.take(i, axis=axis)\n copy=True\n else:\n new_values = self._data.fast_xs(i)\n\n # if we are a copy, mark as such\n copy = isinstance(new_values,np.ndarray) and new_values.base is None\n result = Series(new_values, index=self.columns,\n name=self.index[i], dtype=new_values.dtype)\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n\n \"\"\"\n Notes\n -----\n If slice passed, the resulting data will be a view\n \"\"\"\n\n label = self.columns[i]\n if isinstance(i, slice):\n # need to return view\n lab_slice = slice(label[0], label[-1])\n return self.ix[:, lab_slice]\n else:\n label = self.columns[i]\n if isinstance(label, Index):\n return self.take(i, axis=1, convert=True)\n\n # if the values returned are not the same length\n # as the index (iow a not found value), iget returns\n # a 0-len ndarray. This is effectively catching\n # a numpy error (as numpy should really raise)\n values = self._data.iget(i)\n if not len(values):\n values = np.array([np.nan] * len(self.index), dtype=object)\n result = self._constructor_sliced.from_array(\n values, index=self.index,\n name=label, fastpath=True)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def iget_value(self, i, j):\n return self.iat[i, j]\n\n def __getitem__(self, key):\n\n # shortcut if we are an actual column\n is_mi_columns = isinstance(self.columns, MultiIndex)\n try:\n if key in self.columns and not is_mi_columns:\n return self._getitem_column(key)\n except:\n pass\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n return self._getitem_slice(indexer)\n\n if isinstance(key, (Series, np.ndarray, Index, list)):\n # either boolean or fancy integer index\n return self._getitem_array(key)\n elif isinstance(key, DataFrame):\n return self._getitem_frame(key)\n elif is_mi_columns:\n return self._getitem_multilevel(key)\n else:\n return self._getitem_column(key)\n\n def _getitem_column(self, key):\n \"\"\" return the actual column \"\"\"\n\n # get column\n if self.columns.is_unique:\n return self._get_item_cache(key)\n\n # duplicate columns & possible reduce dimensionaility\n result = self._constructor(self._data.get(key))\n if result.columns.is_unique:\n result = result[key]\n\n return result\n\n def _getitem_slice(self, key):\n return self._slice(key, axis=0)\n\n def _getitem_array(self, key):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\"Boolean Series key will be reindexed to match \"\n \"DataFrame index.\", UserWarning)\n elif len(key) != len(self.index):\n raise ValueError('Item wrong length %d instead of %d.' 
%\n (len(key), len(self.index)))\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self.take(indexer, axis=0, convert=False)\n else:\n indexer = self.ix._convert_to_indexer(key, axis=1)\n return self.take(indexer, axis=1, convert=True)\n\n def _getitem_multilevel(self, key):\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = DataFrame(new_values, index=self.index,\n columns=result_columns).__finalize__(self)\n if len(result.columns) == 1:\n top = result.columns[0]\n if ((type(top) == str and top == '') or\n (type(top) == tuple and top[0] == '')):\n result = result['']\n if isinstance(result, Series):\n result = Series(result, index=self.index, name=key)\n\n result._set_is_copy(self)\n return result\n else:\n return self._get_item_cache(key)\n\n def _getitem_frame(self, key):\n if key.values.dtype != np.bool_:\n raise ValueError('Must pass DataFrame with boolean values only')\n return self.where(key)\n\n def query(self, expr, **kwargs):\n \"\"\"Query the columns of a frame with a boolean expression.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n expr : string\n The query string to evaluate. You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n kwargs : dict\n See the documentation for :func:`pandas.eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n q : DataFrame\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`pandas.eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. 
This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n See Also\n --------\n pandas.eval\n DataFrame.eval\n\n Examples\n --------\n >>> from numpy.random import randn\n >>> from pandas import DataFrame\n >>> df = DataFrame(randn(10, 2), columns=list('ab'))\n >>> df.query('a > b')\n >>> df[df.a > df.b] # same result as the previous expression\n \"\"\"\n kwargs['level'] = kwargs.pop('level', 0) + 1\n res = self.eval(expr, **kwargs)\n\n try:\n return self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n return self[res]\n\n def eval(self, expr, **kwargs):\n \"\"\"Evaluate an expression in the context of the calling DataFrame\n instance.\n\n Parameters\n ----------\n expr : string\n The expression string to evaluate.\n kwargs : dict\n See the documentation for :func:`~pandas.eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ret : ndarray, scalar, or pandas object\n\n See Also\n --------\n pandas.DataFrame.query\n pandas.eval\n\n Notes\n -----\n For more details see the API documentation for :func:`~pandas.eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> from numpy.random import randn\n >>> from pandas import DataFrame\n >>> df = DataFrame(randn(10, 2), columns=list('ab'))\n >>> df.eval('a + b')\n >>> df.eval('c = a + b')\n \"\"\"\n resolvers = kwargs.pop('resolvers', None)\n kwargs['level'] = kwargs.pop('level', 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n resolvers = dict(self.iteritems()), index_resolvers\n kwargs['target'] = self\n kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers\n return _eval(expr, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None):\n \"\"\"Return a subset of a DataFrame including/excluding columns based on\n their ``dtype``.\n\n Parameters\n ----------\n include, exclude : list-like\n A list of dtypes or strings to be included/excluded. You must pass\n in a non-empty sequence for at least one of these.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n TypeError\n * If either of ``include`` or ``exclude`` is not a sequence\n\n Returns\n -------\n subset : DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Notes\n -----\n * To select all *numeric* types use the numpy dtype ``numpy.number``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__\n * To select Pandas categorical dtypes, use 'category'\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),\n ... 
'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 0.3962 True 1\n 1 0.1459 False 2\n 2 0.2623 True 1\n 3 0.0764 False 2\n 4 -0.9703 True 1\n 5 -1.2094 False 2\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1\n 1 2\n 2 1\n 3 2\n 4 1\n 5 2\n >>> df.select_dtypes(exclude=['floating'])\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n \"\"\"\n include, exclude = include or (), exclude or ()\n if not (com.is_list_like(include) and com.is_list_like(exclude)):\n raise TypeError('include and exclude must both be non-string'\n ' sequences')\n selection = tuple(map(frozenset, (include, exclude)))\n\n if not any(selection):\n raise ValueError('at least one of include or exclude must be '\n 'nonempty')\n\n # convert the myriad valid dtypes object to a single representation\n include, exclude = map(lambda x:\n frozenset(map(com._get_dtype_from_object, x)),\n selection)\n for dtypes in (include, exclude):\n com._invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError('include and exclude overlap on %s'\n % (include & exclude))\n\n # empty include/exclude -> defaults to True\n # three cases (we've already raised if both are empty)\n # case 1: empty include, nonempty exclude\n # we have True, True, ... True for include, same for exclude\n # in the loop below we get the excluded\n # and when we call '&' below we get only the excluded\n # case 2: nonempty include, empty exclude\n # same as case 1, but with include\n # case 3: both nonempty\n # the \"union\" of the logic of case 1 and case 2:\n # we get the included and excluded, and return their logical and\n include_these = Series(not bool(include), index=self.columns)\n exclude_these = Series(not bool(exclude), index=self.columns)\n\n def is_dtype_instance_mapper(column, dtype):\n return column, functools.partial(issubclass, dtype.type)\n\n for column, f in itertools.starmap(is_dtype_instance_mapper,\n self.dtypes.iteritems()):\n if include: # checks for the case of empty include or exclude\n include_these[column] = any(map(f, include))\n if exclude:\n exclude_these[column] = not any(map(f, exclude))\n\n dtype_indexer = include_these & exclude_these\n return self.loc[com._get_info_slice(self, dtype_indexer)]\n\n def _box_item_values(self, key, values):\n items = self.columns[self.columns.get_loc(key)]\n if values.ndim == 2:\n return self._constructor(values.T, columns=items, index=self.index)\n else:\n return self._box_col_values(values, items)\n\n def _box_col_values(self, values, items):\n \"\"\" provide boxed values for a column \"\"\"\n return self._constructor_sliced.from_array(values, index=self.index,\n name=items, fastpath=True)\n\n def __setitem__(self, key, value):\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n elif isinstance(key, DataFrame):\n self._setitem_frame(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key, value):\n self._check_setitem_copy()\n self.ix._setitem_with_indexer(key, value)\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError('Item wrong length %d instead of %d!' 
%\n (len(key), len(self.index)))\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.ix._setitem_with_indexer(indexer, value)\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError('Columns must be same length as key')\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n indexer = self.ix._convert_to_indexer(key, axis=1)\n self._check_setitem_copy()\n self.ix._setitem_with_indexer((slice(None), indexer), value)\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if key.values.dtype != np.bool_:\n raise TypeError('Must pass DataFrame with boolean values only')\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n self.where(-key, value, inplace=True)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n ensure that if we don't have an index, that we can create one from the\n passed value\n \"\"\"\n if not len(self.index):\n\n # GH5632, make sure that we are a Series convertible\n if is_list_like(value):\n try:\n value = Series(value)\n except:\n pass\n\n if not isinstance(value, Series):\n raise ValueError('Cannot set a frame with no defined index '\n 'and a value that cannot be converted to a '\n 'Series')\n\n self._data = self._data.reindex_axis(value.index.copy(), axis=1,\n fill_value=np.nan)\n\n # we are a scalar\n # noop\n else:\n\n pass\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n\n self._ensure_valid_index(value)\n value = self._sanitize_column(key, value)\n NDFrame._set_item(self, key, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exeption to occur first\n if len(self):\n self._check_setitem_copy()\n\n def insert(self, loc, column, value, allow_duplicates=False):\n \"\"\"\n Insert column into DataFrame at specified location.\n\n If `allow_duplicates` is False, raises Exception if column\n is already contained in the DataFrame.\n\n Parameters\n ----------\n loc : int\n Must have 0 <= loc <= len(columns)\n column : object\n value : int, Series, or array-like\n \"\"\"\n self._ensure_valid_index(value)\n value = self._sanitize_column(column, value)\n self._data.insert(\n loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs):\n \"\"\"\n Assign new columns to a DataFrame, returning a new object\n (a copy) with all the original columns in addition to the new ones.\n\n .. versionadded:: 0.16.0\n\n Parameters\n ----------\n kwargs : keyword, value pairs\n keywords are the column names. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. If the values are\n not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n df : DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your\n arguments may not be preserved, and so the order of the\n new columns is not well defined. 
Assigning multiple\n columns within the same ``assign`` is possible, but you cannot\n reference other columns created within the same ``assign`` call.\n\n Examples\n --------\n >>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(ln_A = lambda x: np.log(x.A))\n A B ln_A\n 0 1 0.426905 0.000000\n 1 2 -0.780949 0.693147\n 2 3 -0.418711 1.098612\n 3 4 -0.269708 1.386294\n 4 5 -0.274002 1.609438\n 5 6 -0.500792 1.791759\n 6 7 1.649697 1.945910\n 7 8 -1.495604 2.079442\n 8 9 0.549296 2.197225\n 9 10 -0.758542 2.302585\n\n Where the value already exists and is inserted:\n\n >>> newcol = np.log(df['A'])\n >>> df.assign(ln_A=newcol)\n A B ln_A\n 0 1 0.426905 0.000000\n 1 2 -0.780949 0.693147\n 2 3 -0.418711 1.098612\n 3 4 -0.269708 1.386294\n 4 5 -0.274002 1.609438\n 5 6 -0.500792 1.791759\n 6 7 1.649697 1.945910\n 7 8 -1.495604 2.079442\n 8 9 0.549296 2.197225\n 9 10 -0.758542 2.302585\n \"\"\"\n data = self.copy()\n\n # do all calculations first...\n results = {}\n for k, v in kwargs.items():\n\n if callable(v):\n results[k] = v(data)\n else:\n results[k] = v\n\n # ... and then assign\n for k, v in results.items():\n data[k] = v\n\n return data\n\n def _sanitize_column(self, key, value):\n # Need to make sure new columns (which go into the BlockManager as new\n # blocks) are always copied\n\n def reindexer(value):\n # reindex if necessary\n\n if value.index.equals(self.index) or not len(self.index):\n value = value.values.copy()\n else:\n\n # GH 4107\n try:\n value = value.reindex(self.index).values\n except Exception as e:\n\n # duplicate axis\n if not value.index.is_unique:\n raise e\n\n # other\n raise TypeError('incompatible index of inserted column '\n 'with frame index')\n return value\n\n if isinstance(value, Series):\n value = reindexer(value)\n\n elif isinstance(value, DataFrame):\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex_axis(cols, axis=1)\n # now align rows\n value = reindexer(value).T\n\n elif isinstance(value, Categorical):\n value = value.copy()\n\n elif (isinstance(value, Index) or is_sequence(value)):\n from pandas.core.series import _sanitize_index\n\n # turn me into an ndarray\n value = _sanitize_index(value, self.index, copy=False)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = com._possibly_convert_platform(value)\n else:\n value = com._asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = _possibly_infer_to_datetimelike(value.ravel()).reshape(value.shape)\n\n else:\n # upcast the scalar\n dtype, value = _infer_dtype_from_scalar(value)\n value = np.repeat(value, len(self.index)).astype(dtype)\n value = com._possibly_cast_to_datetime(value, dtype)\n\n # return unconsolidatables directly\n if isinstance(value, (Categorical, SparseArray)):\n return value\n\n # broadcast across multiple columns if necessary\n if key in self.columns and value.ndim == 1:\n if not self.columns.is_unique or isinstance(self.columns,\n MultiIndex):\n existing_piece = self[key]\n if 
isinstance(existing_piece, DataFrame):\n value = np.tile(value, (len(existing_piece.columns), 1))\n\n return np.atleast_2d(np.asarray(value))\n\n @property\n def _series(self):\n result = {}\n for idx, item in enumerate(self.columns):\n result[item] = Series(self._data.iget(idx), index=self.index,\n name=item)\n return result\n\n def lookup(self, row_labels, col_labels):\n \"\"\"Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup\n col_labels : sequence\n The column labels to use for lookup\n\n Notes\n -----\n Akin to::\n\n result = []\n for row, col in zip(row_labels, col_labels):\n result.append(df.get_value(row, col))\n\n Examples\n --------\n values : ndarray\n The found values\n\n \"\"\"\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError('Row labels must have same size as column labels')\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError('One or more row labels was not found')\n if (cidx == -1).any():\n raise KeyError('One or more column labels was not found')\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype='O')\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self.get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n #----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, method, fill_value, copy):\n frame = self\n\n columns = axes['columns']\n if columns is not None:\n frame = frame._reindex_columns(columns, copy, level, fill_value,\n limit)\n\n index = axes['index']\n if index is not None:\n frame = frame._reindex_index(index, method, copy, level,\n fill_value, limit)\n\n return frame\n\n def _reindex_index(self, new_index, method, copy, level, fill_value=NA,\n limit=None):\n new_index, indexer = self.index.reindex(new_index, method, level,\n limit=limit)\n return self._reindex_with_indexers({0: [new_index, indexer]},\n copy=copy, fill_value=fill_value,\n allow_dups=False)\n\n def _reindex_columns(self, new_columns, copy, level, fill_value=NA,\n limit=None):\n new_columns, indexer = self.columns.reindex(new_columns, level=level,\n limit=limit)\n return self._reindex_with_indexers({1: [new_columns, indexer]},\n copy=copy, fill_value=fill_value,\n allow_dups=False)\n\n def _reindex_multi(self, axes, copy, fill_value):\n \"\"\" we are guaranteed non-Nones in the axes! 
\"\"\"\n\n new_index, row_indexer = self.index.reindex(axes['index'])\n new_columns, col_indexer = self.columns.reindex(axes['columns'])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = com.take_2d_multi(self.values, indexer,\n fill_value=fill_value)\n return self._constructor(new_values, index=new_index,\n columns=new_columns)\n else:\n return self._reindex_with_indexers({0: [new_index, row_indexer],\n 1: [new_columns, col_indexer]},\n copy=copy,\n fill_value=fill_value)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, index=None, columns=None, **kwargs):\n return super(DataFrame, self).reindex(index=index, columns=columns,\n **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(DataFrame, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, index=None, columns=None, **kwargs):\n return super(DataFrame, self).rename(index=index, columns=columns,\n **kwargs)\n\n def set_index(self, keys, drop=True, append=False, inplace=False,\n verify_integrity=False):\n \"\"\"\n Set the DataFrame index (row labels) using one or more existing\n columns. By default yields a new object.\n\n Parameters\n ----------\n keys : column label or list of column labels / arrays\n drop : boolean, default True\n Delete columns to be used as the new index\n append : boolean, default False\n Whether to append columns to existing index\n inplace : boolean, default False\n Modify the DataFrame in place (do not create a new object)\n verify_integrity : boolean, default False\n Check the new index for duplicates. Otherwise defer the check until\n necessary. 
Setting to False will improve the performance of this\n method\n\n Examples\n --------\n >>> indexed_df = df.set_index(['A', 'B'])\n >>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])\n >>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])\n\n Returns\n -------\n dataframe : DataFrame\n \"\"\"\n if not isinstance(keys, list):\n keys = [keys]\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names = []\n if append:\n names = [x for x in self.index.names]\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index.get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove = []\n for col in keys:\n if isinstance(col, MultiIndex):\n # append all but the last column so we don't have to modify\n # the end of this loop\n for n in range(col.nlevels - 1):\n arrays.append(col.get_level_values(n))\n\n level = col.get_level_values(col.nlevels - 1)\n names.extend(col.names)\n elif isinstance(col, Series):\n level = col.values\n names.append(col.name)\n elif isinstance(col, Index):\n level = col\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray, Index)):\n level = col\n names.append(None)\n else:\n level = frame[col].values\n names.append(col)\n if drop:\n to_remove.append(col)\n arrays.append(level)\n\n index = MultiIndex.from_arrays(arrays, names=names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index.get_duplicates()\n raise ValueError('Index has duplicate keys: %s' % duplicates)\n\n for c in to_remove:\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n def reset_index(self, level=None, drop=False, inplace=False, col_level=0,\n col_fill=''):\n \"\"\"\n For DataFrame with multi-level index, return new DataFrame with\n labeling information in the columns under the index names, defaulting\n to 'level_0', 'level_1', etc. if any are None. For a standard index,\n the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default\n drop : boolean, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : boolean, default False\n Modify the DataFrame in place (do not create a new object)\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. 
If None then the index name is repeated.\n\n Returns\n -------\n resetted : DataFrame\n \"\"\"\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n def _maybe_casted_values(index, labels=None):\n if isinstance(index, PeriodIndex):\n values = index.asobject.values\n elif (isinstance(index, DatetimeIndex) and\n index.tz is not None):\n values = index.asobject\n else:\n values = index.values\n if values.dtype == np.object_:\n values = lib.maybe_convert_objects(values)\n\n # if we have the labels, extract the values with a mask\n if labels is not None:\n mask = labels == -1\n values = values.take(labels)\n if mask.any():\n values, changed = com._maybe_upcast_putmask(values,\n mask, np.nan)\n return values\n\n new_index = np.arange(len(new_obj),dtype='int64')\n if isinstance(self.index, MultiIndex):\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < len(self.index.levels):\n new_index = self.index.droplevel(level)\n\n if not drop:\n names = self.index.names\n zipped = lzip(self.index.levels, self.index.labels)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(zipped))):\n col_name = names[i]\n if col_name is None:\n col_name = 'level_%d' % i\n\n if multi_col:\n if col_fill is None:\n col_name = tuple([col_name] *\n self.columns.nlevels)\n else:\n name_lst = [col_fill] * self.columns.nlevels\n lev_num = self.columns._get_level_number(col_level)\n name_lst[lev_num] = col_name\n col_name = tuple(name_lst)\n\n # to ndarray and maybe infer different dtype\n level_values = _maybe_casted_values(lev, lab)\n if level is None or i in level:\n new_obj.insert(0, col_name, level_values)\n\n elif not drop:\n name = self.index.name\n if name is None or name == 'index':\n name = 'index' if 'index' not in self else 'level_0'\n if isinstance(self.columns, MultiIndex):\n if col_fill is None:\n name = tuple([name] * self.columns.nlevels)\n else:\n name_lst = [col_fill] * self.columns.nlevels\n lev_num = self.columns._get_level_number(col_level)\n name_lst[lev_num] = name\n name = tuple(name_lst)\n values = _maybe_casted_values(self.index)\n new_obj.insert(0, name, values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n\n #----------------------------------------------------------------------\n # Reindex-based selection methods\n\n def dropna(self, axis=0, how='any', thresh=None, subset=None,\n inplace=False):\n \"\"\"\n Return object with labels on given axis omitted where alternately any\n or all of the data are missing\n\n Parameters\n ----------\n axis : {0, 1}, or tuple/list thereof\n Pass tuple or list to drop on multiple axes\n how : {'any', 'all'}\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n int value : require that many non-NA values\n subset : array-like\n Labels along other axis to consider, e.g. 
if you are dropping rows\n these would be a list of columns to include\n inplace : boolean, defalt False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : DataFrame\n \"\"\"\n if isinstance(axis, (tuple, list)):\n result = self\n for ax in axis:\n result = result.dropna(how=how, thresh=thresh,\n subset=subset, axis=ax)\n else:\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check,subset)))\n agg_obj = self.take(indices,axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == 'any':\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == 'all':\n mask = count > 0\n else:\n if how is not None:\n raise ValueError('invalid how option: %s' % how)\n else:\n raise TypeError('must specify how or thresh')\n\n result = self.take(mask.nonzero()[0], axis=axis, convert=False)\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')\n def drop_duplicates(self, subset=None, take_last=False, inplace=False):\n \"\"\"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n take_last : boolean, default False\n Take the last observed row in a row. Defaults to the first row\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy\n cols : kwargs only argument of subset [deprecated]\n\n Returns\n -------\n deduplicated : DataFrame\n \"\"\"\n duplicated = self.duplicated(subset, take_last=take_last)\n\n if inplace:\n inds, = (-duplicated).nonzero()\n new_data = self._data.take(inds)\n self._update_inplace(new_data)\n else:\n return self[-duplicated]\n\n @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')\n def duplicated(self, subset=None, take_last=False):\n \"\"\"\n Return boolean Series denoting duplicate rows, optionally only\n considering certain columns\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n take_last : boolean, default False\n For a set of distinct duplicate rows, flag all but the last row as\n duplicated. 
Default is for all but the first row to be flagged\n cols : kwargs only argument of subset [deprecated]\n\n Returns\n -------\n duplicated : Series\n \"\"\"\n from pandas.core.groupby import get_group_index\n from pandas.core.algorithms import factorize\n from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT\n\n def f(vals):\n labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))\n return labels.astype('i8',copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif not np.iterable(subset) or \\\n isinstance(subset, compat.string_types) or \\\n isinstance(subset, tuple) and subset in self.columns:\n subset = subset,\n\n vals = (self[col].values for col in subset)\n labels, shape = map(list, zip( * map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n return Series(duplicated_int64(ids, take_last), index=self.index)\n\n #----------------------------------------------------------------------\n # Sorting\n\n def sort(self, columns=None, axis=0, ascending=True,\n inplace=False, kind='quicksort', na_position='last'):\n \"\"\"\n Sort DataFrame either by labels (along either axis) or by the values in\n column(s)\n\n Parameters\n ----------\n columns : object\n Column name(s) in frame. Accepts a column name or a list\n for a nested sort. A tuple will be interpreted as the\n levels of a multi-index.\n ascending : boolean or list, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders\n axis : {0, 1}\n Sort index/rows versus columns\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n This option is only applied when sorting on a single column or label.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Examples\n --------\n >>> result = df.sort(['A', 'B'], ascending=[1, 0])\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n return self.sort_index(by=columns, axis=axis, ascending=ascending,\n inplace=inplace, kind=kind, na_position=na_position)\n\n def sort_index(self, axis=0, by=None, ascending=True, inplace=False,\n kind='quicksort', na_position='last'):\n \"\"\"\n Sort DataFrame either by labels (along either axis) or by the values in\n a column\n\n Parameters\n ----------\n axis : {0, 1}\n Sort index/rows versus columns\n by : object\n Column name(s) in frame. Accepts a column name or a list\n for a nested sort. A tuple will be interpreted as the\n levels of a multi-index.\n ascending : boolean or list, default True\n Sort ascending vs. descending. 
Specify list for multiple sort\n orders\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n This option is only applied when sorting on a single column or label.\n\n Examples\n --------\n >>> result = df.sort_index(by=['A', 'B'], ascending=[True, False])\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n\n from pandas.core.groupby import _lexsort_indexer, _nargsort\n axis = self._get_axis_number(axis)\n if axis not in [0, 1]: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))\n\n labels = self._get_axis(axis)\n\n if by is not None:\n if axis != 0:\n raise ValueError('When sorting by column, axis must be 0 '\n '(rows)')\n if not isinstance(by, list):\n by = [by]\n if com.is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError('Length of ascending (%d) != length of by'\n ' (%d)' % (len(ascending), len(by)))\n if len(by) > 1:\n def trans(v):\n if com.needs_i8_conversion(v):\n return v.view('i8')\n return v\n keys = []\n for x in by:\n k = self[x].values\n if k.ndim == 2:\n raise ValueError('Cannot sort by duplicate column %s' % str(x))\n keys.append(trans(k))\n indexer = _lexsort_indexer(keys, orders=ascending,\n na_position=na_position)\n indexer = com._ensure_platform_int(indexer)\n else:\n by = by[0]\n k = self[by].values\n if k.ndim == 2:\n\n # try to be helpful\n if isinstance(self.columns, MultiIndex):\n raise ValueError('Cannot sort by column %s in a multi-index'\n ' you need to explicity provide all the levels'\n % str(by))\n\n raise ValueError('Cannot sort by duplicate column %s'\n % str(by))\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = _nargsort(k, kind=kind, ascending=ascending,\n na_position=na_position)\n\n elif isinstance(labels, MultiIndex):\n\n # make sure that the axis is lexsorted to start\n # if not we need to reconstruct to get the correct indexer\n if not labels.is_lexsorted():\n labels = MultiIndex.from_tuples(labels.values)\n\n indexer = _lexsort_indexer(labels.labels, orders=ascending,\n na_position=na_position)\n indexer = com._ensure_platform_int(indexer)\n else:\n indexer = _nargsort(labels, kind=kind, ascending=ascending,\n na_position=na_position)\n\n bm_axis = self._get_block_manager_axis(axis)\n new_data = self._data.take(indexer, axis=bm_axis,\n convert=False, verify=False)\n\n if inplace:\n return self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def sortlevel(self, level=0, axis=0, ascending=True,\n inplace=False, sort_remaining=True):\n \"\"\"\n Sort multilevel index by chosen axis and primary level. 
Data will be\n lexicographically sorted by the chosen level followed by the other\n levels (in order)\n\n Parameters\n ----------\n level : int\n axis : {0, 1}\n ascending : boolean, default True\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n sort_remaining : boolean, default True\n Sort by the other levels too.\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n the_axis = self._get_axis(axis)\n if not isinstance(the_axis, MultiIndex):\n raise TypeError('can only sort by level with a hierarchical index')\n\n new_axis, indexer = the_axis.sortlevel(level, ascending=ascending,\n sort_remaining=sort_remaining)\n\n if self._is_mixed_type and not inplace:\n ax = 'index' if axis == 0 else 'columns'\n\n if new_axis.is_unique:\n return self.reindex(**{ax: new_axis})\n else:\n return self.take(indexer, axis=axis, convert=False)\n\n bm_axis = self._get_block_manager_axis(axis)\n new_data = self._data.take(indexer, axis=bm_axis,\n convert=False, verify=False)\n if inplace:\n return self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def swaplevel(self, i, j, axis=0):\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : type of caller (new object)\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n result.index = result.index.swaplevel(i, j)\n else:\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order, axis=0):\n \"\"\"\n Rearrange index levels using input order.\n May not drop or duplicate levels\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : int\n Where to reorder levels.\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis),\n MultiIndex): # pragma: no cover\n raise TypeError('Can only reorder levels on a hierarchical axis.')\n\n result = self.copy()\n\n if axis == 0:\n result.index = result.index.reorder_levels(order)\n else:\n result.columns = result.columns.reorder_levels(order)\n return result\n\n #----------------------------------------------------------------------\n # Arithmetic / combination related\n\n def _combine_frame(self, other, func, fill_value=None, level=None):\n this, other = self.align(other, join='outer', level=level, copy=False)\n new_index, new_columns = this.index, this.columns\n\n def _arith_op(left, right):\n if fill_value is not None:\n left_mask = isnull(left)\n right_mask = isnull(right)\n left = left.copy()\n right = right.copy()\n\n # one but not both\n mask = left_mask ^ right_mask\n left[left_mask & mask] = fill_value\n right[right_mask & mask] = fill_value\n\n return func(left, right)\n\n if this._is_mixed_type or other._is_mixed_type:\n\n # unique\n if this.columns.is_unique:\n\n def f(col):\n r = _arith_op(this[col].values, other[col].values)\n return self._constructor_sliced(r, index=new_index,\n dtype=r.dtype)\n\n result = dict([(col, f(col)) for col in this])\n\n # non-unique\n else:\n\n def f(i):\n r = _arith_op(this.iloc[:, i].values,\n other.iloc[:, i].values)\n return self._constructor_sliced(r, index=new_index,\n dtype=r.dtype)\n\n result = dict([\n (i, f(i)) for i, col in enumerate(this.columns)\n ])\n result = self._constructor(result, index=new_index, copy=False)\n result.columns = new_columns\n return result\n\n else:\n result = _arith_op(this.values, other.values)\n\n return self._constructor(result, index=new_index,\n columns=new_columns, copy=False)\n\n def _combine_series(self, other, func, fill_value=None, axis=None,\n level=None):\n if axis is not None:\n axis = self._get_axis_name(axis)\n if axis == 'index':\n return self._combine_match_index(other, func, level=level, fill_value=fill_value)\n else:\n return self._combine_match_columns(other, func, level=level, fill_value=fill_value)\n return self._combine_series_infer(other, func, level=level, fill_value=fill_value)\n\n def _combine_series_infer(self, other, func, level=None, fill_value=None):\n if len(other) == 0:\n return self * NA\n\n if len(self) == 0:\n # Ambiguous case, use _series so works with DataFrame\n return self._constructor(data=self._series, index=self.index,\n columns=self.columns)\n\n # teeny hack because one does DataFrame + TimeSeries all the time\n if self.index.is_all_dates and other.index.is_all_dates:\n warnings.warn((\"TimeSeries broadcasting along DataFrame index \"\n \"by default is deprecated. 
Please use \"\n \"DataFrame.<op> to explicitly broadcast arithmetic \"\n \"operations along the index\"),\n FutureWarning)\n return self._combine_match_index(other, func, level=level, fill_value=fill_value)\n else:\n return self._combine_match_columns(other, func, level=level, fill_value=fill_value)\n\n def _combine_match_index(self, other, func, level=None, fill_value=None):\n left, right = self.align(other, join='outer', axis=0, level=level, copy=False)\n if fill_value is not None:\n raise NotImplementedError(\"fill_value %r not supported.\" %\n fill_value)\n return self._constructor(func(left.values.T, right.values).T,\n index=left.index,\n columns=self.columns, copy=False)\n\n def _combine_match_columns(self, other, func, level=None, fill_value=None):\n left, right = self.align(other, join='outer', axis=1, level=level, copy=False)\n if fill_value is not None:\n raise NotImplementedError(\"fill_value %r not supported\" %\n fill_value)\n\n new_data = left._data.eval(\n func=func, other=right, axes=[left.columns, self.index])\n return self._constructor(new_data)\n\n def _combine_const(self, other, func, raise_on_error=True):\n if self.empty:\n return self\n\n new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error)\n return self._constructor(new_data)\n\n def _compare_frame_evaluate(self, other, func, str_rep):\n\n # unique\n if self.columns.is_unique:\n def _compare(a, b):\n return dict([(col, func(a[col], b[col])) for col in a.columns])\n new_data = expressions.evaluate(_compare, str_rep, self, other)\n return self._constructor(data=new_data, index=self.index,\n columns=self.columns, copy=False)\n # non-unique\n else:\n def _compare(a, b):\n return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))\n for i, col in enumerate(a.columns)])\n new_data = expressions.evaluate(_compare, str_rep, self, other)\n result = self._constructor(data=new_data, index=self.index,\n copy=False)\n result.columns = self.columns\n return result\n\n def _compare_frame(self, other, func, str_rep):\n if not self._indexed_same(other):\n raise ValueError('Can only compare identically-labeled '\n 'DataFrame objects')\n return self._compare_frame_evaluate(other, func, str_rep)\n\n def _flex_compare_frame(self, other, func, str_rep, level):\n if not self._indexed_same(other):\n self, other = self.align(other, 'outer', level=level, copy=False)\n return self._compare_frame_evaluate(other, func, str_rep)\n\n def combine(self, other, func, fill_value=None, overwrite=True):\n \"\"\"\n Add two DataFrame objects and do not propagate NaN values, so if for a\n (column, time) one frame is missing a value, it will default to the\n other frame's value (which might be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n func : function\n fill_value : scalar value\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling frame\n\n Returns\n -------\n result : DataFrame\n \"\"\"\n\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isnull(series)\n other_mask = 
isnull(otherSeries)\n\n # don't overwrite columns unecessarily\n # DO propogate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n # if we have different dtypes, possibily promote\n new_dtype = this_dtype\n if this_dtype != other_dtype:\n new_dtype = com._lcd_dtypes(this_dtype, other_dtype)\n series = series.astype(new_dtype)\n otherSeries = otherSeries.astype(new_dtype)\n\n # see if we need to be represented as i8 (datetimelike)\n # try to keep us at this dtype\n needs_i8_conversion = com.needs_i8_conversion(new_dtype)\n if needs_i8_conversion:\n this_dtype = new_dtype\n arr = func(series, otherSeries, True)\n else:\n arr = func(series, otherSeries)\n\n if do_fill:\n arr = com.ensure_float(arr)\n arr[this_mask & other_mask] = NA\n\n # try to downcast back to the original dtype\n if needs_i8_conversion:\n arr = com._possibly_cast_to_datetime(arr, this_dtype)\n else:\n arr = com._possibly_downcast_to_dtype(arr, this_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result,\n index=new_index,\n columns=new_columns).convert_objects(\n convert_dates=True,\n copy=False)\n\n def combine_first(self, other):\n \"\"\"\n Combine two DataFrame objects and default to non-null values in frame\n calling the method. Result index columns will be the union of the\n respective indexes and columns\n\n Parameters\n ----------\n other : DataFrame\n\n Examples\n --------\n a's values prioritized, use values from b to fill holes:\n\n >>> a.combine_first(b)\n\n\n Returns\n -------\n combined : DataFrame\n \"\"\"\n def combiner(x, y, needs_i8_conversion=False):\n x_values = x.values if hasattr(x, 'values') else x\n y_values = y.values if hasattr(y, 'values') else y\n if needs_i8_conversion:\n mask = isnull(x)\n x_values = x_values.view('i8')\n y_values = y_values.view('i8')\n else:\n mask = isnull(x_values)\n\n return expressions.where(mask, y_values, x_values,\n raise_on_error=True)\n\n return self.combine(other, combiner, overwrite=False)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify DataFrame in place using non-NA values from passed\n DataFrame. Aligns on indices\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n join : {'left'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling frame\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. 
Return True for values\n that should be updated\n raise_conflict : boolean\n If True, will raise an error if the DataFrame and other both\n contain data in the same place.\n \"\"\"\n # TODO: Support other joins\n if join != 'left': # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col].values\n that = other[col].values\n if filter_func is not None:\n mask = ~filter_func(this) | isnull(that)\n else:\n if raise_conflict:\n mask_this = notnull(that)\n mask_that = notnull(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isnull(that)\n\n # don't overwrite columns unecessarily\n if mask.all():\n continue\n else:\n mask = notnull(this)\n\n self[col] = expressions.where(\n mask, this, that, raise_on_error=True)\n\n #----------------------------------------------------------------------\n # Misc methods\n\n def first_valid_index(self):\n \"\"\"\n Return label for first non-NA/null value\n \"\"\"\n return self.index[self.count(1) > 0][0]\n\n def last_valid_index(self):\n \"\"\"\n Return label for last non-NA/null value\n \"\"\"\n return self.index[self.count(1) > 0][-1]\n\n #----------------------------------------------------------------------\n # Data reshaping\n\n def pivot(self, index=None, columns=None, values=None):\n \"\"\"\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from index / columns to form axes and return either\n DataFrame or Panel, depending on whether you request a single value\n column (DataFrame) or all columns (Panel)\n\n Parameters\n ----------\n index : string or object\n Column name to use to make new frame's index\n columns : string or object\n Column name to use to make new frame's columns\n values : string or object, optional\n Column name to use for populating new frame's values\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods\n\n Examples\n --------\n >>> df\n foo bar baz\n 0 one A 1.\n 1 one B 2.\n 2 one C 3.\n 3 two A 4.\n 4 two B 5.\n 5 two C 6.\n\n >>> df.pivot('foo', 'bar', 'baz')\n A B C\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot('foo', 'bar')['baz']\n A B C\n one 1 2 3\n two 4 5 6\n\n Returns\n -------\n pivoted : DataFrame\n If no values column specified, will have hierarchically indexed\n columns\n \"\"\"\n from pandas.core.reshape import pivot\n return pivot(self, index=index, columns=columns, values=values)\n\n def stack(self, level=-1, dropna=True):\n \"\"\"\n Pivot a level of the (possibly hierarchical) column labels, returning a\n DataFrame (or Series in the case of an object with a single level of\n column labels) having a hierarchical index with a new inner-most level\n of row labels.\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default last level\n Level(s) to stack, can pass level name\n dropna : boolean, default True\n Whether to drop rows in the resulting Frame/Series with no valid\n values\n\n Examples\n ----------\n >>> s\n a b\n one 1. 2.\n two 3. 
4.\n\n >>> s.stack()\n one a 1\n b 2\n two a 3\n b 4\n\n Returns\n -------\n stacked : DataFrame or Series\n \"\"\"\n from pandas.core.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n return stack_multiple(self, level, dropna=dropna)\n else:\n return stack(self, level, dropna=dropna)\n\n def unstack(self, level=-1):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels, returning\n a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels. If the index is not a MultiIndex,\n the output will be a Series (the analogue of stack when the columns are\n not a MultiIndex).\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name\n\n See also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.\n b 3.\n two a 2.\n b 4.\n\n Returns\n -------\n unstacked : DataFrame or Series\n \"\"\"\n from pandas.core.reshape import unstack\n return unstack(self, level)\n\n #----------------------------------------------------------------------\n # Time series-related\n\n def diff(self, periods=1):\n \"\"\"\n 1st discrete difference of object\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming difference\n\n Returns\n -------\n diffed : DataFrame\n \"\"\"\n new_data = self._data.diff(n=periods)\n return self._constructor(new_data)\n\n #----------------------------------------------------------------------\n # Function application\n\n def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,\n args=(), **kwds):\n \"\"\"\n Applies function along input axis of DataFrame.\n\n Objects passed to functions are Series objects having index\n either the DataFrame's index (axis=0) or the columns (axis=1).\n Return type depends on whether passed function aggregates, or the\n reduce argument if the DataFrame is empty.\n\n Parameters\n ----------\n func : function\n Function to apply to each column/row\n axis : {0, 1}\n * 0 : apply function to each column\n * 1 : apply function to each row\n broadcast : boolean, default False\n For aggregation functions, return object of same size with values\n propagated\n reduce : boolean or None, default None\n Try to apply reduction procedures. If the DataFrame is empty,\n apply will use reduce to determine whether the result should be a\n Series or a DataFrame. If reduce is None (the default), apply's\n return value will be guessed by calling func an empty Series (note:\n while guessing, exceptions raised by func will be ignored). If\n reduce is True a Series will always be returned, and if False a\n DataFrame will always be returned.\n raw : boolean, default False\n If False, convert each row or column into a Series. If raw=True the\n passed function will receive ndarray objects instead. 
If you are\n just applying a NumPy reduction function this will achieve much\n better performance\n args : tuple\n Positional arguments to pass to function in addition to the\n array/series\n Additional keyword arguments will be passed as keywords to the function\n\n Notes\n -----\n In the current implementation apply calls func twice on the\n first column/row to decide whether it can take a fast or slow\n code path. This can lead to unexpected behavior if func has\n side-effects, as they will take effect twice for the first\n column/row.\n\n Examples\n --------\n >>> df.apply(numpy.sqrt) # returns DataFrame\n >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)\n >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)\n\n See also\n --------\n DataFrame.applymap: For elementwise operations\n\n Returns\n -------\n applied : Series or DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if kwds or args and not isinstance(func, np.ufunc):\n f = lambda x: func(x, *args, **kwds)\n else:\n f = func\n\n if len(self.columns) == 0 and len(self.index) == 0:\n return self._apply_empty_result(func, axis, reduce, *args, **kwds)\n\n if isinstance(f, np.ufunc):\n results = f(self.values)\n return self._constructor(data=results, index=self.index,\n columns=self.columns, copy=False)\n else:\n if not broadcast:\n if not all(self.shape):\n return self._apply_empty_result(func, axis, reduce, *args,\n **kwds)\n\n if raw and not self._is_mixed_type:\n return self._apply_raw(f, axis)\n else:\n if reduce is None:\n reduce = True\n return self._apply_standard(f, axis, reduce=reduce)\n else:\n return self._apply_broadcast(f, axis)\n\n def _apply_empty_result(self, func, axis, reduce, *args, **kwds):\n if reduce is None:\n reduce = False\n try:\n reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),\n Series)\n except Exception:\n pass\n\n if reduce:\n return Series(NA, index=self._get_agg_axis(axis))\n else:\n return self.copy()\n\n def _apply_raw(self, func, axis):\n try:\n result = lib.reduce(self.values, func, axis=axis)\n except Exception:\n result = np.apply_along_axis(func, axis, self.values)\n\n # TODO: mixed type case\n if result.ndim == 2:\n return DataFrame(result, index=self.index,\n columns=self.columns)\n else:\n return Series(result, index=self._get_agg_axis(axis))\n\n def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):\n\n # skip if we are mixed datelike and trying reduce across axes\n # GH6125\n if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type:\n reduce=False\n\n # try to reduce first (by default)\n # this only matters if the reduction in values is of different dtype\n # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce\n if reduce:\n\n try:\n\n # the is the fast-path\n values = self.values\n dummy = Series(NA, index=self._get_axis(axis),\n dtype=values.dtype)\n\n labels = self._get_agg_axis(axis)\n result = lib.reduce(values, func, axis=axis, dummy=dummy,\n labels=labels)\n return Series(result, index=labels)\n except Exception:\n pass\n\n dtype = object if self._is_mixed_type else None\n if axis == 0:\n series_gen = (self.icol(i) for i in range(len(self.columns)))\n res_index = self.columns\n res_columns = self.index\n elif axis == 1:\n res_index = self.index\n res_columns = self.columns\n values = self.values\n series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype)\n for i, (arr, name) in\n enumerate(zip(values, res_index)))\n else: # pragma : no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))\n\n i = None\n keys = []\n results = {}\n if ignore_failures:\n successes = []\n for i, v in enumerate(series_gen):\n try:\n results[i] = func(v)\n keys.append(v.name)\n successes.append(i)\n except Exception:\n pass\n # so will work with MultiIndex\n if len(successes) < len(res_index):\n res_index = res_index.take(successes)\n else:\n try:\n for i, v in enumerate(series_gen):\n results[i] = func(v)\n keys.append(v.name)\n except Exception as e:\n if hasattr(e, 'args'):\n # make sure i is defined\n if i is not None:\n k = res_index[i]\n e.args = e.args + ('occurred at index %s' %\n com.pprint_thing(k),)\n raise\n\n if len(results) > 0 and is_sequence(results[0]):\n if not isinstance(results[0], Series):\n index = res_columns\n else:\n index = None\n\n result = self._constructor(data=results, index=index)\n result.columns = res_index\n\n if axis == 1:\n result = result.T\n result = result.convert_objects(copy=False)\n\n else:\n\n result = Series(results)\n result.index = res_index\n\n return result\n\n def _apply_broadcast(self, func, axis):\n if axis == 0:\n target = self\n elif axis == 1:\n target = self.T\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % axis)\n\n result_values = np.empty_like(target.values)\n columns = target.columns\n for i, col in enumerate(columns):\n result_values[:, i] = func(target[col])\n\n result = self._constructor(result_values, index=target.index,\n columns=target.columns)\n\n if axis == 1:\n result = result.T\n\n return result\n\n def applymap(self, func):\n \"\"\"\n Apply a function to a DataFrame that is intended to operate\n elementwise, i.e. like doing map(func, series) for each series in the\n DataFrame\n\n Parameters\n ----------\n func : function\n Python function, returns a single value from a single value\n\n Returns\n -------\n applied : DataFrame\n\n See also\n --------\n DataFrame.apply : For operations on rows/columns\n\n \"\"\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if com.needs_i8_conversion(x):\n f = com.i8_boxer(x)\n x = lib.map_infer(_values_from_object(x), f)\n return lib.map_infer(_values_from_object(x), func)\n return self.apply(infer)\n\n #----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(self, other, ignore_index=False, verify_integrity=False):\n \"\"\"\n Append rows of `other` to the end of this frame, returning a new\n object. 
Columns not in this frame are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : boolean, default False\n If True, do not use the index labels.\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n Returns\n -------\n appended : DataFrame\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in the\n DataFrame's index, the order of the columns in the resulting DataFrame\n will be unchanged.\n\n See also\n --------\n pandas.concat : General function to concatenate DataFrame, Series\n or Panel objects\n\n Examples\n --------\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError('Can only append a Series if ignore_index=True'\n ' or if the Series has a name')\n\n index = None if other.name is None else [other.name]\n combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist()\n other = other.reindex(combined_columns, copy=False)\n other = DataFrame(other.values.reshape((1, len(other))),\n index=index, columns=combined_columns).convert_objects()\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list) and not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.ix[:, self.columns]\n\n from pandas.tools.merge import concat\n if isinstance(other, (list, tuple)):\n to_concat = [self] + other\n else:\n to_concat = [self, other]\n return concat(to_concat, ignore_index=ignore_index,\n verify_integrity=verify_integrity)\n\n def join(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n \"\"\"\n Join columns with other DataFrame either on index or on a key\n column. Efficiently Join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series with name field set, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame\n on : column name, tuple/list of column names, or array-like\n Column(s) to use for joining, otherwise join on index. If multiples\n columns given, the passed DataFrame must have a MultiIndex. Can\n pass an array as the join key if not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. 
Default: 'left'\n for joining on index, None otherwise\n\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n sort : boolean, default False\n Order result DataFrame lexicographically by the join key. If False,\n preserves the index order of the calling (left) DataFrame\n\n Notes\n -----\n on, lsuffix, and rsuffix options are not supported when passing a list\n of DataFrame objects\n\n Returns\n -------\n joined : DataFrame\n \"\"\"\n # For SparseDataFrame's benefit\n return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,\n rsuffix=rsuffix, sort=sort)\n\n def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n from pandas.tools.merge import merge, concat\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError('Other Series must have a name')\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n return merge(self, other, left_on=on, how=how,\n left_index=on is None, right_index=True,\n suffixes=(lsuffix, rsuffix), sort=sort)\n else:\n if on is not None:\n raise ValueError('Joining multiple DataFrames only supported'\n ' for joining on index')\n\n # join indexes only using concat\n if how == 'left':\n how = 'outer'\n join_axes = [self.index]\n else:\n join_axes = None\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n if can_concat:\n return concat(frames, axis=1, join=how, join_axes=join_axes,\n verify_integrity=True)\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(joined, frame, how=how,\n left_index=True, right_index=True)\n\n return joined\n\n @Substitution('')\n @Appender(_merge_doc, indents=2)\n def merge(self, right, how='inner', on=None, left_on=None, right_on=None,\n left_index=False, right_index=False, sort=False,\n suffixes=('_x', '_y'), copy=True):\n from pandas.tools.merge import merge\n return merge(self, right, how=how, on=on,\n left_on=left_on, right_on=right_on,\n left_index=left_index, right_index=right_index, sort=sort,\n suffixes=suffixes, copy=copy)\n\n #----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method='pearson', min_periods=1):\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'}\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. 
Currently only available for pearson\n and spearman correlation\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n\n if method == 'pearson':\n correl = _algos.nancorr(com._ensure_float64(mat),\n minp=min_periods)\n elif method == 'spearman':\n correl = _algos.nancorr_spearman(com._ensure_float64(mat),\n minp=min_periods)\n else:\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = NA\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n\n return self._constructor(correl, index=cols, columns=cols)\n\n def cov(self, min_periods=None):\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n y : DataFrame\n\n Notes\n -----\n `y` contains the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-1 (unbiased estimator).\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n\n if notnull(mat).all():\n if min_periods is not None and min_periods > len(mat):\n baseCov = np.empty((mat.shape[1], mat.shape[1]))\n baseCov.fill(np.nan)\n else:\n baseCov = np.cov(mat.T)\n baseCov = baseCov.reshape((len(cols), len(cols)))\n else:\n baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True,\n minp=min_periods)\n\n return self._constructor(baseCov, index=cols, columns=cols)\n\n def corrwith(self, other, axis=0, drop=False):\n \"\"\"\n Compute pairwise correlation between rows or columns of two DataFrame\n objects.\n\n Parameters\n ----------\n other : DataFrame\n axis : {0, 1}\n 0 to compute column-wise, 1 for row-wise\n drop : boolean, default False\n Drop missing indices from result, default returns union of all\n\n Returns\n -------\n correls : Series\n \"\"\"\n axis = self._get_axis_number(axis)\n if isinstance(other, Series):\n return self.apply(other.corr, axis=axis)\n\n this = self._get_numeric_data()\n other = other._get_numeric_data()\n\n left, right = this.align(other, join='inner', copy=False)\n\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n if axis == 1:\n left = left.T\n right = right.T\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n if not drop:\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n correl = correl.reindex(result_index)\n\n return correl\n\n #----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(self, axis=0, level=None, numeric_only=False):\n \"\"\"\n Return Series with number of non-NA/null observations over requested\n axis. 
Works with non-floating point data as well (detects NaN and None)\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a DataFrame\n numeric_only : boolean, default False\n Include only float, int, boolean data\n\n Returns\n -------\n count : Series (or DataFrame if level specified)\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis,\n numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = Series(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type:\n result = notnull(frame).sum(axis=axis)\n else:\n counts = notnull(frame.values).sum(axis=axis)\n result = Series(counts, index=frame._get_agg_axis(axis))\n\n return result.astype('int64')\n\n def _count_level(self, level, axis=0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\"Can only count levels on hierarchical %s.\" %\n self._get_axis_name(axis))\n\n if frame._is_mixed_type:\n # Since we have mixed types, calling notnull(frame.values) might\n # upcast everything to object\n mask = notnull(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n mask = notnull(frame.values)\n\n if axis == 1:\n # We're transposing the mask rather than frame to avoid potential\n # upcasts to object, which induces a ~20x slowdown\n mask = mask.T\n\n if isinstance(level, compat.string_types):\n level = count_axis._get_level_number(level)\n\n level_index = count_axis.levels[level]\n labels = com._ensure_int64(count_axis.labels[level])\n counts = lib.count_level_2d(mask, labels, len(level_index))\n\n result = DataFrame(counts, index=level_index,\n columns=agg_axis)\n\n if axis == 1:\n # Undo our earlier transpose\n return result.T\n else:\n return result\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n axis = self._get_axis_number(axis)\n f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)\n labels = self._get_agg_axis(axis)\n\n # exclude timedelta/datetime unless we are uniform types\n if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:\n numeric_only = True\n\n if numeric_only is None:\n try:\n values = self.values\n result = f(values)\n except Exception as e:\n\n # try by-column first\n if filter_type is None and axis == 0:\n try:\n\n # this can end up with a non-reduction\n # but not always. 
if the types are mixed\n # with datelike then need to make sure a series\n result = self.apply(f,reduce=False)\n if result.ndim == self.ndim:\n result = result.iloc[0]\n return result\n except:\n pass\n\n if filter_type is None or filter_type == 'numeric':\n data = self._get_numeric_data()\n elif filter_type == 'bool':\n data = self._get_bool_data()\n else: # pragma: no cover\n e = NotImplementedError(\"Handling exception with filter_\"\n \"type %s not implemented.\"\n % filter_type)\n raise_with_traceback(e)\n result = f(data.values)\n labels = data._get_agg_axis(axis)\n else:\n if numeric_only:\n if filter_type is None or filter_type == 'numeric':\n data = self._get_numeric_data()\n elif filter_type == 'bool':\n data = self._get_bool_data()\n else: # pragma: no cover\n msg = (\"Generating numeric_only data with filter_type %s\"\n \"not supported.\" % filter_type)\n raise NotImplementedError(msg)\n values = data.values\n labels = data._get_agg_axis(axis)\n else:\n values = self.values\n result = f(values)\n\n if is_object_dtype(result.dtype):\n try:\n if filter_type is None or filter_type == 'numeric':\n result = result.astype(np.float64)\n elif filter_type == 'bool' and notnull(result).all():\n result = result.astype(np.bool_)\n except (ValueError, TypeError):\n\n # try to coerce to the original dtypes item by item if we can\n if axis == 0:\n result = com._coerce_to_dtypes(result, self.dtypes)\n\n return Series(result, index=labels)\n\n def idxmin(self, axis=0, skipna=True):\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA\n\n Returns\n -------\n idxmin : Series\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n See Also\n --------\n Series.idxmin\n \"\"\"\n axis = self._get_axis_number(axis)\n indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else NA for i in indices]\n return Series(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis=0, skipna=True):\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be first index.\n\n Returns\n -------\n idxmax : Series\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n See Also\n --------\n Series.idxmax\n \"\"\"\n axis = self._get_axis_number(axis)\n indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else NA for i in indices]\n return Series(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num):\n \"\"\" let's be explict about this \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)\n\n def mode(self, axis=0, numeric_only=False):\n \"\"\"\n Gets the mode(s) of each element along the axis selected. Empty if nothing\n has 2+ occurrences. Adds a row for each mode per label, fills in gaps\n with nan. 
\n \n Note that there could be multiple values returned for the selected\n axis (when more than one item share the maximum frequency), which is the \n reason why a dataframe is returned. If you want to impute missing values \n with the mode in a dataframe ``df``, you can just do this: \n ``df.fillna(df.mode().iloc[0])``\n\n Parameters\n ----------\n axis : {0, 1, 'index', 'columns'} (default 0)\n * 0/'index' : get mode of each column\n * 1/'columns' : get mode of each row\n numeric_only : boolean, default False\n if True, only apply to numeric columns\n\n Returns\n -------\n modes : DataFrame (sorted)\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})\n >>> df.mode()\n A\n 0 1\n 1 2\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n f = lambda s: s.mode()\n return data.apply(f, axis=axis)\n\n def quantile(self, q=0.5, axis=0, numeric_only=True):\n \"\"\"\n Return values at the given quantile over requested axis, a la\n numpy.percentile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n\n Returns\n -------\n quantiles : Series or DataFrame\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n\n >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n \"\"\"\n per = np.asarray(q) * 100\n\n if not com.is_list_like(per):\n per = [per]\n q = [q]\n squeeze = True\n else:\n squeeze = False\n\n def f(arr, per):\n if arr._is_datelike_mixed_type:\n values = _values_from_object(arr).view('i8')\n else:\n values = arr.astype(float)\n values = values[notnull(values)]\n if len(values) == 0:\n return NA\n else:\n return _quantile(values, per)\n\n data = self._get_numeric_data() if numeric_only else self\n if axis == 1:\n data = data.T\n\n # need to know which cols are timestamp going in so that we can\n # map timestamp over them after getting the quantile.\n is_dt_col = data.dtypes.map(com.is_datetime64_dtype)\n is_dt_col = is_dt_col[is_dt_col].index\n\n quantiles = [[f(vals, x) for x in per]\n for (_, vals) in data.iteritems()]\n result = DataFrame(quantiles, index=data._info_axis, columns=q).T\n if len(is_dt_col) > 0:\n result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp)\n if squeeze:\n if result.shape == (1, 1):\n result = result.T.iloc[:, 0] # don't want scalar\n else:\n result = result.T.squeeze()\n result.name = None # For groupby, so it can set an index name\n return result\n\n def rank(self, axis=0, numeric_only=None, method='average',\n na_option='keep', ascending=True, pct=False):\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. 
Equal values are\n assigned a rank that is the average of the ranks of those values\n\n Parameters\n ----------\n axis : {0, 1}, default 0\n Ranks over columns (0) or rows (1)\n numeric_only : boolean, default None\n Include only float, int, boolean data\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if numeric_only is None:\n try:\n ranks = algos.rank(self.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option,\n pct=pct)\n return self._constructor(ranks, index=self.index,\n columns=self.columns)\n except TypeError:\n numeric_only = True\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n ranks = algos.rank(data.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option, pct=pct)\n return self._constructor(ranks, index=data.index, columns=data.columns)\n\n def to_timestamp(self, freq=None, how='start', axis=0, copy=True):\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period\n\n Parameters\n ----------\n freq : string, default frequency of PeriodIndex\n Desired frequency\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end\n axis : {0, 1} default 0\n The axis to convert (the index by default)\n copy : boolean, default True\n If false then underlying input data is not copied\n\n Returns\n -------\n df : DataFrame with DatetimeIndex\n \"\"\"\n new_data = self._data\n if copy:\n new_data = new_data.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))\n elif axis == 1:\n new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))\n\n return self._constructor(new_data)\n\n def to_period(self, freq=None, axis=0, copy=True):\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed)\n\n Parameters\n ----------\n freq : string, default\n axis : {0, 1}, default 0\n The axis to convert (the index by default)\n copy : boolean, default True\n If False then underlying input data is not copied\n\n Returns\n -------\n ts : TimeSeries with PeriodIndex\n \"\"\"\n new_data = self._data\n if copy:\n new_data = new_data.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n new_data.set_axis(1, self.index.to_period(freq=freq))\n elif axis == 1:\n new_data.set_axis(0, self.columns.to_period(freq=freq))\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1. 
Got %s' % str(axis))\n\n return self._constructor(new_data)\n\n def isin(self, values):\n \"\"\"\n Return boolean DataFrame showing whether each element in the\n DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dictionary\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dictionary, the keys must be the column names,\n which must match. If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n\n DataFrame of booleans\n\n Examples\n --------\n When ``values`` is a list:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n >>> df.isin([1, 3, 12, 'a'])\n A B\n 0 True True\n 1 False False\n 2 True False\n\n When ``values`` is a dict:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})\n >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})\n A B\n 0 True False # Note that B didn't match the 1 here.\n 1 False True\n 2 True True\n\n When ``values`` is a Series or DataFrame:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})\n >>> df.isin(other)\n A B\n 0 True False\n 1 False False # Column A in `other` has a 3, but not at index 1.\n 2 True True\n \"\"\"\n if isinstance(values, dict):\n from collections import defaultdict\n from pandas.tools.merge import concat\n values = defaultdict(list, values)\n return concat((self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)), axis=1)\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"ValueError: cannot compute isin with\"\n \" a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis='index')\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"ValueError: cannot compute isin with\"\n \" a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\"only list-like or dict-like objects are\"\n \" allowed to be passed to DataFrame.isin(), \"\n \"you passed a \"\n \"{0!r}\".format(type(values).__name__))\n return DataFrame(lib.ismember(self.values.ravel(),\n set(values)).reshape(self.shape),\n self.index,\n self.columns)\n\n #----------------------------------------------------------------------\n # Deprecated stuff\n\n def combineAdd(self, other):\n \"\"\"\n Add two DataFrame objects and do not propagate\n NaN values, so if for a (column, time) one frame is missing a\n value, it will default to the other frame's value (which might\n be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n return self.add(other, fill_value=0.)\n\n def combineMult(self, other):\n \"\"\"\n Multiply two DataFrame objects and do not propagate NaN values, so if\n for a (column, time) one frame is missing a value, it will default to\n the other frame's value (which might be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n return self.mul(other, fill_value=1.)\n\n\nDataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,\n axes_are_reversed=True, aliases={'rows': 0})\nDataFrame._add_numeric_operations()\n\n_EMPTY_SERIES = Series([])\n\ndef _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):\n \"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a 
lot of exceptional cases.\n \"\"\"\n # figure out the index, if necessary\n if index is None:\n index = extract_index(arrays)\n else:\n index = _ensure_index(index)\n\n # don't force copy because getting jammed in an ndarray anyway\n arrays = _homogenize(arrays, index, dtype)\n\n # from BlockManager perspective\n axes = [_ensure_index(columns), _ensure_index(index)]\n\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n\ndef extract_index(data):\n from pandas.core.index import _union_indexes\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_series = False\n have_dicts = False\n\n for v in data:\n if isinstance(v, Series):\n have_series = True\n indexes.append(v.index)\n elif isinstance(v, dict):\n have_dicts = True\n indexes.append(list(v.keys()))\n elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:\n have_raw_arrays = True\n raw_lengths.append(len(v))\n\n if not indexes and not raw_lengths:\n raise ValueError('If using all scalar values, you must pass'\n ' an index')\n\n if have_series or have_dicts:\n index = _union_indexes(indexes)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('arrays must all be same length')\n\n if have_dicts:\n raise ValueError('Mixing dicts with non-Series may lead to '\n 'ambiguous ordering.')\n\n if have_series:\n if lengths[0] != len(index):\n msg = ('array length %d does not match index length %d'\n % (lengths[0], len(index)))\n raise ValueError(msg)\n else:\n index = Index(np.arange(lengths[0]))\n\n return _ensure_index(index)\n\n\ndef _prep_ndarray(values, copy=True):\n if not isinstance(values, (np.ndarray, Series, Index)):\n if len(values) == 0:\n return np.empty((0, 0), dtype=object)\n\n def convert(v):\n return com._possibly_convert_platform(v)\n\n # we could have a 1-dim or 2-dim list here\n # this is equiv of np.asarray, but does object conversion\n # and platform dtype preservation\n try:\n if com.is_list_like(values[0]) or hasattr(values[0], 'len'):\n values = np.array([convert(v) for v in values])\n else:\n values = convert(values)\n except:\n values = convert(values)\n\n else:\n\n # drop subclass info, do not copy data\n values = np.asarray(values)\n if copy:\n values = values.copy()\n\n if values.ndim == 1:\n values = values.reshape((values.shape[0], 1))\n elif values.ndim != 2:\n raise ValueError('Must pass 2-d input')\n\n return values\n\n\ndef _to_arrays(data, columns, coerce_float=False, dtype=None):\n \"\"\"\n Return list of arrays, columns\n \"\"\"\n if isinstance(data, DataFrame):\n if columns is not None:\n arrays = [data.icol(i).values for i, col in enumerate(data.columns)\n if col in columns]\n else:\n columns = data.columns\n arrays = [data.icol(i).values for i in range(len(columns))]\n\n return arrays, columns\n\n if not len(data):\n if isinstance(data, np.ndarray):\n columns = data.dtype.names\n if columns is not None:\n return [[]] * len(columns), columns\n return [], [] # columns if columns is not None else []\n if isinstance(data[0], (list, tuple)):\n return _list_to_arrays(data, columns, coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], collections.Mapping):\n return _list_of_dict_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], Series):\n return _list_of_series_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], Categorical):\n if columns is None:\n 
columns = _default_index(len(data))\n return data, columns\n elif (isinstance(data, (np.ndarray, Series, Index))\n and data.dtype.names is not None):\n\n columns = list(data.dtype.names)\n arrays = [data[k] for k in columns]\n return arrays, columns\n else:\n # last ditch effort\n data = lmap(tuple, data)\n return _list_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n\n\ndef _masked_rec_array_to_mgr(data, index, columns, dtype, copy):\n \"\"\" extract from a masked rec array and create the manager \"\"\"\n\n # essentially process a record array then fill it\n fill_value = data.fill_value\n fdata = ma.getdata(data)\n if index is None:\n index = _get_names_from_index(fdata)\n if index is None:\n index = _default_index(len(data))\n index = _ensure_index(index)\n\n if columns is not None:\n columns = _ensure_index(columns)\n arrays, arr_columns = _to_arrays(fdata, columns)\n\n # fill if needed\n new_arrays = []\n for fv, arr, col in zip(fill_value, arrays, arr_columns):\n mask = ma.getmaskarray(data[col])\n if mask.any():\n arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)\n arr[mask] = fv\n new_arrays.append(arr)\n\n # create the manager\n arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)\n if columns is None:\n columns = arr_columns\n\n mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)\n\n if copy:\n mgr = mgr.copy()\n return mgr\n\n\ndef _reorder_arrays(arrays, arr_columns, columns):\n # reorder according to the columns\n if (columns is not None and len(columns) and arr_columns is not None and\n len(arr_columns)):\n indexer = _ensure_index(\n arr_columns).get_indexer(columns)\n arr_columns = _ensure_index(\n [arr_columns[i] for i in indexer])\n arrays = [arrays[i] for i in indexer]\n return arrays, arr_columns\n\n\ndef _list_to_arrays(data, columns, coerce_float=False, dtype=None):\n if len(data) > 0 and isinstance(data[0], tuple):\n content = list(lib.to_object_array_tuples(data).T)\n else:\n # list of lists\n content = list(lib.to_object_array(data).T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n\n\ndef _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):\n from pandas.core.index import _get_combined_index\n\n if columns is None:\n columns = _get_combined_index([\n s.index for s in data if getattr(s, 'index', None) is not None\n ])\n\n indexer_cache = {}\n\n aligned_values = []\n for s in data:\n index = getattr(s, 'index', None)\n if index is None:\n index = _default_index(len(s))\n\n if id(index) in indexer_cache:\n indexer = indexer_cache[id(index)]\n else:\n indexer = indexer_cache[id(index)] = index.get_indexer(columns)\n\n values = _values_from_object(s)\n aligned_values.append(com.take_1d(values, indexer))\n\n values = np.vstack(aligned_values)\n\n if values.dtype == np.object_:\n content = list(values.T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n else:\n return values.T, columns\n\n\ndef _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):\n if columns is None:\n gen = (list(x.keys()) for x in data)\n columns = lib.fast_unique_multiple_list_gen(gen)\n\n # assure that they are of the base dict class and not of derived\n # classes\n data = [(type(d) is dict) and d or dict(d) for d in data]\n\n content = list(lib.dicts_to_array(data, list(columns)).T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n\n\ndef _convert_object_array(content, columns, 
coerce_float=False, dtype=None):\n if columns is None:\n columns = _default_index(len(content))\n else:\n if len(columns) != len(content): # pragma: no cover\n # caller's responsibility to check for this...\n raise AssertionError('%d columns passed, passed data had %s '\n 'columns' % (len(columns), len(content)))\n\n # provide soft conversion of object dtypes\n def convert(arr):\n if dtype != object and dtype != np.object:\n arr = lib.maybe_convert_objects(arr, try_float=coerce_float)\n arr = com._possibly_cast_to_datetime(arr, dtype)\n return arr\n\n arrays = [ convert(arr) for arr in content ]\n\n return arrays, columns\n\n\ndef _get_names_from_index(data):\n index = lrange(len(data))\n has_some_name = any([getattr(s, 'name', None) is not None for s in data])\n if not has_some_name:\n return index\n\n count = 0\n for i, s in enumerate(data):\n n = getattr(s, 'name', None)\n if n is not None:\n index[i] = n\n else:\n index[i] = 'Unnamed %d' % count\n count += 1\n\n return index\n\n\ndef _homogenize(data, index, dtype=None):\n from pandas.core.series import _sanitize_array\n\n oindex = None\n homogenized = []\n\n for v in data:\n if isinstance(v, Series):\n if dtype is not None:\n v = v.astype(dtype)\n if v.index is not index:\n # Forces alignment. No need to copy data since we\n # are putting it into an ndarray later\n v = v.reindex(index, copy=False)\n else:\n if isinstance(v, dict):\n if oindex is None:\n oindex = index.astype('O')\n if type(v) == dict:\n # fast cython method\n v = lib.fast_multiget(v, oindex.values, default=NA)\n else:\n v = lib.map_infer(oindex.values, v.get)\n\n v = _sanitize_array(v, index, dtype=dtype, copy=False,\n raise_cast_failure=False)\n\n homogenized.append(v)\n\n return homogenized\n\n\ndef _from_nested_dict(data):\n # TODO: this should be seriously cythonized\n new_data = OrderedDict()\n for index, s in compat.iteritems(data):\n for col, v in compat.iteritems(s):\n new_data[col] = new_data.get(col, OrderedDict())\n new_data[col][index] = v\n return new_data\n\n\ndef _put_str(s, space):\n return ('%s' % s)[:space].ljust(space)\n\n\n#----------------------------------------------------------------------\n# Add plotting methods to DataFrame\n\nimport pandas.tools.plotting as gfx\n\nDataFrame.plot = gfx.plot_frame\nDataFrame.hist = gfx.hist_frame\n\n\n@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)\ndef boxplot(self, column=None, by=None, ax=None, fontsize=None,\n rot=0, grid=True, figsize=None, layout=None, return_type=None,\n **kwds):\n import pandas.tools.plotting as plots\n import matplotlib.pyplot as plt\n ax = plots.boxplot(self, column=column, by=by, ax=ax,\n fontsize=fontsize, grid=grid, rot=rot,\n figsize=figsize, layout=layout, return_type=return_type,\n **kwds)\n plt.draw_if_interactive()\n return ax\n\nDataFrame.boxplot = boxplot\n\nops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)\nops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.core.generic.NDFrame._set_item",
"pandas.core.internals.create_block_manager_from_arrays",
"numpy.asarray",
"pandas.core.indexing.convert_to_index_sliceable",
"pandas.lib.map_infer",
"pandas.util.decorators.deprecate_kwarg",
"pandas.core.format.DataFrameFormatter",
"pandas.lib.fast_unique_multiple_list_gen",
"pandas.core.common.is_list_like",
"pandas.core.reshape.unstack",
"numpy.ma.getdata",
"pandas.core.nanops.nanargmin",
"pandas.core.common.in_ipython_frontend",
"pandas.core.indexing.maybe_droplevels",
"pandas.core.groupby._nargsort",
"pandas.core.groupby.get_group_index",
"pandas.core.common._possibly_infer_to_datetimelike",
"pandas.core.indexing.check_bool_indexer",
"pandas.core.common.ensure_float",
"pandas.compat.OrderedDict",
"numpy.rec.fromarrays",
"numpy.array",
"pandas.core.common._coerce_to_dtypes",
"numpy.dot",
"pandas.core.common.in_interactive_session",
"pandas.core.reshape.stack",
"pandas.compat.iteritems",
"pandas.io.stata.StataWriter",
"pandas.core.algorithms.rank",
"pandas.core.common.take_1d",
"pandas.core.sparse.SparseDataFrame",
"numpy.cov",
"pandas.core.common._get_info_slice",
"numpy.vstack",
"pandas.core.series.Series.from_array",
"pandas.core.common.is_categorical_dtype",
"pandas.util.decorators.Appender",
"pandas.core.format.ExcelFormatter",
"pandas.computation.eval.eval",
"pandas.core.common.i8_boxer",
"pandas.compat.lzip",
"pandas.lib.reduce",
"pandas.tools.merge.concat",
"pandas.core.common._possibly_downcast_to_dtype",
"pandas.core.common.is_datetime64_dtype",
"pandas.core.format.get_console_size",
"pandas.core.reshape.pivot",
"pandas.core.common.notnull",
"pandas.lib.to_object_array_tuples",
"pandas.core.common._ensure_int64",
"pandas.computation.expressions.evaluate",
"pandas.lib.maybe_convert_objects",
"pandas.io.excel.ExcelWriter",
"pandas.core.common._maybe_upcast_putmask",
"pandas.core.common.isnull",
"pandas.core.common._default_index",
"pandas.compat.zip",
"numpy.apply_along_axis",
"pandas.core.reshape.stack_multiple",
"pandas.core.panel.Panel",
"pandas.core.format._put_lines",
"numpy.ma.getmaskarray",
"pandas.hashtable.duplicated_int64",
"pandas.core.index.MultiIndex.from_arrays",
"pandas.core.common.is_object_dtype",
"pandas.core.common.take_2d_multi",
"pandas.core.common._invalidate_string_dtypes",
"numpy.iterable",
"pandas.compat.StringIO",
"numpy.empty_like",
"pandas.core.common._unpickle_array",
"pandas.compat.raise_with_traceback",
"pandas.core.common.in_qtconsole",
"pandas.core.index._ensure_index",
"pandas.io.parsers.read_table",
"pandas.core.common._lcd_dtypes",
"pandas.core.series._sanitize_index",
"numpy.arange",
"pandas.compat.u",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.computation.expressions.where",
"pandas.core.series.Series",
"pandas.lib.fast_multiget",
"pandas.core.index._union_indexes",
"pandas.compat.range",
"pandas.core.nanops.get_corr_func",
"pandas.core.internals.create_block_manager_from_blocks",
"pandas.core.common._ensure_platform_int",
"pandas.core.common.needs_i8_conversion",
"pandas.lib.to_object_array",
"pandas.core.nanops.nanargmax",
"pandas.io.gbq.to_gbq",
"pandas.core.common._asarray_tuplesafe",
"pandas.tools.merge.merge",
"numpy.isfinite",
"pandas.core.config.get_option",
"pandas.core.common.PandasError",
"pandas.core.common._values_from_object",
"matplotlib.pyplot.draw_if_interactive",
"pandas.core.common._maybe_box_datetimelike",
"pandas.core.common.is_sequence",
"pandas.core.generic.NDFrame.__init__",
"pandas.core.common._infer_dtype_from_scalar",
"pandas.core.groupby._lexsort_indexer",
"pandas.core.ops.add_special_arithmetic_methods",
"pandas.core.common._possibly_cast_to_datetime",
"pandas.core.common._try_sort",
"pandas.core.common.is_iterator",
"pandas.util.decorators.deprecate",
"pandas.core.common._possibly_convert_platform",
"pandas.core.common.is_bool_indexer",
"pandas.core.common._ensure_float64",
"numpy.percentile",
"numpy.compress",
"numpy.empty",
"pandas.core.common._maybe_upcast",
"pandas.compat.lmap",
"pandas.core.index.MultiIndex.from_tuples",
"pandas.util.decorators.Substitution",
"pandas.core.index.Index",
"pandas.core.series._sanitize_array",
"pandas.tools.plotting.boxplot",
"pandas.core.common.pprint_thing"
]
] |
AIDefender/MyMBPO | [
"d75699b65af8eea14acffc1b5738900d1079ad46"
] | [
"mbpo/static/reacher.py"
] | [
"import numpy as np\n\nclass StaticFns:\n\n @staticmethod\n def termination_fn(obs, act, next_obs):\n\n done = np.array([False]).repeat(len(obs))\n done = done[:,None]\n return done\n"
] | [
[
"numpy.array"
]
] |
colizoli/letter_color_mri | [
"f4c4d8a91aa17664bdeb16b0436fc8f8fdac2710"
] | [
"experiment/Behav_Consistency.py"
] | [
"\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLetter-color Consistency test\nO.Colizoli 2020\nEach letter of the alphabet in random order x 2\nColor wheel opens at a randomized color on each trial (but does not turn)\nPython 2..7\n\"\"\"\n# data saved in ~/LogFiles/sub-XXX\n\n# Import necessary modules\nimport random\nimport numpy as np\nimport pandas as pd\nimport os, time # for paths and data\nfrom IPython import embed as shell\ntry:\n import Tkinter as tk # py27\n from tkColorChooser import askcolor\nexcept:\n import tkinter as tk\n from tkinter.colorchooser import askcolor\n\n\n# Get subject number via tkinter (command line doesn't work in PsychoPy)\nsubject_ID = []\nsession = []\n## INPUT WINDOW\nclass GetInput():\n def __init__(self):\n self.root2 = tk.Tk()\n self.root2.title(\"Subject and Session\")\n # always put in same location\n w = 400 # width for the Tk root\n h = 200 # height for the Tk root\n # get screen width and height\n ws = self.root2.winfo_screenwidth() # width of the screen\n hs = self.root2.winfo_screenheight() # height of the screen\n # calculate x and y coordinates for the Tk root window\n x = (ws/6) - (w/6)\n y = (hs/6) - (h/6)\n self.root2.geometry('%dx%d+%d+%d' % (w, h, x, y))\n # Subject\n self.e = tk.Entry(self.root2)\n self.e.insert(0, 'Subject Number')\n self.e.pack()\n self.e.focus_set()\n # Session\n self.e2 = tk.Entry(self.root2)\n self.e2.insert(0, 'Session')\n self.e2.pack()\n self.e2.focus_set()\n \n txt='If each letter of the alphabet\\\n \\nwere to have a unique color,\\\n \\nwhat color would it have?\\\n \\n\\nThere are no right or wrong answers.'\n # instructions\n self.instr = tk.Label(self.root2, bg='white', text=txt, font=(\"Helvetica\", 14))\n self.instr.pack()\n \n b = tk.Button(self.root2,text='OK',command=self.get_input)\n b.pack(side='bottom')\n \n self.root2.mainloop()\n \n def get_input(self):\n subj_str = self.e.get() \n sess_str = self.e2.get()\n subject_ID.append(subj_str)\n session.append(sess_str)\n self.root2.destroy()\n \n## ASK INPUT\napp = GetInput() # subject and session\nsubject_ID = int(subject_ID[0])\nsession = int(session[0])\n\n## Create LogFile folder cwd/LogFiles\ncwd = os.getcwd()\nlogfile_dir = os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav') \nif not os.path.isdir(logfile_dir):\n os.makedirs(logfile_dir)\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\") \noutput_alphabet = os.path.join(logfile_dir,'sub-{}_sess-{}_task-consistency_events_{}.tsv'.format(subject_ID,session,timestr))\n\n### CONSISTENCY TASK ###\nalphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n#alphabet = ['a','b','c']\n\nREPS = 2 # number of times to repeat whole alphabet\n\nRGBS = [] # save output\nL = '2' # place holder \n\nclass Test():\n def __init__(self):\n self.counter = 1\n self.root = tk.Tk()\n self.root.title(\"Subject {} Session {}\".format(subject_ID, session))\n # always put in same location\n # get screen width and height\n ws = self.root.winfo_screenwidth() # width of the screen\n hs = self.root.winfo_screenheight() # height of the screen\n # open in full screen\n self.root.geometry('%dx%d+%d+%d' % (ws, hs, 0, 0))\n self.open1 = tk.Button(self.root, text='Pick a color:', command=self.pick_a_color, font=('Helvetica', '36'),padx=5, pady=5)\n self.open1.pack(fill=tk.X, expand=False) \n self.letter = tk.Label(self.root, bg='white', text=L, font=(\"Helvetica\", 90))\n self.letter.pack()\n self.root.mainloop()\n \n def 
quit(self):\n RGBS.append( [L ,self.RGB, self.HEX, abc] )\n self.root.destroy()\n \n def pick_a_color(self,): \n # GET COLOR CHOOSER NOT OPEN ON TOP OF ROOT\n self.RGB,self.HEX = askcolor((random.randint(0,255), random.randint(0,255), random.randint(0,255)), parent=None, title='Pick a color: {}'.format(L) )\n self.letter.configure(fg = self.HEX)\n if self.counter:\n exit_button = tk.Button(self.root, text='FINISHED', command=self.quit, font=('Helvetica', '28'))\n exit_button.pack()\n self.counter = 0\n self.root.mainloop()\n\n# MAIN LOOP \nabc = 1 # round\nfor R in np.arange(REPS):\n random.shuffle(alphabet) \n # Open a new GUI per letter \n for L in alphabet: \n app = Test()\n # save colors on each trial to prevent losing data\n \n DFS = pd.DataFrame(RGBS)\n print(RGBS)\n\n try:\n DFS.columns = [\"letter\",\"rgb\",\"hex\",\"choice\"]\n DFS['subject'] = np.repeat(subject_ID,len(DFS))\n DFS['r'] = [c[0] for c in DFS['rgb']]\n DFS['g'] = [c[1] for c in DFS['rgb']]\n DFS['b'] = [c[2] for c in DFS['rgb']]\n except:\n # clicked window away\n pass\n DFS.to_csv(output_alphabet, sep='\\t') # save all alphabet/preferences for both groups (also in case it goes wrong)\n abc+=1\n\n####################################\n## SAVE OUTPUT & determine conditions\nprint(RGBS)\nprint('consistency test - success!')\n\n\n##### OUTPUT FIGURE WITH COLORS #####\n# Sort and show letters x 2 side by side\ndel tk # py27\ndel askcolor\nimport matplotlib.pyplot as plt # doesn't work together with tkinter\nimport seaborn as sns\nfig = plt.figure(figsize=(10,5))\n\n# Sort so the same letters go side by side for each choice\ntry:\n DFS.sort_values(by=['choice', 'letter'],inplace=True)\nexcept:\n DFS = DFS.sort(['choice', 'letter'])\n\nDFS.reset_index(inplace=True)\nfor i,A in enumerate(alphabet):\n ax = fig.add_subplot(6,5,i+1)\n ax.text(0.5, 0.5, DFS['letter'][i], color=DFS['hex'][i],fontsize=18)\n ax.text(0.25, 0.5, DFS['letter'][i+len(alphabet)], color=DFS['hex'][i+len(alphabet)],fontsize=18)\n ax.set_axis_off() \n\nsns.despine(offset=10, trim=True)\nplt.tight_layout()\nfig.savefig(os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav','sub-{}_sess-{}_colors.pdf'.format(subject_ID,session)))\nprint('success: sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))\n\n \n \n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame"
]
] |
terraPulse/boreal-tcc-analysis | [
"e8a7b4bae727811d03bb57c5738945af7fe2920d"
] | [
"src/bin/create_esta_layers.py"
] | [
"'''\r\nFile: detect_forest_change.py\r\nAuthor: Min Feng\r\nVersion: 0.1\r\nCreate: 2018-04-20 15:42:37\r\nDescription: detect forest changes from foest probility layers and tree cover layers\r\n'''\r\n\r\nimport logging\r\n\r\ndef _load_tcc(f_tcc, msk):\r\n from gio import geo_raster_ex as gx\r\n from gio import config\r\n import numpy as np\r\n\r\n _bnd = gx.read_block(f_tcc, msk)\r\n if _bnd is None:\r\n return None\r\n \r\n _dat = np.zeros(msk.data.shape, dtype=np.uint8)\r\n\r\n _m_tcc = config.getfloat('conf', 'min_tcc')\r\n _idx = _bnd.data >= _m_tcc\r\n _dat[_idx] = 100\r\n\r\n _idx = _bnd.data > 100\r\n _dat[_idx] = _bnd.data[_idx]\r\n\r\n return msk.from_grid(_dat, nodata=255)\r\n\r\ndef _task(tile, d_out, d_ref, opts):\r\n from gio import file_unzip\r\n from gio import config\r\n from gio import file_mag\r\n from gio import metadata\r\n from gio import geo_raster as ge\r\n from gio import mod_filter\r\n import numpy as np\r\n import os\r\n import re\r\n\r\n _tag = tile.tag\r\n\r\n _ttt = config.get('conf', 'test_tile')\r\n if _ttt and _tag not in _ttt.replace(' ', '').split(','):\r\n return\r\n\r\n _m = re.match(r'(h\\d+)(v\\d+)', _tag)\r\n _h = _m.group(1)\r\n _v = _m.group(2)\r\n \r\n _d_out = os.path.join(d_out, _h, _v, _tag)\r\n _d_ref = os.path.join(d_ref, _h, _v, _tag)\r\n _f_met = os.path.join(_d_out, '%s_met.txt' % _tag)\r\n \r\n _fname = lambda t: os.path.join(_d_out, '%s_%s.tif' % (_tag, t))\r\n _fname_ref = lambda t: os.path.join(_d_ref, '%s_%s.tif' % (_tag, t))\r\n _fname_m1 = lambda t, a='_m1': _fname('%s_n0%s' % (t, a))\r\n\r\n # if not file_mag.get(_f_met).exists():\r\n # logging.info('skip non-existing result for %s' % _tag)\r\n # return\r\n\r\n if not file_mag.get(_fname_m1('loss_year')).exists():\r\n logging.info('skip non-existing result for %s' % _tag)\r\n return\r\n \r\n if (not _ttt) and file_mag.get(_fname_m1('esta_year')).exists() and \\\r\n (not config.getboolean('conf', 'over_write', False)):\r\n logging.info('skip processed esta result for %s' % _tag)\r\n return\r\n \r\n _b_loss_year = ge.open(_fname_m1('loss_year')).get_band().cache()\r\n _b_gain_year = ge.open(_fname_m1('gain_year')).get_band().cache()\r\n \r\n _b_loss_prob = ge.open(_fname_m1('loss_prob')).get_band().cache()\r\n _b_gain_prob = ge.open(_fname_m1('gain_prob')).get_band().cache()\r\n\r\n _f_tcc = config.get('conf', 'latest_tcc')\r\n _b_prob = _load_tcc(_f_tcc, _b_loss_year) if _f_tcc else ge.open(_fname_ref('age_prob')).get_band().cache()\r\n if _b_prob is None:\r\n logging.info('forced to use age_prob layer %s' % _fname_ref('age_prob'))\r\n _b_prob = ge.open(_fname_ref('age_prob')).get_band().cache()\r\n\r\n _d_forest_prob = _b_prob.data\r\n _d_loss = _b_loss_year.data\r\n _d_gain = _b_gain_year.data\r\n\r\n _d_esta = np.zeros(_d_forest_prob.shape, dtype=np.uint8)\r\n \r\n _d_prob = np.empty(_d_forest_prob.shape, dtype=np.float32)\r\n _d_prob.fill(100)\r\n _d_prob[_b_prob.data == _b_prob.nodata] = -9999\r\n \r\n _b_esta = _b_loss_year.from_grid(_d_esta, nodata=255)\r\n _b_esta.color_table = ge.load_colortable(config.get('conf', 'color'))\r\n\r\n _d_esta[_d_forest_prob > 100] = _d_forest_prob[_d_forest_prob > 100]\r\n \r\n for _y in range(1970, 2021):\r\n _y = _y - 1970\r\n \r\n _idx = _d_loss == _y\r\n _d_esta[_idx] = 100\r\n _d_prob[_idx] = _b_loss_prob.data[_idx]\r\n \r\n _idx = _d_gain == _y\r\n _d_esta[_idx] = _y\r\n _d_prob[_idx] = _b_gain_prob.data[_idx]\r\n \r\n _d_esta[_d_forest_prob < 50] = 100\r\n \r\n _d_test = (_d_esta < 100).astype(np.uint8)\r\n _d_test[(_d_esta < 100) & 
(_d_esta > 0)] = 1\r\n _b_test = _b_esta.from_grid(_d_test, nodata=255)\r\n mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch'))\r\n _d_esta[(_d_esta == 100) & (_b_test.data == 1)] = 0\r\n \r\n _d_test = ((_d_esta > 0) & (_d_esta <= 100)).astype(np.uint8)\r\n _d_test[(_d_esta < 100) & (_d_esta > 0)] = 1\r\n _b_test = _b_esta.from_grid(_d_test, nodata=255)\r\n mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch'))\r\n _d_esta[(_d_esta == 0) & (_b_test.data == 1)] = 100\r\n \r\n with file_unzip.file_unzip() as _zip:\r\n _zip.save(_b_esta, _fname_m1('esta_year'))\r\n _zip.save(_b_esta.from_grid(_d_prob, nodata=-9999), _fname_m1('esta_prob'))\r\n \r\n return True\r\n\r\ndef main(opts):\r\n import logging\r\n from gio import config\r\n from gio import file_mag\r\n from gio import global_task\r\n import os\r\n \r\n _d_inp = config.get('conf', 'input')\r\n _d_ref = config.get('conf', 'refer', _d_inp)\r\n \r\n _f_mak = file_mag.get(os.path.join(_d_inp, 'tasks.txt'))\r\n _ts = global_task.load(_f_mak)\r\n\r\n from gio import multi_task\r\n _rs = multi_task.run(_task, [(_t, os.path.join(_d_inp, 'data'), os.path.join(_d_ref, 'data'), opts) for _t in multi_task.load(_ts, opts)], opts)\r\n print('processed', len([_r for _r in _rs if _r]), 'tiles')\r\n\r\ndef usage():\r\n _p = environ_mag.usage(True)\r\n\r\n _p.add_argument('-i', '--input', dest='input')\r\n _p.add_argument('-r', '--refer', dest='refer')\r\n _p.add_argument('--latest-tcc', dest='latest_tcc')\r\n _p.add_argument('-w', '--over-write', dest='over_write', type='bool')\r\n _p.add_argument('--min-tcc', dest='min_tcc', type=int, default=30)\r\n _p.add_argument('-m', '--min-patch', dest='min_patch', type=float, default=100 * 100)\r\n _p.add_argument('--test-tile', dest='test_tile')\r\n\r\n return _p\r\n\r\nif __name__ == '__main__':\r\n from gio import environ_mag\r\n environ_mag.init_path()\r\n environ_mag.run(main, [environ_mag.config(usage())])\r\n"
] | [
[
"numpy.empty",
"numpy.zeros"
]
] |
DonaldWhyte/high-performance-data-processing-in-python | [
"f7f8076ff67d53be09e1d2f9988976e31b92f8e9"
] | [
"code/rolling_tests.py"
] | [
"import numpy as np\n\n\ndef _main():\n # Inputs\n n = 3\n x = np.arange(20, dtype=np.float64)\n\n # Slow average/std\n avg = np.zeros(len(x) - n + 1)\n std = np.zeros(len(x) - n + 1)\n for i in range(len(avg)):\n avg[i] = np.mean(x[i:i+n])\n std[i] = np.std(x[i:i+n])\n\n print('AVG')\n print('\\n'.join(str(x) for x in avg))\n print('STD:')\n print('\\n'.join(str(x) for x in std))\n\n # Fast std\n squares = np.square(x)\n sum_of_squares = np.convolve(squares, np.ones(n, dtype=int), 'valid')\n var_fast = (sum_of_squares / n) - np.square(avg)\n std_fast = np.sqrt(var_fast)\n\n print('STD FAST:')\n print('\\n'.join(str(x) for x in std_fast))\n\n\nif __name__ == '__main__':\n _main()\n"
] | [
[
"numpy.ones",
"numpy.arange",
"numpy.sqrt",
"numpy.std",
"numpy.square",
"numpy.mean"
]
] |
INK-USC/RiddleSense | [
"a3d57eaf084da9cf6b77692c608e2cd2870fbd97"
] | [
"methods/transformers/examples/deebert/src/modeling_highway_bert.py"
] | [
"import torch\r\nfrom torch import nn\r\nfrom torch.nn import CrossEntropyLoss, MSELoss\r\n\r\nfrom transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward\r\nfrom transformers.modeling_bert import (\r\n BERT_INPUTS_DOCSTRING,\r\n BERT_START_DOCSTRING,\r\n BertEmbeddings,\r\n BertLayer,\r\n BertPooler,\r\n BertPreTrainedModel,\r\n)\r\n\r\n\r\ndef entropy(x):\r\n \"\"\"Calculate entropy of a pre-softmax logit Tensor\"\"\"\r\n exp_x = torch.exp(x)\r\n A = torch.sum(exp_x, dim=1) # sum of exp(x_i)\r\n B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i)\r\n return torch.log(A) - B / A\r\n\r\n\r\nclass DeeBertEncoder(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.output_attentions = config.output_attentions\r\n self.output_hidden_states = config.output_hidden_states\r\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\r\n self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])\r\n\r\n self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]\r\n\r\n def set_early_exit_entropy(self, x):\r\n if (type(x) is float) or (type(x) is int):\r\n for i in range(len(self.early_exit_entropy)):\r\n self.early_exit_entropy[i] = x\r\n else:\r\n self.early_exit_entropy = x\r\n\r\n def init_highway_pooler(self, pooler):\r\n loaded_model = pooler.state_dict()\r\n for highway in self.highway:\r\n for name, param in highway.pooler.state_dict().items():\r\n param.copy_(loaded_model[name])\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n ):\r\n all_hidden_states = ()\r\n all_attentions = ()\r\n all_highway_exits = ()\r\n for i, layer_module in enumerate(self.layer):\r\n if self.output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n layer_outputs = layer_module(\r\n hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask\r\n )\r\n hidden_states = layer_outputs[0]\r\n\r\n if self.output_attentions:\r\n all_attentions = all_attentions + (layer_outputs[1],)\r\n\r\n current_outputs = (hidden_states,)\r\n if self.output_hidden_states:\r\n current_outputs = current_outputs + (all_hidden_states,)\r\n if self.output_attentions:\r\n current_outputs = current_outputs + (all_attentions,)\r\n\r\n highway_exit = self.highway[i](current_outputs)\r\n # logits, pooled_output\r\n\r\n if not self.training:\r\n highway_logits = highway_exit[0]\r\n highway_entropy = entropy(highway_logits)\r\n highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy\r\n all_highway_exits = all_highway_exits + (highway_exit,)\r\n\r\n if highway_entropy < self.early_exit_entropy[i]:\r\n new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)\r\n raise HighwayException(new_output, i + 1)\r\n else:\r\n all_highway_exits = all_highway_exits + (highway_exit,)\r\n\r\n # Add last layer\r\n if self.output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n outputs = (hidden_states,)\r\n if self.output_hidden_states:\r\n outputs = outputs + (all_hidden_states,)\r\n if self.output_attentions:\r\n outputs = outputs + (all_attentions,)\r\n\r\n outputs = outputs + (all_highway_exits,)\r\n return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits\r\n\r\n\r\n@add_start_docstrings(\r\n \"The Bert Model transformer 
with early exiting (DeeBERT). \",\r\n BERT_START_DOCSTRING,\r\n)\r\nclass DeeBertModel(BertPreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.config = config\r\n\r\n self.embeddings = BertEmbeddings(config)\r\n self.encoder = DeeBertEncoder(config)\r\n self.pooler = BertPooler(config)\r\n\r\n self.init_weights()\r\n\r\n def init_highway_pooler(self):\r\n self.encoder.init_highway_pooler(self.pooler)\r\n\r\n def get_input_embeddings(self):\r\n return self.embeddings.word_embeddings\r\n\r\n def set_input_embeddings(self, value):\r\n self.embeddings.word_embeddings = value\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"Prunes heads of the model.\r\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\r\n See base class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n ):\r\n r\"\"\"\r\n Return:\r\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\r\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\r\n Sequence of hidden-states at the output of the last layer of the model.\r\n pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):\r\n Last layer hidden-state of the first token of the sequence (classification token)\r\n further processed by a Linear layer and a Tanh activation function. 
The Linear\r\n layer weights are trained from the next sentence prediction (classification)\r\n objective during pre-training.\r\n\r\n This output is usually *not* a good summary\r\n of the semantic content of the input, you're often better with averaging or pooling\r\n the sequence of hidden-states for the whole input sequence.\r\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\r\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\r\n\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\r\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\r\n\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\r\n heads.\r\n highway_exits (:obj:`tuple(tuple(torch.Tensor))`:\r\n Tuple of each early exit's results (total length: number of layers)\r\n Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states.\r\n \"\"\"\r\n if input_ids is not None and inputs_embeds is not None:\r\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\r\n elif input_ids is not None:\r\n input_shape = input_ids.size()\r\n elif inputs_embeds is not None:\r\n input_shape = inputs_embeds.size()[:-1]\r\n else:\r\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\r\n\r\n device = input_ids.device if input_ids is not None else inputs_embeds.device\r\n\r\n if attention_mask is None:\r\n attention_mask = torch.ones(input_shape, device=device)\r\n if encoder_attention_mask is None:\r\n encoder_attention_mask = torch.ones(input_shape, device=device)\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\r\n\r\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\r\n # ourselves in which case we just need to make it broadcastable to all heads.\r\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\r\n\r\n # If a 2D ou 3D attention mask is provided for the cross-attention\r\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\r\n if encoder_attention_mask.dim() == 3:\r\n encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\r\n if encoder_attention_mask.dim() == 2:\r\n encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\r\n\r\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(\r\n dtype=next(self.parameters()).dtype\r\n ) # fp16 compatibility\r\n encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0\r\n\r\n # Prepare head mask if needed\r\n # 1.0 in head_mask indicate we keep the head\r\n # attention_probs has shape bsz x n_heads x N x N\r\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\r\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\r\n head_mask = self.get_head_mask(head_mask, 
self.config.num_hidden_layers)\r\n\r\n embedding_output = self.embeddings(\r\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\r\n )\r\n encoder_outputs = self.encoder(\r\n embedding_output,\r\n attention_mask=extended_attention_mask,\r\n head_mask=head_mask,\r\n encoder_hidden_states=encoder_hidden_states,\r\n encoder_attention_mask=encoder_extended_attention_mask,\r\n )\r\n sequence_output = encoder_outputs[0]\r\n pooled_output = self.pooler(sequence_output)\r\n\r\n outputs = (sequence_output, pooled_output,) + encoder_outputs[\r\n 1:\r\n ] # add hidden_states and attentions if they are here\r\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits\r\n\r\n\r\nclass HighwayException(Exception):\r\n def __init__(self, message, exit_layer):\r\n self.message = message\r\n self.exit_layer = exit_layer # start from 1!\r\n\r\n\r\nclass BertHighway(nn.Module):\r\n \"\"\"A module to provide a shortcut\r\n from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)\r\n \"\"\"\r\n\r\n def __init__(self, config):\r\n super().__init__()\r\n self.pooler = BertPooler(config)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n def forward(self, encoder_outputs):\r\n # Pooler\r\n pooler_input = encoder_outputs[0]\r\n pooler_output = self.pooler(pooler_input)\r\n # \"return\" pooler_output\r\n\r\n # BertModel\r\n bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]\r\n # \"return\" bmodel_output\r\n\r\n # Dropout and classification\r\n pooled_output = bmodel_output[1]\r\n\r\n pooled_output = self.dropout(pooled_output)\r\n logits = self.classifier(pooled_output)\r\n\r\n return logits, pooled_output\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"Bert Model (with early exiting - DeeBERT) with a classifier on top,\r\n also takes care of multi-layer training. 
\"\"\",\r\n BERT_START_DOCSTRING,\r\n)\r\nclass DeeBertForSequenceClassification(BertPreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n self.num_layers = config.num_hidden_layers\r\n\r\n self.bert = DeeBertModel(config)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\r\n\r\n self.init_weights()\r\n\r\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n labels=None,\r\n output_layer=-1,\r\n train_highway=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss.\r\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\r\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\r\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\r\n\r\n Returns:\r\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\r\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\r\n Classification (or regression if config.num_labels==1) loss.\r\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\r\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\r\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\r\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\r\n\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\r\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\r\n\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\r\n heads.\r\n highway_exits (:obj:`tuple(tuple(torch.Tensor))`:\r\n Tuple of each early exit's results (total length: number of layers)\r\n Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states.\r\n \"\"\"\r\n\r\n exit_layer = self.num_layers\r\n try:\r\n outputs = self.bert(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n )\r\n # sequence_output, pooled_output, (hidden_states), (attentions), highway exits\r\n\r\n pooled_output = outputs[1]\r\n\r\n pooled_output = self.dropout(pooled_output)\r\n logits = self.classifier(pooled_output)\r\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\r\n except HighwayException as e:\r\n outputs = e.message\r\n exit_layer = e.exit_layer\r\n logits = outputs[0]\r\n\r\n if not self.training:\r\n original_entropy = entropy(logits)\r\n highway_entropy = []\r\n 
highway_logits_all = []\r\n if labels is not None:\r\n if self.num_labels == 1:\r\n # We are doing regression\r\n loss_fct = MSELoss()\r\n loss = loss_fct(logits.view(-1), labels.view(-1))\r\n else:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n\r\n # work with highway exits\r\n highway_losses = []\r\n for highway_exit in outputs[-1]:\r\n highway_logits = highway_exit[0]\r\n if not self.training:\r\n highway_logits_all.append(highway_logits)\r\n highway_entropy.append(highway_exit[2])\r\n if self.num_labels == 1:\r\n # We are doing regression\r\n loss_fct = MSELoss()\r\n highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))\r\n else:\r\n loss_fct = CrossEntropyLoss()\r\n highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))\r\n highway_losses.append(highway_loss)\r\n\r\n if train_highway:\r\n outputs = (sum(highway_losses[:-1]),) + outputs\r\n # exclude the final highway, of course\r\n else:\r\n outputs = (loss,) + outputs\r\n if not self.training:\r\n outputs = outputs + ((original_entropy, highway_entropy), exit_layer)\r\n if output_layer >= 0:\r\n outputs = (\r\n (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]\r\n ) # use the highway of the last layer\r\n\r\n return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)\r\n"
] | [
[
"torch.sum",
"torch.ones",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.exp",
"torch.nn.CrossEntropyLoss",
"torch.log",
"torch.zeros",
"torch.nn.Dropout"
]
] |
pailabteam/pailab | [
"3995b25f105827ae631e6120f380748d7d284c9f",
"3995b25f105827ae631e6120f380748d7d284c9f"
] | [
"pailab/tools/tree.py",
"pailab/analysis/tools_jupyter.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"This module contains all functions and classes for the MLTree. The MLTree buils a tree-like\nstructure of the objects in a given repository. This allows the user to access objects in a\ncomfortable way allowing for autocompletion (i.e. in Jupyter notebooks).\n\nTo use it one can simply call the :py:meth:`pailab.tools.tree.MLTree.add_tree` method to \nadd such a tree to the current repository::\n\n >>from pailab.tools.tree import MLTree\n >>MLTree.add_tree(ml_repo)\n\nAfter the tree has been added, one can simply use the tree. Here, using autocompletion makes the basic work wih repo objects quite simply.\nEach tree node provides useful functions that can be applied:\n\n- ``load`` loads the object of the given tree node or the child tree nodes of the current node. a\n After calling load the respective nodes have a new attribute ``obj`` that contains the respective loaded object. To load all objects belonging to the models subtree like \n parameters, evaluations or measures one can call::\n\n >> ml_repo.tree.models.load()\n\n- ``history`` lists the history of all objects of the respective subtree, where history excepts certain parameters such as a range of versions or \n which repo object information to include. To list th history of all training data just use::\n\n >> ml_repo.tree.training_data.history()\n\n- ``modifications`` lists all objects of the respective subtree that have been modified and no yet been committed.\n\nThere are also node dependent function (depending on what object the node represents).\n\"\"\"\nimport logging\nfrom numpy import load\nfrom deepdiff import DeepDiff\nfrom pailab.ml_repo.repo import MLObjectType, MLRepo\nfrom pailab.ml_repo.repo_objects import RepoInfoKey, DataSet # pylint: disable=E0401\nfrom pailab.ml_repo.repo_store import RepoStore # pylint: disable=E0401\nimport pailab.ml_repo.repo_store as repo_store\nimport pailab.ml_repo.repo_objects as repo_objects\nlogger = logging.getLogger(__name__)\n\n#region collections and items\n\n\n\nclass _RepoObjectItem:\n\n def __init__(self, name, ml_repo, repo_obj = None):\n self._name = name\n self._repo = ml_repo\n if repo_obj is not None:\n self.obj = repo_obj\n \n def _set(self, path, items):\n if len(path) > 0:\n if len(path) == 1:\n setattr(self, path[0], items[0])\n return\n if hasattr(self, path[0]):\n getattr(self, path[0])._set(path[1:], items[1:])\n else:\n setattr(self, path[0], items[0])\n items[0]._set(path[1:], items[1:])\n\n def load(self, version=repo_store.LAST_VERSION, full_object=False,\n modifier_versions=None, containing_str=None):\n \"\"\"Loads the object into the tree and stores it in obj member.\n \n Args:\n version (str, optional): The version of the object to be loaded. Defaults to repo_store.LAST_VERSION.\n full_object (bool, optional): If True, also the bigobject-members of the object will be loaded and stored. Defaults to False.\n modifier_versions (dict of str to str, optional): The version of the object that has been created with the objects \n and their respective versions defined in the dict will be loaded. Defaults to None.\n containing_str (str, optional): The object will only be loaded if the given string is contained in the objects \n name (intended for internal use). 
Defaults to None.\n \"\"\"\n if containing_str is None or containing_str in self._name:\n if self._repo is not None:\n self.obj = self._repo.get(self._name, version, full_object, modifier_versions, throw_error_not_exist = False)\n for v in self.__dict__.values():\n if hasattr(v,'load'):\n v.load(version, full_object, modifier_versions, containing_str)\n\n def modifications(self, commit=False, commit_message=''):\n result = {}\n if self._name is not None:\n try:\n if self._repo is not None:\n obj_orig = self._repo.get(\n self.obj.repo_info[RepoInfoKey.NAME], version=self.obj.repo_info[RepoInfoKey.VERSION])\n diff = DeepDiff(obj_orig, self.obj,\n ignore_order=True)\n except AttributeError:\n return None\n if len(diff) == 0:\n return None\n else:\n if commit and (self._repo is not None):\n version = self._repo.add(\n self.obj, message=commit_message)\n self.obj = self._repo.get(self._name, version=version)\n result = {self._name: diff}\n for v in self.__dict__.values():\n if hasattr(v, 'modifications'):\n tmp = v.modifications(commit, commit_message)\n if tmp is not None:\n result.update(tmp)\n return result\n\n def history(self, version = (repo_store.FIRST_VERSION,repo_store.LAST_VERSION), \n repo_info = [RepoInfoKey.NAME, RepoInfoKey.AUTHOR, RepoInfoKey.COMMIT_DATE, RepoInfoKey.COMMIT_MESSAGE], \n obj_data = []):\n history = []\n if self._repo is not None:\n history = self._repo.get(self._name, version = version, throw_error_not_exist=False)\n if not isinstance(history, list):\n history = [history]\n result = {}\n tmp = []\n for h in history:\n r = {}\n for r_info in repo_info:\n r[str(r_info)] = h.repo_info[r_info]\n for o_info in obj_data:\n r[o_info] = obj_data.__dict__[o_info]\n tmp.append(r)\n result[self._name] = tmp\n for v in self.__dict__.values():\n if isinstance(v, _RepoObjectItem):\n tmp2 = v.history(version, repo_info, obj_data)\n if tmp2 is not None:\n result.update(tmp2)\n if len(result) > 0:\n return result\n \n\n def __call__(self, containing_str=None):\n # if len(self.__dict__) == 1:\n if containing_str is not None:\n result = []\n if containing_str in self._name:\n result.append(self._name)\n for v in self.__dict__.values():\n if isinstance(v, _RepoObjectItem):\n d = v(containing_str)\n if isinstance(d, str):\n result.append(d)\n else:\n result.extend(d)\n return [x for x in result if containing_str in x]\n else:\n return self._name\n return result\n\n\nclass _RawDataItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_RawDataItem,self).__init__(name, ml_repo, repo_obj)\n\n def append(self, x_data, y_data = None):\n \"\"\"Append data to a RawData object\n\n It appends data to the given RawData object and updates all training and test DataSets which implicitely changed by this update.\n\n Args:\n name (string): name of RawData object\n x_data (numpy matrix): the x_data to append\n y_data (numpy matrix, optional): Defaults to None. The y_data to append\n \n Raises:\n Exception: If the data is not consistent to the RawData (e.g. 
different number of x-coordinates) it throws an exception.\n \"\"\"\n logger.info('Start appending ' + str(x_data.shape[0]) + ' datapoints to RawData' + self._name)\n raw_data = self._repo.get(self._name)\n if len(raw_data.x_coord_names) != x_data.shape[1]:\n raise Exception('Number of columns of x_data of RawData object is not equal to number of columns of additional x_data.')\n if raw_data.y_coord_names is None and y_data is not None:\n raise Exception('RawData object does not contain y_data but y_data is given')\n if raw_data.y_coord_names is not None:\n if y_data is None:\n raise Exception('RawData object has y_data but no y_data is given')\n if y_data.shape[1] != len(raw_data.y_coord_names ):\n raise Exception('Number of columns of y_data of RawData object is not equal to number of columns of additional y_data.')\n numpy_dict = {'x_data' : x_data}\n if raw_data.y_coord_names is not None:\n numpy_dict['y_data'] = y_data\n raw_data.n_data += x_data.shape[0]\n old_version = raw_data.repo_info[RepoInfoKey.VERSION]\n new_version = self._repo.add(raw_data)\n self._repo._numpy_repo.append(self._name, old_version, new_version, numpy_dict)\n # now find all datasets which are affected by the updated data\n changed_data_sets = []\n training_data = self._repo.get_training_data(full_object = False)\n if isinstance(training_data, DataSet):\n if training_data.raw_data == self._name and training_data.raw_data_version == repo_store.RepoStore.LAST_VERSION:\n if training_data.end_index is None or training_data.end_index < 0:\n training_data.raw_data_version = new_version\n changed_data_sets.append(training_data)\n test_data = self._repo.get_names(MLObjectType.TEST_DATA)\n for d in test_data:\n data = self._repo.get(d)\n if isinstance(data, DataSet):\n if data.raw_data == self._name and data.raw_data_version == repo_store.RepoStore.LAST_VERSION:\n if data.end_index is None or data.end_index < 0:\n data.raw_data_version = new_version\n changed_data_sets.append(data)\n self._repo.add(changed_data_sets, 'RawData ' + self._name + ' updated, add DataSets depending om the updated RawData.')\n if hasattr(self, 'obj'):#update current object\n self.obj = self._repo.get(self._name, version=new_version)\n logger.info('Finished appending data to RawData' + self._name)\n\nclass _RawDataCollection(_RepoObjectItem):\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n\n def __init__(self, repo):\n super(_RawDataCollection, self).__init__('raw_data', repo)\n names = repo.get_names(MLObjectType.RAW_DATA)\n for n in names:\n setattr(self, _RawDataCollection.__get_name_from_path(n), _RawDataItem(n, repo))\n \n def add(self, name, data, input_variables = None, target_variables = None):\n \"\"\"Add raw data to the repository\n\n Arguments:\n data_name {name of data} -- the name of the data added\n data {pandas DataFrame} -- the data as pandas datatable\n input_variables {str or iterable of str} -- column name or iterable of column names defining the input variables of the given data\n target_variables {str or iterable of str} -- column name or iterable of column names defining the target variables of the given data\n \n Keyword Arguments:\n input_variables {list of strings} -- list of column names defining the input variables for the machine learning (default: {None}). If None, all variables are used as input\n target_variables {list of strings} -- list of column names defining the target variables for the machine learning (default: {None}). 
If None, no target data is added from the table.\n \"\"\"\n path = 'raw_data/' + name\n\n if input_variables is None:\n input_variables = list(data)\n if not target_variables is None:\n [input_variables.remove(x) for x in target_variables]\n else:\n if isinstance(input_variables, str):\n input_variables = [input_variables]\n # check whether the input_variables are included in the data\n if not [item for item in input_variables if item in list(data)] == list(input_variables):\n raise Exception('RawData does not include at least one column included in input_variables')\n \n if target_variables is not None:\n if isinstance(target_variables, str):\n target_variables = [target_variables]\n # check if target variables are in list\n if not [item for item in target_variables if item in list(data)] == list(target_variables):\n raise Exception('RawData does not include at least one column included in target_variables')\n raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, data.loc[:, target_variables].values, \n target_variables, repo_info = {RepoInfoKey.NAME: path})\n else:\n raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, repo_info = {RepoInfoKey.NAME: path})\n v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA)\n obj = self._repo.get(path, version=v, full_object = False)\n setattr(self, name, _RawDataItem(path, self._repo, obj))\n\n def add_from_numpy_file(self, name, filename_X, x_names, filename_Y=None, y_names = None):\n path = name\n X = load(filename_X)\n Y = None\n if filename_Y is not None:\n Y = load(filename_Y)\n raw_data = repo_objects.RawData(X, x_names, Y, y_names, repo_info = {RepoInfoKey.NAME: path})\n v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA)\n obj = self._repo.get(path, version=v, full_object = False)\n setattr(self, name, _RawDataItem(path, self._repo, obj))\n\nclass _TrainingDataCollection(_RepoObjectItem):\n\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n \n def __init__(self, repo):\n super(_TrainingDataCollection, self).__init__('training_data', None)\n self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class\n names = repo.get_names(MLObjectType.TRAINING_DATA)\n for n in names:\n setattr(self, _TrainingDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo))\n \n def add(self, name, raw_data, start_index=0, \n end_index=None, raw_data_version='last'):\n #path = 'training_data/' + name\n data_set = repo_objects.DataSet(raw_data, start_index, end_index, \n raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TRAINING_DATA})\n v = self.__repo.add(data_set)\n tmp = self.__repo.get(name, version=v)\n item = _RepoObjectItem(name, self.__repo, tmp)\n setattr(self, name, item)\n\nclass _TestDataCollection(_RepoObjectItem):\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n \n def __init__(self, repo):\n super(_TestDataCollection, self).__init__('test_data', None)\n self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class\n names = repo.get_names(MLObjectType.TEST_DATA)\n for n in names:\n setattr(self, _TestDataCollection.__get_name_from_path(n), _RepoObjectItem(n,repo))\n \n def add(self, name, raw_data, start_index=0, \n end_index=None, raw_data_version='last'):\n data_set = 
repo_objects.DataSet(raw_data, start_index, end_index, \n raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TEST_DATA})\n v = self.__repo.add(data_set)\n tmp = self.__repo.get(name, version=v)\n item = _RepoObjectItem(name, self.__repo, tmp)\n setattr(self, name, item)\n\nclass _MeasureItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_MeasureItem, self).__init__(name, ml_repo, repo_obj) \n\nclass _JobItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_JobItem, self).__init__(name, ml_repo, repo_obj) \n\nclass _MeasureCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo):\n super(_MeasureCollection, self).__init__('measures', None)\n names = ml_repo.get_names(MLObjectType.MEASURE)\n for n in names:\n path = n.split('/')[2:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _MeasureItem(n, ml_repo)\n self._set(path, items)\n #items[-2] = MeasuresOnDataItem\n\nclass _EvalCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo):\n super(_EvalCollection, self).__init__('eval', None)\n names = ml_repo.get_names(MLObjectType.EVAL_DATA)\n for n in names:\n path = n.split('/')[2:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _MeasureItem(n, ml_repo)\n self._set(path, items)\n\nclass _TestCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo):\n super(_TestCollection, self).__init__('tests', None)\n names = ml_repo.get_names(MLObjectType.TEST)\n for n in names:\n path = n.split('/')[2:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _RepoObjectItem(n, ml_repo)\n self._set(path, items)\n\nclass _JobCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo, model_name):\n super(_JobCollection, self).__init__('jobs', None)\n names = ml_repo.get_names(MLObjectType.JOB)\n for n in names:\n if model_name in n:\n path = n.split('/')\n path = path[path.index('jobs')+1:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _JobItem(n, ml_repo)\n self._set(path, items)\n\nclass _ModelItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_ModelItem,self).__init__(name, ml_repo, repo_obj)\n self.model = _RepoObjectItem(name + '/model', ml_repo)\n self.eval = _EvalCollection(name + '/eval', ml_repo)\n self.model_param = _RepoObjectItem(name + '/model_param', ml_repo)\n self.tests = _TestCollection(name + '/tests', ml_repo)\n self.measures = _MeasureCollection(name+ '/measure', ml_repo)\n self.jobs = _JobCollection(name+'/jobs', ml_repo, name)\n if ml_repo._object_exists(name+'/training_stat'):\n self.training_statistic = _RepoObjectItem(name+'/training_stat', ml_repo)\n if ml_repo._object_exists(name+'/training_param'):\n self.training_param = _RepoObjectItem(name + '/training_param', ml_repo)\n\n\n def set_label(self, label_name, version = repo_store.RepoStore.LAST_VERSION, message=''):\n self._repo.set_label(label_name, self._name+ '/model', version, message)\n\nclass _LabelCollection(_RepoObjectItem):\n def __init__(self, repo):\n super(_LabelCollection,self).__init__('labels', None)\n names = repo.get_names(MLObjectType.LABEL)\n for n in names:\n #label = ml_repo.get()\n setattr(self, n, _RepoObjectItem(n, repo))\n \nclass _ModelCollection(_RepoObjectItem):\n 
@staticmethod\n def __get_name_from_path(name):\n return name\n\n def __init__(self, repo):\n super(_ModelCollection,self).__init__('models', None)\n names = repo.get_names(MLObjectType.MODEL)\n for n in names:\n setattr(self, _ModelCollection.__get_name_from_path(n), _ModelItem(n, repo))\n self.labels = _LabelCollection(repo)\n \n def add(self, name):\n setattr(self, name, _ModelItem(name,self._repo))\n\n\n\nclass _CacheDataCollection(_RepoObjectItem):\n\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n \n def __init__(self, repo):\n super(_CacheDataCollection, self).__init__('cache', None)\n self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class\n names = repo.get_names(MLObjectType.CACHED_VALUE)\n for n in names:\n setattr(self, _CacheDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo))\n#endregion\n\n\n\nclass MLTree:\n\n @staticmethod\n def add_tree(ml_repo):\n \"\"\"Adds an MLTree to a repository.\n\n Args:\n ml_repo (MLRepo): the repository the tre is added\n \"\"\"\n setattr(ml_repo, 'tree', MLTree(ml_repo))\n ml_repo._add_triggers.append(ml_repo.tree.reload)\n \n def __create(self):\n self.raw_data = _RawDataCollection(self.__ml_repo)\n self.training_data = _TrainingDataCollection(self.__ml_repo)\n self.test_data = _TestDataCollection(self.__ml_repo)\n self.models = _ModelCollection(self.__ml_repo)\n self.cache = _CacheDataCollection(self.__ml_repo)\n\n def __init__(self, ml_repo):\n self.__ml_repo = ml_repo\n self.__create()\n\n def reload(self, **kwargs):\n \"\"\"Method to reload the tree after objects have been added or deleted from the repository.\n \"\"\"\n self.__create() # todo make this more efficient by just updating collections and items which are affected by this\n\n def modifications(self):\n \"\"\"Return a dictionary of all objects that were modified but no yet \n commited to the repository.\n \n Returns:\n dict: dictionary mapping object ids to dictionary of the modified attributes \n \"\"\"\n result = {}\n tmp = self.raw_data.modifications()\n if tmp is not None:\n result.update(tmp)\n tmp = self.training_data.modifications()\n if tmp is not None:\n result.update(tmp)\n tmp = self.test_data.modifications()\n if tmp is not None:\n result.update(stmp)\n tmp = self.models.modifications()\n if tmp is not None:\n result.update(tmp)\n if len(result) == 0:\n return None\n return result\n\n \n",
"import numpy as np\nimport copy\nimport logging\nfrom IPython.display import display, clear_output\nfrom collections import defaultdict\nimport pailab.analysis.plot as paiplot\nimport pailab.analysis.plot_helper as plt_helper\nimport ipywidgets as widgets\n\nfrom pailab import MLObjectType, RepoInfoKey, FIRST_VERSION, LAST_VERSION\nfrom pailab.ml_repo.repo import NamingConventions\nimport pailab.tools.checker as checker\nimport pailab.tools.tools as tools\nimport pailab.tools.interpretation as interpretation\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nlogger = logging.getLogger(__name__)\n\n# set option so that long lines have a linebreak\npd.set_option('display.max_colwidth', -1)\n# set widget use to True so that plotlys FigureWidget is used\npaiplot.use_within_widget = True\n\nif paiplot.has_plotly:\n import plotly.graph_objs as go\n\nbeakerX = False\nif beakerX:\n from beakerx import TableDisplay\n # from beakerx.object import beakerx\nelse:\n def TableDisplay(dt):\n display(dt)\n\n\nclass _MLRepoModel:\n\n class _DataModel:\n def __init__(self, ml_repo):\n self._training_data = {}\n self._test_data = {}\n for k in ml_repo.get_names(MLObjectType.TRAINING_DATA):\n tmp = ml_repo.get(k)\n self._training_data[k] = tmp.n_data\n self._x_coord_names = tmp.x_coord_names\n self._y_coord_names = tmp.y_coord_names\n for k in ml_repo.get_names(MLObjectType.TEST_DATA):\n tmp = ml_repo.get(k)\n self._test_data[k] = tmp.n_data\n\n def get_data_names(self):\n result = [k for k in self._test_data.keys()]\n result.extend([k for k in self._training_data.keys()])\n return result\n\n def get_num_data(self, data):\n result = []\n for d in data:\n if d in self._test_data.keys():\n result.append(self._test_data[d])\n elif d in self._training_data.keys():\n result.append(self._training_data[d])\n else:\n raise Exception('Cannot find data ' + d)\n return result\n\n class _ModelModel:\n def __init__(self, ml_repo):\n self.labels = {} # dictionary label->model and version\n # dictionary (model,version)->labelname or None\n self.model_to_label = defaultdict(lambda: None)\n self._setup_labels(ml_repo)\n self._model_info_table = self._setup_model_info_table(ml_repo)\n self._model_names = ml_repo.get_names(\n MLObjectType.CALIBRATED_MODEL)\n\n def _setup_labels(self, ml_repo):\n label_names = ml_repo.get_names(MLObjectType.LABEL)\n if label_names is None:\n return\n if isinstance(label_names, str):\n label_names = [label_names]\n for l in label_names:\n label = ml_repo.get(l)\n self.labels[l] = {'model': label.name,\n 'version': label.version}\n self.model_to_label[(label.name, label.version,)] = l\n\n def _setup_model_info_table(self, ml_repo):\n model_rows = []\n model_names = ml_repo.get_names(MLObjectType.CALIBRATED_MODEL)\n for model_name in model_names:\n models = ml_repo.get(model_name, version=(\n FIRST_VERSION, LAST_VERSION), full_object=False)\n if not isinstance(models, list):\n models = [models]\n for model in models:\n tmp = copy.deepcopy(model.repo_info.get_dictionary())\n tmp['model'] = tmp['name']\n del tmp['big_objects']\n del tmp['modifiers']\n del tmp['modification_info']\n tmp['label'] = self.model_to_label[(\n tmp['model'], tmp['version'],)]\n tmp['widget_key'] = tmp['commit_date'][0:16] + ' | ' + \\\n tmp['author'] + ' | ' + \\\n str(tmp['label']) + ' | ' + tmp['version']\n model_rows.append(tmp)\n model_info_table = pd.DataFrame(model_rows)\n model_info_table.set_index(['model', 'version'], inplace=True)\n return model_info_table\n\n def 
get_models(self):\n return self._model_names\n\n def get_info_table(self):\n return self._model_info_table\n\n def setup_error_measure_table(self, ml_repo, data_sets, measures):\n tmp = []\n for measure in measures:\n for data in data_sets:\n tmp.append(pd.DataFrame(\n tools.get_model_measure_list(ml_repo, measure, data)))\n tmp[-1].set_index(['model', 'version'], inplace=True)\n result = self.get_info_table()\n tmp.insert(0, result)\n return pd.concat(tmp, axis=1)\n\n class _ConsistencyModel:\n def __init__(self, ml_repo):\n self.tests = checker.Tests.run(ml_repo)\n self.model = checker.Model.run(ml_repo)\n self.data = checker.Data.run(ml_repo)\n\n def __init__(self):\n pass\n\n def set_repo(self, ml_repo):\n self.ml_repo = ml_repo\n self._setup()\n\n def _setup(self):\n self.object_types = {}\n for k in MLObjectType:\n self.object_types[k.value] = self.ml_repo.get_names(k)\n self.data = _MLRepoModel._DataModel(self.ml_repo)\n self.model = _MLRepoModel._ModelModel(self.ml_repo)\n self.consistency = _MLRepoModel._ConsistencyModel(self.ml_repo)\n self._setup_measures()\n self._setup_labels()\n # now set label information into\n\n def _setup_labels(self): # todo: das hier muss weg\n self.labels = {}\n label_names = self.ml_repo.get_names(MLObjectType.LABEL)\n if label_names is None:\n return\n if isinstance(label_names, str):\n label_names = [label_names]\n for l in label_names:\n label = self.ml_repo.get(l)\n self.labels[l] = {'model': label.name, 'version': label.version}\n\n def _setup_measures(self):\n measure_names = self.ml_repo.get_names(\n MLObjectType.MEASURE_CONFIGURATION)\n if len(measure_names) == 0:\n self.measures = []\n else:\n measure_config = self.ml_repo.get(measure_names[0])\n self.measures = [x for x in measure_config.measures.keys()]\n\n def get_model_statistics(self):\n model_stats = {}\n models = self.ml_repo.get_names(MLObjectType.CALIBRATED_MODEL)\n if isinstance(models, str):\n models = [models]\n for m in models:\n model = self.ml_repo.get(m)\n model_stats[model.repo_info.name] = {\n 'last commit': model.repo_info.commit_date,\n '#total commits': self.model.get_info_table().shape[0]\n }\n return model_stats\n\n def get_versions(self, name):\n return self.ml_repo.get_history(name, obj_member_fields=[])\n\n # def get_model_parameter(self, model_name)\n\n\nwidget_repo = _MLRepoModel()\n\n# region helpers\n\n\ndef _add_title_and_border(name):\n def _get_widget(get_widget):\n def wrapper(self):\n return widgets.VBox(children=[\n # , layout = widgets.Layout(width = '100%')),\n widgets.HTML(\n value='<h3 style=\"Color: white; background-color:#d1d1e0; text-align: center\"> ' + name + '</h3>'),\n get_widget(self),\n # , layout = widgets.Layout(width = '100%'))\n widgets.HTML(\n value='<h3 style=\"Color: white; background-color:#d1d1e0; text-align: center\"> </h3>')\n ], layout=widgets.Layout(padding='0px 0px 0px 0px', overflow_x='auto') # , overflow_y='auto', )\n ) # layout=widgets.Layout(border='solid 1px'))\n return wrapper\n return _get_widget\n\n\ndef _highlight_max(data, color='red'):\n '''\n highlight the maximum in a Series or DataFrame\n '''\n attr = 'color: {}'.format(color)\n # remove % and cast to float\n # data = data.replace('%','', regex=True).astype(float)\n if data.ndim == 1: # Series from .apply(axis=0) or axis=1\n is_max = data == data.max()\n return [attr if v else '' for v in is_max]\n else: # from .apply(axis=None)\n is_max = data == data.max().max()\n return pd.DataFrame(np.where(is_max, attr, ''),\n index=data.index, columns=data.columns)\n\n\ndef 
_highlight_min(data, color='green'):\n '''\n highlight the maximum in a Series or DataFrame\n '''\n attr = 'color: {}'.format(color)\n # remove % and cast to float\n # data = data.replace('%','', regex=True).astype(float)\n if data.ndim == 1: # Series from .apply(axis=0) or axis=1\n is_max = data == data.min()\n return [attr if v else '' for v in is_max]\n else: # from .apply(axis=None)\n is_max = data == data.min().min()\n return pd.DataFrame(np.where(is_max, attr, ''),\n index=data.index, columns=data.columns)\n\n\nclass _TableViewer:\n def __init__(self, table, table_name, selected_columns=None):\n self._table = table\n self._table_name = table_name\n self._columns = table.columns\n if selected_columns is None:\n self._selected_columns = self._columns\n else:\n self._selected_columns = selected_columns\n\n self._selected_columns = widgets.SelectMultiple(\n options=self._columns, value=self._selected_columns)\n self._output = widgets.Output()\n\n self._settings = widgets.HBox(children=[])\n self._tab = widgets.Tab(children=[self._output, self._settings], title=[\n 'Table', 'Table Settings'])\n\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.get_overview)\n\n def get_overview(self, d):\n with self._output:\n clear_output(wait=True)\n # , orient='index'))\n TableDisplay(self._table[self._selected_columns.value])\n\n def get_widget(self):\n return self._tab\n\n\nclass _ObjectCategorySelector:\n\n def __init__(self, *args, **kwargs):\n selection = []\n for k, v in widget_repo.object_types.items():\n if len(v) > 0:\n selection.append(k + ' (' + str(len(v)) + ')')\n if 'layout' not in kwargs.keys():\n kwargs['layout'] = widgets.Layout(width='300px', height='250px')\n kwargs['value'] = []\n self._selector = widgets.SelectMultiple(options=selection,\n # value = [selection[0]],\n **kwargs)\n\n def get_selection(self):\n return [k.split(' ')[0] for k in self._selector.value]\n\n def get_widget(self):\n return widgets.VBox(children=[\n widgets.Label(value='Object Types'),\n self._selector\n ]\n )\n\n\nclass _DataSelector:\n \"\"\"Widget to select training and test data.\n \"\"\"\n\n def __init__(self, **kwargs):\n names = widget_repo.data.get_data_names()\n # if len(names) > 0:\n self._selection_widget = widgets.SelectMultiple(\n options=names, value=[names[0]], **kwargs)\n\n def get_widget(self):\n return widgets.VBox(children=[widgets.Label(value='Data'), self._selection_widget])\n\n def get_selection(self):\n return self._selection_widget.value\n\n\nclass _DataSelectorWithVersion:\n \"\"\"Widget to select training and test data.\n \"\"\"\n\n def __init__(self, display_selection=True, **kwargs):\n names = widget_repo.data.get_data_names()\n self._update_callbacks = []\n self._display_selection = display_selection\n self._selection = {}\n self._selection_options = {}\n self._key_to_version = {}\n self._updating_version = {}\n for n in names:\n self._selection[n] = []\n self._selection_options[n] = []\n self._key_to_version[n] = {}\n self._selected_overview = widgets.Output()\n self._selection_data = widgets.Dropdown(\n options=names, value=None, **kwargs)\n\n self._selection_data.observe(self._update_version, names='value')\n\n self._selection_version = widgets.SelectMultiple(\n options=[], value=[], **kwargs)\n self._selection_version.observe(\n self._display_selected_overview, names='value')\n\n def _get_state(self):\n return self._selection, self._selection_options, self._key_to_version\n\n def _set_state(self, state):\n self._selection = 
state[0]\n self._selection_options = state[1]\n self._key_to_version = state[2]\n\n def _set_update_callback(self, cb):\n \"\"\"Set a callback (called at every update of this widget)\n\n Args:\n cb (function): Callback function called at every update.\n \"\"\"\n self._update_callbacks.append(cb)\n\n def _update_version(self, change):\n self._updating_version = True\n data_selected = self._selection_data.value\n tmp = widget_repo.ml_repo.get_history(data_selected)\n key_to_version = {}\n versions = []\n for x in tmp:\n key = x['repo_info']['commit_date'][0:16] + ' | ' + \\\n x['repo_info']['author'] + ' | ' + x['repo_info']['version']\n key_to_version[key] = x['repo_info']['version']\n versions.append(key)\n self._key_to_version[data_selected] = key_to_version\n self._selection_version.options = versions\n self._selection_version.value = self._selection_options[data_selected]\n for cb in self._update_callbacks:\n cb(change)\n self._updating_version = False\n # self._selection[self._selection_data.value] = [x for x in self._selection_version.value]\n\n def _display_selected_overview(self, change):\n if self._updating_version:\n return\n data_selected = self._selection_data.value\n key_to_version = self._key_to_version[data_selected]\n self._selection[data_selected] = [key_to_version[x]\n for x in self._selection_version.value]\n self._selection_options[data_selected] = [\n x for x in self._selection_version.value]\n tmp = {}\n tmp['data'] = []\n tmp['version'] = []\n for n, x in self._selection.items():\n for y in x:\n tmp['data'].append(n)\n tmp['version'].append(y)\n for cb in self._update_callbacks:\n cb(change)\n with self._selected_overview:\n clear_output(wait=True)\n display(pd.DataFrame.from_dict(tmp))\n\n def get_widget(self):\n if self._display_selection:\n return widgets.VBox(children=[widgets.Label(value='Data'), self._selection_data,\n widgets.Label(\n value='Versions'), self._selection_version,\n self._selected_overview, ])\n else:\n return widgets.VBox(children=[widgets.Label(value='Data'), self._selection_data,\n widgets.Label(value='Versions'), self._selection_version])\n\n def get_selection(self):\n return self._selection\n\n def get_data(self):\n data = {}\n for d_name, d_v in self._selection.items():\n if len(d_v) > 0:\n data[d_name] = d_v\n return data\n\n\nclass _ModelSelectorWithVersion:\n\n @staticmethod\n def _filter_models(labels=None, commit_start=None, commit_end=None, authors=None, model_versions=None):\n \"\"\"Filter the model table according to the given attributes.\n\n Args:\n labels ([str or iterable of str], optional): If set, returns only models with the selected labels. Defaults to None.\n commit_start (str, optional): String of earliest commit date.. Defaults to None.\n commit_end (str, optional): String of latest commit date. Defaults to None.\n authors (str or iterable of str, optional): If set it return only the models with the corresponding author(s). Defaults to None.\n model_versions (str or iterable of str, optional): If set only modes with respective version(s) are returned. 
Defaults to None.\n\n Returns:\n pandas DataFrame: The correspondign models.\n \"\"\"\n result = widget_repo.model.get_info_table()\n if labels is not None:\n if isinstance(labels, str):\n result = result[result['label'] == labels]\n else:\n result = result[result['label'].isin(labels)]\n if commit_start is not None:\n result = result[result['commit_date'] >= commit_start]\n if commit_end is not None:\n result = result[result['commit_date'] <= commit_end]\n if authors is not None:\n if isinstance(authors, str):\n result = result[result['author'] == authors]\n else:\n result = result[result['author'].isin(authors)]\n if model_versions is not None:\n if isinstance(model_versions, str):\n result = result[result['version'] == model_versions]\n else:\n result = result[result['version'].isin(model_versions)]\n return result\n\n def __init__(self, display_selection=True, **kwargs):\n self._display_selection = display_selection\n self._selection = defaultdict(list)\n self._selection_model_name = widgets.Dropdown(\n options=widget_repo.model.get_models(), value=None, **kwargs)\n self._selection_model_name.observe(\n self._selected_model_changes, names='value')\n\n self._selection_version = widgets.SelectMultiple(\n options=[], value=[], rows=8, layout=widgets.Layout(width=\"100%\"), **kwargs)\n\n self._selected_overview = widgets.Output()\n self._selection_version.observe(\n self._selected_version_changed, names='value')\n\n self._model_changed_callable = None\n\n # Filtering\n #\n labels = widget_repo.ml_repo.get_names(MLObjectType.LABEL)\n self._label_selector = widgets.SelectMultiple(options=labels)\n self._commit_data_start = widgets.DatePicker()\n self._commit_data_end = widgets.DatePicker()\n self._author_selector = widgets.SelectMultiple(\n options=widget_repo.model.get_info_table()['author'].unique())\n self._apply_button = widgets.Button(description='Apply')\n self._apply_button.on_click(self._apply_filter)\n self._clear_button = widgets.Button(description='Clear')\n self._clear_button.on_click(self._clear_filter)\n self._filter = widgets.VBox(children=[\n widgets.Label(value='Labels'),\n self._label_selector,\n widgets.Label(value='Commit Start'),\n self._commit_data_start,\n widgets.Label(value='Commit End'),\n self._commit_data_end,\n widgets.Label(value='Authors'),\n self._author_selector,\n widgets.HBox(children=[\n self._apply_button,\n self._clear_button])\n ]\n )\n\n def get_models(self):\n \"\"\"Returns all selected models as list of tuples (first element is model name, second model version)\n \"\"\"\n models = widget_repo.model.get_info_table()\n result = {}\n for k, v in self._selection.items():\n if len(v) > 0:\n result[k] = [models[models['widget_key'] == w].index[0][1]\n for w in v]\n return result\n\n def observe_model_change(self, handler):\n \"\"\"Setup a handler when the model trait changed\n\n Args:\n handler (callable): A callable that is called when the model trait changes.\n \"\"\"\n self._model_changed_callable = handler\n\n def _selected_model_changes(self, change):\n self._update_version(change)\n if self._model_changed_callable is not None:\n self._model_changed_callable(change)\n\n def _selected_version_changed(self, change):\n self._display_selected_overview(change)\n\n def _apply_filter(self, dummy):\n self._updating_version = True\n data_selected = self._selection_model_name.value\n labels = self._label_selector.value\n if len(labels) == 0:\n labels = None\n if self._commit_data_start.value is None:\n commit_start = None\n else:\n commit_start = 
str(self._commit_data_start.value)\n if self._commit_data_end.value is None:\n commit_end = None\n else:\n commit_end = str(self._commit_data_end.value)\n authors = None\n if len(self._author_selector.value) > 0:\n authors = self._author_selector.value\n models = _ModelSelectorWithVersion._filter_models(labels=labels, authors=authors,\n commit_start=commit_start, commit_end=commit_end)\n self._selection_model_name.options = [\n x for x in models['name'].unique()]\n models = models[models['name'] == data_selected]\n widget_keys = models['widget_key'].values\n self._selection_version.options = [x for x in models['widget_key']]\n self._selection_version.value = [\n x for x in self._selection[data_selected] if x in widget_keys]\n self._updating_version = False\n\n def _clear_filter(self, dummy):\n self._commit_data_start.value = None\n self._commit_data_end.value = None\n self._author_selector.value = []\n self._label_selector.value = []\n self._apply_filter(dummy)\n\n def _update_version(self, change):\n if change['old'] is not None:\n pass\n self._updating_version = True\n data_selected = self._selection_model_name.value\n models = widget_repo.model.get_info_table()\n models = models[models['name'] == data_selected]\n self._selection_version.options = [x for x in models['widget_key']]\n self._selection_version.value = self._selection[data_selected]\n self._updating_version = False\n\n def _update_selected_versions(self, change):\n data_selected = self._selection_model_name.value\n # now handle changes of version selection: Remove versions that have been\n # deselected and add versions that have been selected\n old = set(change['old'])\n new = set(change['new'])\n # remove versions that have been deselected\n diff = old-new\n self._selection[data_selected] = list(\n set(self._selection[data_selected])-diff)\n # add new elements\n diff = new - old\n self._selection[data_selected].extend(diff)\n\n def _display_selected_overview(self, change):\n if self._updating_version:\n return\n self._update_selected_versions(change)\n versions = []\n for n, x in self._selection.items():\n versions.extend(x)\n with self._selected_overview:\n clear_output(wait=True)\n models = widget_repo.model.get_info_table()\n display(models[models['widget_key'].isin(versions)])\n\n def get_widget(self):\n filter_widget = widgets.Accordion(\n children=[self._filter], selected_index=None)\n filter_widget.set_title(0, 'Filter')\n if self._display_selection:\n return widgets.VBox(children=[\n widgets.VBox(children=[\n widgets.Label(value='Model'),\n self._selection_model_name,\n widgets.Label(value='Versions'),\n self._selection_version,\n self._selected_overview,\n ]\n ),\n filter_widget])\n\n else:\n return widgets.VBox(children=[\n widgets.VBox(children=[\n widgets.Label(value='Model'),\n self._selection_model_name,\n widgets.Label(value='Versions'),\n self._selection_version\n ]\n ),\n filter_widget])\n\n\nclass _ModelAndDataSelectorWithVersion:\n \"\"\"Widget to select a model together with data used in conjunction with the selected model.\n\n Returns:\n [type]: [description]\n \"\"\"\n\n def __init__(self, display_selection=True, **kwargs):\n self._display_selection = display_selection\n names = widget_repo.model.get_models()\n self._data = _DataSelectorWithVersion(display_selection=False)\n self._model = _ModelSelectorWithVersion(display_selection=False)\n self._data._set_update_callback(self._display_selected_overview)\n self._selected_overview = widgets.Output()\n\n def get_models(self):\n \"\"\"Returns all selected 
models as dictionary from model to list of selected model's versions\n \"\"\"\n return self._model.get_models()\n\n def get_data(self):\n return self._data.get_data()\n\n def _display_selected_overview(self, change):\n # if self._updating_version:\n # return\n # data_selected = self._selection_data.value\n # key_to_version = self._key_to_version[data_selected]\n # self._selection[data_selected] = [key_to_version[x] for x in self._selection_version.value]\n # self._selection_options[data_selected] = [x for x in self._selection_version.value]\n # tmp ={}\n # tmp['model'] = []\n # tmp['model version'] =[]\n # tmp['data'] = []\n # tmp['data version'] =[]\n # for n, x in self._selection.items():\n # for y in x:\n # for data_name, data_versions in self._model_to_data_states[n][0].items():\n # for data_version in data_versions:\n # tmp['model'].append(n)\n # tmp['model version'].append(y)\n # tmp['data'].append(data_name)\n # tmp['data version'].append(data_version)\n\n # with self._selected_overview:\n # clear_output(wait = True)\n # df = pd.DataFrame.from_dict(tmp)\n # df = df[['model', 'model version', 'data', 'data version']]\n # #arrays=[tmp['model'],tmp['model version'], tmp['data']]\n # #df = pd.DataFrame([tmp['data version']], index=arrays)\n # #multi_index = pd.MultiIndex.from_arrays(arrays, names=('model','model version', 'data', 'data version'))\n # #df.reindex(index = multi_index)\n # display(df)\n pass\n\n def get_widget(self):\n model_selection = widgets.Accordion(\n children=[self._model.get_widget()])\n model_selection.set_title(0, 'Model')\n model_selection.selected_index = None\n data_selection = widgets.Accordion(children=[self._data.get_widget()])\n data_selection.set_title(0, 'Data')\n data_selection.selected_index = None\n if self._display_selection:\n return widgets.VBox(children=[\n model_selection,\n data_selection,\n self._selected_overview, ])\n else:\n return widgets.VBox(children=[\n model_selection,\n data_selection])\n\n\nclass _MeasureSelector:\n \"\"\"Widget to select measures.\n \"\"\"\n\n def __init__(self, **kwargs):\n self._selection_widget = widgets.SelectMultiple(\n options=widget_repo.measures, **kwargs)\n\n def get_widget(self):\n return widgets.VBox(children=[widgets.Label(value='Measures'), self._selection_widget])\n\n def get_selection(self):\n return self._selection_widget.value\n\n# endregion\n\n\nclass ObjectOverviewList:\n def __init__(self, beakerX=False):\n self._categories = _ObjectCategorySelector(\n layout=widgets.Layout(width='250px', height='250px'))\n self._repo_info = widgets.SelectMultiple(\n options=[k.value for k in RepoInfoKey], value=['category', 'name', 'commit_date', 'version'],\n layout=widgets.Layout(width='200px', height='250px', margin='10px')\n )\n # self._settings = widgets.HBox(children=[self.categories, self._repo_info])\n\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.get_overview)\n\n self._output = widgets.Output(layout=widgets.Layout(\n height='300px', width='1000px', overflow_y='auto', overflow_x='auto'))\n self._input_box = widgets.HBox(\n children=[\n self._categories.get_widget(),\n widgets.VBox(children=[\n widgets.Label(value='Info Fields'),\n self._repo_info\n ]\n ),\n widgets.VBox(children=[\n self._button_update,\n self._output\n ],\n layout=widgets.Layout(margin='10px 10px 10px 10px')\n )\n ]\n )\n\n def get_overview(self, d):\n result = {}\n for info in self._repo_info.value:\n result[info] = []\n\n for k in self._categories.get_selection():\n for n in 
widget_repo.object_types[k]:\n obj = widget_repo.ml_repo.get(n)\n for info in self._repo_info.value:\n if isinstance(obj.repo_info[info], MLObjectType):\n result[info].append(obj.repo_info[info].value)\n else:\n result[info].append(str(obj.repo_info[info]))\n with self._output:\n clear_output(wait=True)\n TableDisplay(pd.DataFrame.from_dict(result)) # , orient='index'))\n\n @_add_title_and_border('Object Overview')\n def get_widget(self):\n return self._input_box\n\n\nclass ObjectView:\n\n def _setup_names(self, change=None):\n names = []\n for k in self._categories.get_selection():\n names.extend(widget_repo.ml_repo.get_names(k))\n self._names.options = names\n\n def __init__(self):\n self._categories = _ObjectCategorySelector()\n self._names = widgets.SelectMultiple(\n options=[]\n )\n self._setup_names()\n self._categories.observe(self._setup_names, 'value')\n\n self._button_update = widgets.Button(description='show history')\n self._button_update.on_click(self.show_history)\n self._output = widgets.Output()\n self._input_box = widgets.HBox(\n children=[self._categories.get_widget(), self._names, self._button_update, self._output], layout=widgets.Layout(border='solid 1px')\n )\n\n def show_history(self, d):\n result = {RepoInfoKey.NAME.value: [],\n RepoInfoKey.AUTHOR.value: [],\n RepoInfoKey.VERSION.value: [],\n RepoInfoKey.COMMIT_DATE.value: []}\n for k in self._names.value:\n history = widget_repo.ml_repo.get_history(k)\n for l in history:\n for m in result.keys():\n result[m].append(l['repo_info'][m])\n with self._output:\n clear_output(wait=True)\n TableDisplay(pd.DataFrame.from_dict(result))\n\n @_add_title_and_border('Object View')\n def get_widget(self):\n return self._input_box\n\n\nclass RepoOverview:\n def __init__(self):\n self._repo_name = widgets.HTML(\n value='<div style=\"background-color:#c2c2d6\"><h4 stype=\"text-align: center\"> Repository: '\n + widget_repo.ml_repo._config['name'] + '</h4>') # , margin = '0px 0px 0px 0px'))\n self._data_statistics = widgets.Output(\n layout=widgets.Layout(width='450px', height='450px'))\n self._plot_data_statistics()\n self._measures = widgets.Output(\n layout=widgets.Layout(width='450px', height='450px'))\n self._consistency = self._setup_consistency()\n self._labels = self._setup_labels()\n self._model_stats = self._setup_model_stats()\n\n # check consistency\n\n def _setup_consistency(self):\n def create_consistency_html(**kwargs):\n result = '<div style=\"background-color:#c2c2d6\">'\n result += '<h4 stype=\"text-align: center\">Consistency</h4>'\n for k, v in kwargs.items():\n if len(v) > 0:\n result += '<p style=\"background-color:red\"> ' + \\\n str(v) + ' ' + k + ' issues found!</p>'\n else:\n result += '<p style=\"background-color:lightgreen\">No ' + k + ' issues found.</p>'\n result += '</div>'\n return result\n\n return widgets.HTML(create_consistency_html(model=widget_repo.consistency.model,\n test=widget_repo.consistency.tests,\n data=widget_repo.consistency.data),\n layout=widgets.Layout(margin='0% 0% 0% 0%', width='400px'))\n\n def _setup_labels(self):\n header = widgets.HTML(\n '<div style=\"background-color:#c2c2d6\"><h4 stype=\"text-align: center\">Labels</h4>')\n label_output = None\n\n if len(widget_repo.labels) > 0:\n label_output = widgets.Output(\n layout=widgets.Layout(width='400px', height='100px', overflow_y='auto', overflow_x='auto'))\n with label_output:\n clear_output(wait=True)\n display(pd.DataFrame.from_dict(\n widget_repo.labels, orient='index'))\n else:\n label_output = widgets.HTML(\n '<div 
style=\"background-color:#ff4d4d\"><h4 stype=\"text-align: center\">No labels defined.</h4>')\n\n return widgets.VBox(children=[header, label_output])\n\n def _setup_model_stats(self):\n header = widgets.HTML(\n '<div style=\"background-color:#c2c2d6\"><h4 stype=\"text-align: center\">Models</h4>')\n model_stats_output = widgets.Output(\n layout=widgets.Layout(width='400px', height='100px', overflow_y='auto', overflow_x='auto'))\n with model_stats_output:\n clear_output(wait=True)\n display(pd.DataFrame.from_dict(\n widget_repo.get_model_statistics(), orient='index'))\n return widgets.VBox(children=[header, model_stats_output])\n\n def _plot_data_statistics(self):\n data_names = widget_repo.data.get_data_names()\n data_num_points = widget_repo.data.get_num_data(data_names)\n with self._data_statistics:\n clear_output(wait=True)\n plt.rcdefaults()\n _, ax = plt.subplots()\n y_pos = np.arange(len(data_names))\n ax.barh(y_pos, data_num_points, align='center',\n color='green', ecolor='black')\n ax.set_yticks(y_pos)\n ax.set_yticklabels(data_names, rotation=45, va='top')\n ax.invert_yaxis()\n ax.set_xlabel('number of datapoints')\n ax.set_title('Datasets')\n plt.show()\n\n def _plot_measures(self):\n with self._measures:\n clear_output(wait=True)\n plt.rcdefaults()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for data in widget_repo.data.get_data_names():\n for measure in widget_repo.measures:\n error = widget_repo.model.setup_error_measure_table(\n widget_repo.ml_repo, [data], [measure])\n error = error.sort_values(by='commit_date')\n plt.plot(error['commit_date'],\n error[measure + ', ' + data], '-x', label=measure + ', ' + data)\n plt.xlabel('commit date')\n ax.grid(True)\n ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n for label in ax.get_xticklabels():\n label.set_rotation(40)\n label.set_horizontalalignment('right')\n # fig.autofmt_xdate()\n plt.legend()\n ax.set_title('Measures')\n # plt.setp(ax.get_xticklabels(), ha=\"right\", rotation=45)\n plt.show()\n # plt.set_title('Measures')\n\n @_add_title_and_border('Repository Overview')\n def get_widget(self):\n self._plot_measures()\n return widgets.HBox(children=[\n widgets.VBox(children=[self._repo_name, self._model_stats, self._labels, self._consistency],\n layout=widgets.Layout(width='400px')),\n widgets.VBox(children=[self._measures]),\n widgets.VBox(children=[self._data_statistics])\n ],\n layout=widgets.Layout(width='100%', height='100%')\n )\n\n\nclass MeasureView:\n def __init__(self, beakerX=False):\n self._data = _DataSelector()\n self._measures = _MeasureSelector()\n self._repo_info = widgets.SelectMultiple(\n options=[k.value for k in RepoInfoKey], value=['category', 'name', 'commit_date', 'version'], layout=widgets.Layout(width='200px', height='250px')\n )\n self._output = widgets.Output(layout=widgets.Layout(\n width='1000px', height='450px', overflow_y='auto', overflow_x='auto'))\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.get_measures)\n\n def _get_columns_selected(self):\n columns = [x for x in self._repo_info.value]\n for data in self._data.get_selection():\n for m in self._measures.get_selection():\n columns.append(m+', '+data)\n return columns\n\n @_add_title_and_border('Measure View')\n def get_widget(self):\n self._tab = widgets.Tab(children=[\n self._output,\n widgets.HBox(children=[\n self._data.get_widget(),\n self._measures.get_widget(),\n widgets.VBox(children=[\n widgets.Label(\n value='Model Columns'),\n self._repo_info]\n ),\n 
self._button_update\n ])\n ],\n title=['Table', 'Settings']\n )\n self._tab.set_title(0, 'Table')\n self._tab.set_title(1, 'Settings')\n return self._tab\n\n def get_measures(self, d):\n self._tab.selected_index = 0\n tmp = widget_repo.model.setup_error_measure_table(\n widget_repo.ml_repo, self._data.get_selection(), self._measures.get_selection())\n columns = [c for c in tmp.columns if c in self._get_columns_selected()]\n tmp2 = tmp[columns]\n with self._output:\n clear_output(wait=True)\n # apply highlighting to floating columns only\n floats = [x.kind == 'f' for x in tmp2.dtypes]\n float_columns = tmp2.columns[floats]\n TableDisplay(tmp2.style.apply(_highlight_max, subset=float_columns).apply(\n _highlight_min, subset=float_columns)) # , orient='index'))\n\n\nclass ConsistencyChecker:\n\n def _consistency_check(self):\n self._test_results = checker.Tests.run(self._ml_repo)\n self._model_results = checker.Model.run(self._ml_repo)\n self._data_results = checker.Data.run(self._ml_repo)\n\n def __init__(self, ml_repo, beakerX=False):\n self._ml_repo = ml_repo\n\n self._overview_output = widgets.Output()\n\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.show_checks)\n\n self._widget_main = widgets.VBox(\n children=[self._button_update, self._overview_output]\n )\n\n def show_checks(self, d):\n self._consistency_check()\n with self._overview_output:\n clear_output(wait=True)\n print('test issues: ' + str(len(self._test_results)) + ', model issues: ' +\n str(len(self._model_results)) + ', data issues: ' + str(len(self._data_results)))\n result = {'test': [], 'message': [], 'model': [], 'type': []}\n for k, v in self._test_results.items():\n for a, b in v.items():\n result['type'].append('test')\n result['model'].append(k)\n result['test'].append(a)\n result['message'].append(b)\n for k, v in self._model_results.items():\n for a, b in v.items():\n result['type'].append('model')\n result['model'].append(k)\n result['test'].append(a)\n result['message'].append(b)\n for k, v in self._data_results.items():\n for a, b in v.items():\n result['type'].append('data')\n result['model'].append(k)\n result['test'].append(a)\n result['message'].append(b)\n\n display(pd.DataFrame.from_dict(result))\n\n @_add_title_and_border('Consistency')\n def get_widget(self):\n return self._widget_main\n\n\nclass ModelErrorHistogram:\n \"\"\"Class to plot histograms of model errors.\n Please make sure that if you use plotly, also the jupyter plotlywidgets are installed via:\n jupyter nbextension install --py --sys-prefix plotlywidget\n otherwise you may encounter problems using this class.\n \"\"\"\n\n def __init__(self):\n self._model_data_selector = _ModelAndDataSelectorWithVersion(\n display_selection=False)\n\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._coord = widgets.SelectMultiple(\n options=widget_repo.data._y_coord_names,\n value=[widget_repo.data._y_coord_names[0]],\n disabled=False\n )\n\n def _plot(self, d):\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(paiplot.histogram_model_error(widget_repo.ml_repo, self._model_data_selector.get_models(),\n self._model_data_selector.get_data(), y_coordinate=self._coord.value)))\n\n @_add_title_and_border('Pointwise Model Error Histogram')\n def get_widget(self):\n y_coord = widgets.Accordion(children=[self._coord])\n y_coord.set_title(0, 'Y-coordinates')\n return widgets.HBox(children=[\n 
widgets.VBox(children=[\n self._model_data_selector.get_widget(),\n y_coord,\n self._update_button\n ]),\n self._output\n ])\n\n\nclass ModelErrorConditionalHistogram:\n \"\"\"Plots the distribution of input data along a given axis for the largest absolute pointwise errors in comparison to the distribution of all data.\n \"\"\"\n\n def __init__(self):\n self._data_model_selection = _ModelAndDataSelectorWithVersion(\n display_selection=False)\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._recommendation_output = widgets.Output()\n self._recommendation_table = None\n self._output_tab = widgets.Tab(children=[self._output,\n self._recommendation_output])\n self._output_tab.set_title(0, 'histograms')\n self._output_tab.set_title(1, 'recommendations')\n self._quantile = widgets.FloatSlider(\n value=10,\n min=1,\n max=50,\n step=1,\n readout=True,\n readout_format='.2f',\n )\n self._coord = widgets.Select(\n options=widget_repo.data._y_coord_names,\n value=widget_repo.data._y_coord_names[0],\n disabled=False\n )\n self._x_coord = widgets.Select(\n options=widget_repo.data._x_coord_names,\n value=widget_repo.data._x_coord_names[0],\n disabled=False\n )\n self._accordion = widgets.Accordion(children=[\n self._get_selection_widget(),\n self._get_recommendation_widget()\n ])\n self._accordion.set_title(0, 'Selection')\n self._accordion.set_title(1, 'Recommendation')\n\n def _get_selection_widget(self):\n coordinate_selection = widgets.Accordion(children=[\n widgets.VBox(children=[\n widgets.Label(value='y-coordinates'),\n self._coord,\n widgets.Label(value='x-coordinates'),\n self._x_coord])\n ])\n coordinate_selection.set_title(0, 'Coordinates')\n return widgets.VBox(children=[\n self._data_model_selection.get_widget(),\n coordinate_selection,\n self._quantile,\n self._update_button])\n\n def _get_recommendation_widget(self):\n self._update_recommendation = widgets.Button(description='update')\n self._max_num_recommendations = widgets.IntText(value=20,\n description='maximum number of recommendations')\n self._cache_in_repo = widgets.Checkbox(\n value=True, description='cache MMD in repo')\n self._scale = widgets.Checkbox(\n value=True, description='scale x-values to zero mean and unit variance')\n self._update_recommendation.on_click(self._recommend)\n self._kernel_selection = widgets.Dropdown(options=[\n 'rbf', 'linear', 'polynomial', 'sigmoid', 'laplacian', 'chi2'\n ],\n value='rbf',\n description='kernel')\n self._gamma = widgets.FloatText(value=1.0, description='gamma')\n self._gamma_for_kernel = [\n 'rbf', 'polynomial', 'sigmoid', 'laplacian', 'chi2']\n self._kernel_selection.observe(self._on_kernel_change, names='value')\n self._recommendation_selection = widgets.IntText(\n description='recommendation id')\n self._recomendation_selection_apply = widgets.Button(\n description='apply recommendation')\n self._recomendation_selection_apply.on_click(self._apply_recommend)\n return widgets.VBox(children=[\n self._max_num_recommendations,\n self._cache_in_repo,\n self._scale,\n self._kernel_selection,\n self._gamma,\n self._update_recommendation,\n self._recommendation_selection,\n self._recomendation_selection_apply\n ])\n self._recommendation_table = None\n\n def _on_kernel_change(self, d):\n if self._kernel_selection in self._gamma_for_kernel:\n self._gamma.disabled = False\n else:\n self._gamma.disabled = True\n\n def _apply_recommend(self, d):\n if self._recommendation_table is None:\n 
logger.error(\n 'Recommendation table is empty, please first update the recommendation.')\n with self._output:\n clear_output(wait=True)\n print(\n 'Recommendation table is empty, please first update the recommendation.')\n return\n\n if self._recommendation_selection.value is not None:\n self._coord.value = self._recommendation_table['y-coord'][self._recommendation_selection.value]\n self._x_coord.value = self._recommendation_table[\n 'x-coord'][self._recommendation_selection.value]\n self._models.value = [\n self._recommendation_table['model'][self._recommendation_selection.value]]\n self._data.value = [\n self._recommendation_table['data'][self._recommendation_selection.value]]\n self._plot(None)\n\n def _plot(self, d):\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.histogram_data_conditional_error(widget_repo.ml_repo,\n self._data_model_selection.get_models(), self._data_model_selection.get_data(),\n x_coordinate=self._x_coord.value,\n y_coordinate=self._coord.value,\n percentile=self._quantile.value/100.0)\n ))\n self._output_tab.selected_index = 0\n\n def _recommend(self, d):\n self._output_tab.set_title(1, 'computing...')\n self._recommendation_table = pd.DataFrame.from_dict(\n plt_helper.get_ptws_error_dist_mmd(widget_repo.ml_repo, self._data_model_selection.get_models(),\n data=self._data_model_selection.get_data(),\n start_index=0, end_index=-1, percentile=self._quantile.value/100.0,\n scale=self._scale.value,\n cache=self._cache_in_repo,\n metric=self._kernel_selection.value,\n gamma=self._gamma.value)\n )\n self._recommendation_table['model version']\n self._recommendation_table['data version']\n self._recommendation_table.sort_values(\n ['mmd'], ascending=False, inplace=True)\n with self._recommendation_output:\n clear_output(wait=True)\n display(\n self._recommendation_table.iloc[0:self._max_num_recommendations.value])\n self._output_tab.selected_index = 1\n self._output_tab.set_title(1, 'recommendations')\n self._recommendation_selection.value = self._recommendation_table.index[0]\n\n @_add_title_and_border('Data Distribution of Largest Pointwise Errors.')\n def get_widget(self):\n return widgets.HBox(children=[\n self._accordion,\n self._output_tab\n ])\n\n\nclass ScatterModelError:\n def __init__(self):\n self._model_data_selector = _ModelAndDataSelectorWithVersion(\n display_selection=False)\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._coord = widgets.Select(\n options=widget_repo.data._y_coord_names,\n value=widget_repo.data._y_coord_names[0],\n disabled=False\n )\n self._x_coord = widgets.Select(\n options=widget_repo.data._x_coord_names,\n value=widget_repo.data._x_coord_names[0],\n disabled=False\n )\n\n def _get_selection_widget(self):\n coordinates = widgets.Accordion(children=[\n widgets.VBox(children=[\n widgets.Label(value='y-coordinates'),\n self._coord,\n widgets.Label(value='x-coordinates'),\n self._x_coord,\n ]\n )\n ]\n )\n coordinates.set_title(0, 'Coordinates')\n return widgets.VBox(children=[\n self._model_data_selector.get_widget(),\n coordinates,\n self._update_button]\n )\n\n def _plot(self, d):\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.scatter_model_error(widget_repo.ml_repo,\n self._model_data_selector.get_models(),\n self._model_data_selector.get_data(),\n x_coordinate=self._x_coord.value,\n y_coordinate=self._coord.value)\n ))\n\n @_add_title_and_border('Scatter Plot 
Pointwise Errors.')\n def get_widget(self):\n return widgets.HBox(children=[\n self._get_selection_widget(),\n self._output\n ])\n\n\nclass IndividualConditionalExpectation:\n \"\"\"Plots the individual conditional expectation at a certain point.\n \"\"\"\n\n def __init__(self):\n names = widget_repo.data.get_data_names()\n self._model_data_selection = _ModelAndDataSelectorWithVersion()\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._cluster_statistics_output = widgets.Output()\n self._output_tab = widgets.Tab(children=[self._output,\n self._cluster_statistics_output\n ])\n self._output_tab.set_title(0, 'ICE plots')\n self._output_tab.set_title(1, 'clustering')\n self._coord = widgets.Select(\n options=widget_repo.data._y_coord_names,\n value=widget_repo.data._y_coord_names[0],\n disabled=False\n )\n self._x_coord = widgets.Select(\n options=widget_repo.data._x_coord_names,\n value=widget_repo.data._x_coord_names[0],\n disabled=False\n )\n self._x_value_start = widgets.FloatText(value=-1.0)\n self._x_value_end = widgets.FloatText(value=1.0)\n self._n_x_points = widgets.IntText(value=10)\n self._accordion = widgets.Accordion(children=[\n self._get_selection_widget(),\n self._get_clustering_widget()\n ])\n\n self._accordion.set_title(0, 'Selection')\n self._accordion.set_title(1, 'Clustering')\n\n def _get_selection_widget(self):\n return widgets.VBox(children=[\n self._model_data_selection.get_widget(),\n widgets.Label(value='y-coordinates'),\n self._coord,\n widgets.Label(value='x-coordinates'),\n self._x_coord,\n widgets.Label(value='x-start'),\n self._x_value_start,\n widgets.Label(value='x-end'),\n self._x_value_end,\n widgets.Label(value='num x-points'),\n self._n_x_points,\n self._update_button])\n\n def _get_clustering_widget(self):\n self._update_clustering = widgets.Button(description='update')\n self._use_clustering = widgets.Checkbox(\n value=True, description='apply clustering')\n self._max_num_clusters = widgets.IntText(value=20,\n description='maximum number of clusters')\n self._random_state = widgets.IntText(\n value=42, description='Random State')\n self._cache_in_repo = widgets.Checkbox(\n value=True, description='cache ICE in repo')\n self._scale = widgets.Checkbox(\n value=True, description='scale x-values to zero mean and unit variance')\n self._update_clustering.on_click(self._cluster)\n\n return widgets.VBox(children=[\n self._use_clustering,\n self._max_num_clusters,\n self._random_state,\n self._cache_in_repo,\n self._scale\n ])\n\n def _plot(self, d):\n cluster_param = None\n if self._use_clustering.value:\n cluster_param = {'n_clusters': self._max_num_clusters.value,\n 'random_state': self._random_state.value}\n # since the numpy cannot json serialized by default,\n # caching would not working, therefore we convert it into list\n x_points = [x for x in np.linspace(self._x_value_start.value, self._x_value_end.value,\n self._n_x_points.value)]\n self._ice = []\n for model, model_versions in self._model_data_selection.get_models().items():\n for data, data_versions in self._model_data_selection.get_data().items():\n for model_version in model_versions:\n for data_version in data_versions:\n self._ice.append((model, model_version, data, data_version,\n interpretation.compute_ice(widget_repo.ml_repo,\n x_points,\n data,\n model=model,\n model_version=model_version,\n data_version=data_version,\n y_coordinate=self._coord.value,\n x_coordinate=self._x_coord.value,\n 
cache=self._cache_in_repo.value,\n clustering_param=cluster_param,\n end_index=200),\n )\n )\n\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.ice(self._ice)\n ))\n self._output_tab.selected_index = 0\n if len(self._ice) > 0:\n if self._ice[0][-1].cluster_centers is not None:\n with self._cluster_statistics_output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.ice_clusters(self._ice)\n ))\n\n def _cluster(self, d):\n\n self._output_tab.set_title(1, 'computing...')\n models = [x for x in self._models.value]\n\n for x in self._labels.value:\n l = widget_repo.labels[x]\n models.append((l['model'], l['version'],))\n\n with self._cluster_statistics_output:\n clear_output(wait=True)\n # display(self._recommendation_table.iloc[0:self._max_num_recommendations.value])\n self._output_tab.selected_index = 1\n self._output_tab.set_title(1, 'cluster statistics')\n # self._recommendation_selection.value = self._recommendation_table.index[0]\n\n @_add_title_and_border('Individual Conditional Expectation Plots')\n def get_widget(self):\n return widgets.HBox(children=[\n self._accordion,\n self._output_tab\n ])\n\n\nclass PlotMeasureVsParameter:\n def __init__(self):\n self._model_selector = widgets.Dropdown(\n options=widget_repo.model.get_models(), value=None)\n self._data_selector = _DataSelectorWithVersion(display_selection=False)\n # self._model_data_selector = _ModelAndDataSelectorWithVersion(\n # display_selection=False)\n self._measure_selector = widgets.Dropdown(options=widget_repo.measures)\n self._model_selector.observe(\n self._update_param_selector)\n self._param_selector = widgets.Dropdown(options=[])\n self._output = widgets.Output()\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n\n def _print(self, message):\n with self._output:\n clear_output(wait=True)\n print(message)\n\n def _update_param_selector(self, change):\n # print(change)\n if self._model_selector.value is None:\n return\n model = self._model_selector.value\n model_param_name = NamingConventions.get_model_param_name(\n model)\n params = []\n try:\n model_params = widget_repo.ml_repo.get(model_param_name)\n for p in model_params.get_params().keys():\n params.append(p)\n except:\n pass\n train_param_name = str(NamingConventions.TrainingParam(model))\n try:\n train_params = widget_repo.ml_repo.get(train_param_name)\n for p in train_params.get_params().keys():\n params.append(p)\n except:\n pass\n self._param_selector.options = params\n\n def _plot(self, change):\n measures = []\n model = self._model_selector.value\n if model is None:\n self._print('Please select a model.')\n return\n data = self._data_selector.get_data()\n for d, w in data.items():\n if len(w) > 0:\n measures.append(str(NamingConventions.Measure(\n model=NamingConventions.get_model_from_name(model), data=d, measure_type=self._measure_selector.value)))\n if len(measures) == 0:\n self._print('Please select data together with data versions.')\n return\n with self._output:\n clear_output(wait=True)\n # create measure names from selected models, data and measures\n display(go.FigureWidget(\n paiplot.measure_by_parameter(widget_repo.ml_repo,\n measures, self._param_selector.value)\n ))\n\n @_add_title_and_border('Measure vs Parameter')\n def get_widget(self):\n return widgets.HBox(children=[\n widgets.VBox(children=[\n widgets.VBox(children=[\n widgets.Label(value='Model'),\n self._model_selector]\n ),\n self._data_selector.get_widget(),\n 
self._measure_selector,\n self._param_selector,\n self._update_button\n ]),\n self._output\n ])\n"
] | [
[
"numpy.load"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcdefaults",
"pandas.DataFrame",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.subplots",
"pandas.set_option",
"matplotlib.pyplot.show",
"pandas.concat",
"matplotlib.pyplot.plot",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame.from_dict"
]
] |
liwanjunit/ASRGAN | [
"ac01e546939c435c246fbdce64606464f8fdfc00"
] | [
"loss/loss_new.py"
] | [
"import torch\nfrom torch import nn\nfrom torchvision.models.vgg import vgg16\n\n\nclass GeneratorLoss_NEW(nn.Module):\n def __init__(self):\n super(GeneratorLoss_NEW, self).__init__()\n vgg = vgg16(pretrained=True)\n # loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()\n loss_network = nn.Sequential(*list(vgg.features)[:35]).eval()\n for param in loss_network.parameters():\n param.requires_grad = False\n self.loss_network = loss_network\n self.mse_loss = nn.MSELoss()\n self.tv_loss = TVLoss()\n self.charbonnier_loss = L1_Charbonnier_loss()\n\n def forward(self, out_labels, out_images, target_images):\n # Adversarial Loss\n adversarial_loss = torch.mean(1 - out_labels)\n # Perception Loss\n # perception_loss = self.mse_loss(self.loss_network(out_images), self.loss_network(target_images))\n perception_loss = self.charbonnier_loss(self.loss_network(out_images), self.loss_network(target_images))\n # Image Loss\n # image_loss = self.mse_loss(out_images, target_images)\n image_loss = self.charbonnier_loss(out_images, target_images)\n # TV Loss\n tv_loss = self.tv_loss(out_images)\n return image_loss + 0.001 * adversarial_loss + 0.006 * perception_loss + 2e-8 * tv_loss\n\n\nclass TVLoss(nn.Module):\n def __init__(self, tv_loss_weight=1):\n super(TVLoss, self).__init__()\n self.tv_loss_weight = tv_loss_weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self.tensor_size(x[:, :, 1:, :])\n count_w = self.tensor_size(x[:, :, :, 1:])\n h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()\n return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n\n @staticmethod\n def tensor_size(t):\n return t.size()[1] * t.size()[2] * t.size()[3]\n\n\nclass L1_Charbonnier_loss(torch.nn.Module):\n \"\"\"L1 Charbonnierloss.\"\"\"\n def __init__(self):\n super(L1_Charbonnier_loss, self).__init__()\n self.eps = 1e-6\n\n def forward(self, X, Y):\n diff = torch.add(X, -Y)\n error = torch.sqrt(diff * diff + self.eps)\n loss = torch.mean(error)\n return loss\n\n\nif __name__ == \"__main__\":\n g_loss = GeneratorLoss_NEW()\n print(g_loss)\n"
] | [
[
"torch.nn.MSELoss",
"torch.add",
"torch.sqrt",
"torch.mean",
"torch.pow"
]
] |
LSanselme/kerod | [
"cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902"
] | [
"src/kerod/core/sampling_ops.py"
] | [
"# Copyright 2017 The TensorFlow Authors and modified by Emilien Garreau. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Method to subsample minibatches by balancing positives and negatives.\n\nSubsamples minibatches based on a pre-specified positive fraction in range\n[0,1]. The class presumes there are many more negatives than positive examples:\nif the desired sample_size cannot be achieved with the pre-specified positive\nfraction, it fills the rest with negative examples. If this is not sufficient\nfor obtaining the desired sample_size, it returns fewer examples.\n\nThe main function to call is Subsample(self, indicator, labels). For convenience\none can also call SubsampleWeights(self, weights, labels) which is defined in\nthe minibatch_sampler base class.\n\nWhen is_static is True, it implements a method that guarantees static shapes.\nIt also ensures the length of output of the subsample is always sample_size, even\nwhen number of examples set to True in indicator is less than sample_size.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom kerod.utils import ops\n\n\ndef subsample_indicator(indicator, num_samples):\n \"\"\"Subsample indicator vector.\n\n Given a boolean indicator vector with M elements set to `True`, the function\n assigns all but `num_samples` of these previously `True` elements to\n `False`. If `num_samples` is greater than M, the original indicator vector\n is returned.\n\n Arguments:\n - *indicator*: a 1-dimensional boolean tensor indicating which elements\n are allowed to be sampled and which are not.\n\n - *num_samples*: int32 scalar tensor\n\n Returns:\n\n A boolean tensor with the same shape as input (indicator) tensor\n \"\"\"\n indices = tf.where(indicator)\n indices = tf.random.shuffle(indices)\n indices = tf.reshape(indices, [-1])\n\n num_samples = tf.minimum(tf.size(indices), num_samples)\n selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))\n\n selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])\n\n return tf.equal(selected_indicator, 1)\n\n\ndef sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5):\n \"\"\"Subsamples minibatches to a desired balance of positives and negatives.\n\n Arguments:\n\n - *indicator*: boolean tensor of shape [N] whose True entries can be sampled.\n - *sample_size*: desired batch size. 
If None, keeps all positive samples and\n randomly selects negative samples so that the positive sample fraction\n matches positive_fraction.\n - *labels*: boolean tensor of shape [N] denoting positive(=True) and negative\n (=False) examples.\n - *positive_fraction*: desired fraction of positive examples (scalar in [0,1])\n in the batch.\n\n Returns:\n\n *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled.\n \"\"\"\n\n negative_idx = tf.logical_not(labels)\n positive_idx = tf.logical_and(labels, indicator)\n negative_idx = tf.logical_and(negative_idx, indicator)\n\n # Sample positive and negative samples separately\n if sample_size is None:\n max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))\n else:\n max_num_pos = int(positive_fraction * sample_size)\n sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)\n num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))\n if sample_size is None:\n negative_positive_ratio = (1 - positive_fraction) / positive_fraction\n max_num_neg = tf.cast(negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32),\n dtype=tf.int32)\n else:\n max_num_neg = sample_size - num_sampled_pos\n sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg)\n\n return tf.logical_or(sampled_pos_idx, sampled_neg_idx)\n\n\ndef batch_sample_balanced_positive_negative(indicators,\n sample_size,\n labels,\n positive_fraction=0.5,\n dtype=tf.float32):\n \"\"\"Subsamples minibatches to a desired balance of positives and negatives.\n\n Arguments:\n\n - *indicator*: boolean tensor of shape [batch_size, N] whose True entries can be sampled.\n - *sample_size*: desired batch size. If None, keeps all positive samples and\n randomly selects negative samples so that the positive sample fraction\n matches positive_fraction.\n - *labels*: boolean tensor of shape [batch_size, N] denoting positive(=True) and negative\n (=False) examples.\n - *positive_fraction*: desired fraction of positive examples (scalar in [0,1])\n in the batch.\n\n Returns:\n\n A boolean tensor of shape [M, N], True for entries which are sampled.\n \"\"\"\n\n def _minibatch_subsample_fn(inputs):\n indicators, targets = inputs\n return sample_balanced_positive_negative(tf.cast(indicators, tf.bool),\n sample_size,\n tf.cast(targets, tf.bool),\n positive_fraction=positive_fraction)\n\n return tf.cast(tf.map_fn(_minibatch_subsample_fn, [indicators, labels],\n dtype=tf.bool,\n parallel_iterations=16,\n back_prop=True),\n dtype=dtype)\n"
] | [
[
"tensorflow.size",
"tensorflow.equal",
"tensorflow.logical_or",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.random.shuffle",
"tensorflow.logical_not",
"tensorflow.map_fn",
"tensorflow.logical_and",
"tensorflow.cast",
"tensorflow.where"
]
] |
tapasi-brahma/nobrainer | [
"c46586658d226bc3ca22869fd45a2674fdd52be9"
] | [
"nobrainer/metrics.py"
] | [
"\"\"\"Implementations of metrics for 3D semantic segmentation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef average_volume_difference():\n raise NotImplementedError()\n\n\ndef dice(y_true, y_pred, axis=(1, 2, 3, 4)):\n \"\"\"Calculate Dice similarity between labels and predictions.\n\n Dice similarity is in [0, 1], where 1 is perfect overlap and 0 is no\n overlap. If both labels and predictions are empty (e.g., all background),\n then Dice similarity is 1.\n\n If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an\n axis parameter of `(1, 2, 3)` will result in a tensor that contains a Dice\n score for every class in every item in the batch. The shape of this tensor\n will be `(batch, classes)`. If the inputs only have one class (e.g., binary\n segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used.\n This will result in a tensor of shape `(batch,)`, where every value is the\n Dice similarity for that prediction.\n\n Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ6\n\n Returns\n -------\n Tensor of Dice similarities.\n\n Citations\n ---------\n Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation:\n analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015\n Aug 12. doi:10.1186/s12880-015-0068-x\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n eps = tf.keras.backend.epsilon()\n\n intersection = tf.reduce_sum(y_true * y_pred, axis=axis)\n summation = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)\n return (2 * intersection + eps) / (summation + eps)\n\n\ndef generalized_dice(y_true, y_pred, axis=(1, 2, 3)):\n \"\"\"Calculate Generalized Dice similarity. This is useful for multi-class\n predictions.\n\n If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an\n axis parameter of `(1, 2, 3)` should be used. This will result in a tensor\n of shape `(batch,)`, where every value is the Generalized Dice similarity\n for that prediction, across all classes.\n\n Returns\n -------\n Tensor of Generalized Dice similarities.\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2:\n raise ValueError(\"y_true and y_pred must be at least rank 2.\")\n\n epsilon = tf.keras.backend.epsilon()\n \n w = tf.math.reciprocal(tf.square(tf.reduce_sum(y_true, axis=axis)))\n w = tf.where(tf.math.is_finite(w), w, epsilon)\n num = 2 * tf.reduce_sum(w * tf.reduce_sum(y_true * y_pred, axis= axis), axis=-1)\n den = tf.reduce_sum(w * tf.reduce_sum(y_true + y_pred, axis= axis), axis=-1)\n gdice = num/den\n gdice = tf.where(tf.math.is_finite(gdice), gdice, tf.zeros_like(gdice))\n return gdice\n\n\ndef hamming(y_true, y_pred, axis=(1, 2, 3)):\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n return tf.reduce_mean(tf.not_equal(y_pred, y_true), axis=axis)\n\n\ndef haussdorf():\n raise NotADirectoryError()\n\n\ndef jaccard(y_true, y_pred, axis=(1, 2, 3, 4)):\n \"\"\"Calculate Jaccard similarity between labels and predictions.\n\n Jaccard similarity is in [0, 1], where 1 is perfect overlap and 0 is no\n overlap. If both labels and predictions are empty (e.g., all background),\n then Jaccard similarity is 1.\n\n If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an\n axis parameter of `(1, 2, 3)` will result in a tensor that contains a Jaccard\n score for every class in every item in the batch. 
The shape of this tensor\n will be `(batch, classes)`. If the inputs only have one class (e.g., binary\n segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used.\n This will result in a tensor of shape `(batch,)`, where every value is the\n Jaccard similarity for that prediction.\n\n Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ7\n\n Returns\n -------\n Tensor of Jaccard similarities.\n\n Citations\n ---------\n Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation:\n analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015\n Aug 12. doi:10.1186/s12880-015-0068-x\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n eps = tf.keras.backend.epsilon()\n\n intersection = tf.reduce_sum(y_true * y_pred, axis=axis)\n union = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)\n return (intersection + eps) / (union - intersection + eps)\n\n\ndef tversky(y_true, y_pred, axis=(1, 2, 3), alpha=0.3, beta=0.7):\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2:\n raise ValueError(\"y_true and y_pred must be at least rank 2.\")\n\n eps = tf.keras.backend.epsilon()\n\n num = tf.reduce_sum(y_pred * y_true, axis=axis)\n den = (\n num\n + alpha * tf.reduce_sum(y_pred * (1 - y_true), axis=axis)\n + beta * tf.reduce_sum((1 - y_pred) * y_true, axis=axis)\n )\n # Sum over classes.\n return tf.reduce_sum((num + eps) / (den + eps), axis=-1)\n\ndef dice_coef_multilabel(y_true, y_pred):\n n_classes= tf.shape(y_pred)[-1]\n dice_coeff=0\n for index in range(n_classes):\n dice_coeff -= dice(y_true[:,:,:,:,index], y_pred[:,:,:,:,index])\n return dice_coeff\n"
] | [
[
"tensorflow.shape",
"tensorflow.zeros_like",
"tensorflow.cast",
"tensorflow.keras.backend.epsilon",
"tensorflow.convert_to_tensor",
"tensorflow.not_equal",
"tensorflow.reduce_sum",
"tensorflow.math.is_finite"
]
] |
huylb314/AVIAD_AVIJST | [
"bf8e0617849b4f8f4b95ea345be1565ea063ee38"
] | [
"avijst/tensorflow/data.py"
] | [
"import numpy as np\nfrom sklearn import metrics\nimport math\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom typing import *\n\n# fastai utility\ndef listify(o):\n if o is None: return []\n if isinstance(o, list): return o\n if isinstance(o, str): return [o]\n if isinstance(o, Iterable): return list(o)\n return [o]\n\ndef compose(x, funcs, *args, **kwargs):\n for f in listify(funcs): \n x = f(x, **kwargs)\n return x\n\nclass Onehotify():\n def __init__(self, vocab_size):\n self.vocab_size = vocab_size\n self.tokenizer = Tokenizer(num_words=vocab_size)\n def __call__(self, item):\n return self.tokenizer.sequences_to_matrix([item], mode='binary')\n\nclass Padify():\n def __init__(self, maxlen):\n self.maxlen = maxlen\n def __call__(self, item):\n return sequence.pad_sequences([item], maxlen=self.maxlen)\n\nclass YOnehotify():\n def __init__(self, num_classes):\n self.num_classes = num_classes\n def __call__(self, item):\n categorical = np.zeros((1, self.num_classes))\n categorical[0, item] = 1\n return categorical\n\nclass Dataset():\n def __init__(self, x, y, tfms_x, tfms_y): \n self.x, self.y = x, y\n self.x_tfms, self.y_tfms = tfms_x, tfms_y\n def __len__(self): \n return len(self.x)\n def _get_transform(self, i, tfms):\n return compose(i, tfms)\n def __getitem__(self, i): \n batch_x, batch_y = self.x[i], self.y[i]\n return_x, return_y = [], []\n if isinstance(i, slice): \n return_x = [self._get_transform(o, self.x_tfms) for o in batch_x]\n if isinstance(i, slice):\n return_y = [self._get_transform(o, self.y_tfms) for o in batch_y]\n return np.vstack(return_x), np.vstack(return_y)\n\nclass DataLoader():\n def __init__(self, ds, bs, drop_last=True): self.ds, self.bs, self.drop_last = ds, bs, drop_last\n def __iter__(self):\n length = len(self.ds) // self.bs if self.drop_last else math.ceil(len(self.ds) / self.bs)\n for i in range(0, length, 1):\n yield self.ds[(i*self.bs):(i*self.bs)+self.bs]"
] | [
[
"numpy.vstack",
"numpy.zeros"
]
] |
orrinjelo/AedanWallpaper | [
"c5d67c45d7d295d90bc979f2cda645e0b578f10c"
] | [
"scripts/rainbow.py"
] | [
"from PIL import Image\nimport numpy as np\nimport colorsys\nimport os, sys\nimport argparse\nimport matplotlib.pyplot as plt \n\n\nrgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)\nhsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)\n\ndef crop(image, box=None):\n if box:\n imageBox = box\n else:\n imageBox = image.getbbox()\n return image.crop(imageBox)\n\ndef hue_shift(image, value):\n im = image.convert('RGBA')\n arr = np.array(np.asarray(im).astype(float))\n r,g,b,a = np.rollaxis(arr, axis=-1)\n # print(np.max(r))\n h,s,v = rgb_to_hsv(r, g, b)\n r, g, b = hsv_to_rgb((h + value/360.0) % 1.0, s, v)\n arr = np.dstack((r, g, b, a))\n\n # print(np.max(r))\n # plt.imshow(arr.astype(int), aspect='auto')\n # plt.show()\n\n return Image.fromarray(arr.astype('uint8'), 'RGBA')\n\nparser = argparse.ArgumentParser(description='Rainbow an image batch')\nparser.add_argument('--filename', dest='filename', type=str)\nparser.add_argument('--step', dest='step', type=float, default=5.0)\nparser.add_argument('--max_step', dest='max_step', type=float, default=360.0)\nargs = parser.parse_args()\n\ncolor_image = Image.open(args.filename)\n\nbasename = os.path.basename(args.filename)\nbase, ext = os.path.splitext(basename)\n\nif not os.path.exists('anim'):\n os.mkdir('anim')\n\nfor n in range(0, int(args.max_step/args.step)):\n dtheta = n*args.step\n print('Writing out', dtheta)\n cropped = crop(color_image, (1620, 780, 2220, 1380))\n new_im = hue_shift(cropped, dtheta)\n new_fn = os.path.join('anim','{0}_{1}{2}'.format(base, n, ext))\n n += 1\n new_im.save(new_fn)"
] | [
[
"numpy.asarray",
"numpy.dstack",
"numpy.vectorize",
"numpy.rollaxis"
]
] |
leike666666/tensorflow | [
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"a3fd0ddfcb716be124e95b51e96e6c1e4507ef64"
] | [
"tensorflow/python/keras/regularizers_test.py",
"tensorflow/python/kernel_tests/cwise_ops_test.py",
"tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras regularizers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.utils import np_utils\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nDATA_DIM = 5\nNUM_CLASSES = 2\n\n\nclass KerasRegularizersTest(keras_parameterized.TestCase,\n parameterized.TestCase):\n\n def create_model(self, kernel_regularizer=None, activity_regularizer=None):\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(NUM_CLASSES,\n kernel_regularizer=kernel_regularizer,\n activity_regularizer=activity_regularizer,\n input_shape=(DATA_DIM,)))\n return model\n\n def get_data(self):\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=10,\n test_samples=10,\n input_shape=(DATA_DIM,),\n num_classes=NUM_CLASSES)\n y_train = np_utils.to_categorical(y_train, NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test, NUM_CLASSES)\n return (x_train, y_train), (x_test, y_test)\n\n def create_multi_input_model_from(self, layer1, layer2):\n input_1 = keras.layers.Input(shape=(DATA_DIM,))\n input_2 = keras.layers.Input(shape=(DATA_DIM,))\n out1 = layer1(input_1)\n out2 = layer2(input_2)\n out = keras.layers.Average()([out1, out2])\n model = keras.models.Model([input_1, input_2], out)\n model.add_loss(keras.backend.mean(out2))\n model.add_loss(math_ops.reduce_sum(input_1))\n return model\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_kernel_regularization(self, regularizer):\n (x_train, y_train), _ = self.get_data()\n model = self.create_model(kernel_regularizer=regularizer)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertEqual(len(model.losses), 1)\n model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ('l2_zero', keras.regularizers.l2(0.)),\n ])\n def test_activity_regularization(self, regularizer):\n (x_train, y_train), _ = self.get_data()\n model = self.create_model(activity_regularizer=regularizer)\n model.compile(\n loss='categorical_crossentropy',\n 
optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertEqual(len(model.losses), 1 if context.executing_eagerly() else 1)\n model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)\n\n @keras_parameterized.run_all_keras_modes\n @keras_parameterized.run_with_all_model_types\n def test_zero_regularization(self):\n # Verifies that training with zero regularization works.\n x, y = np.ones((10, 10)), np.ones((10, 3))\n model = testing_utils.get_model_from_layers(\n [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n model.fit(x, y, batch_size=5, epochs=1)\n\n def test_custom_regularizer_saving(self):\n\n def my_regularizer(weights):\n return math_ops.reduce_sum(math_ops.abs(weights))\n\n inputs = keras.Input((10,))\n outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs)\n model = keras.Model(inputs, outputs)\n model2 = model.from_config(\n model.get_config(), custom_objects={'my_regularizer': my_regularizer})\n self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_regularization_shared_layer(self, regularizer):\n dense_layer = keras.layers.Dense(\n NUM_CLASSES,\n kernel_regularizer=regularizer,\n activity_regularizer=regularizer)\n model = self.create_multi_input_model_from(dense_layer, dense_layer)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertLen(model.losses, 5)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_regularization_shared_model(self, regularizer):\n dense_layer = keras.layers.Dense(\n NUM_CLASSES,\n kernel_regularizer=regularizer,\n activity_regularizer=regularizer)\n\n input_tensor = keras.layers.Input(shape=(DATA_DIM,))\n dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor))\n\n model = self.create_multi_input_model_from(dummy_model, dummy_model)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertLen(model.losses, 6)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_regularization_shared_layer_in_different_models(self, regularizer):\n shared_dense = keras.layers.Dense(\n NUM_CLASSES,\n kernel_regularizer=regularizer,\n activity_regularizer=regularizer)\n models = []\n for _ in range(2):\n input_tensor = keras.layers.Input(shape=(DATA_DIM,))\n unshared_dense = keras.layers.Dense(\n NUM_CLASSES, kernel_regularizer=regularizer)\n out = unshared_dense(shared_dense(input_tensor))\n models.append(keras.models.Model(input_tensor, out))\n\n model = self.create_multi_input_model_from(\n layer1=models[0], layer2=models[1])\n model.compile(\n 
loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n # We expect to see 9 losses on the model:\n # - 2 from the 2 add_loss calls on the outer model.\n # - 3 from the weight regularizers on the shared_dense layer, unshared_dense\n # in inner model 1, unshared_dense in inner model 2.\n # - 4 from activity regularizers on the shared_dense layer.\n self.assertLen(model.losses, 9)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for coefficient-wise operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes as dtypes_lib\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n_ADD = lambda x, y: x + y\n_SUB = lambda x, y: x - y\n_MUL = lambda x, y: x * y\n_POW = lambda x, y: x**y\n_TRUEDIV = lambda x, y: x / y\n_FLOORDIV = lambda x, y: x // y\n_MOD = lambda x, y: x % y\n\n_LT = lambda x, y: x < y\n_LE = lambda x, y: x <= y\n_GT = lambda x, y: x > y\n_GE = lambda x, y: x >= y\n\n_AND = lambda x, y: x & y\n_OR = lambda x, y: x | y\n_XOR = lambda x, y: x ^ y\n_INV = lambda x: ~x\n\n\n# TODO(zongheng): it'd be great to factor out this function and various random\n# SparseTensor gen funcs.\ndef _sparsify(x, thresh=0.5, index_dtype=np.int64):\n x[x < thresh] = 0\n\n non_zero = np.where(x)\n x_indices = np.vstack(non_zero).astype(index_dtype).T\n x_values = x[non_zero]\n x_shape = x.shape\n\n return sparse_tensor.SparseTensor(\n indices=x_indices, values=x_values, dense_shape=x_shape), x_values\n\n\ndef _default_tolerance(dtype):\n \"\"\"Returns a sensible default tolerance for comparing results of a given type.\n\n Args:\n dtype: A datatype.\n \"\"\"\n if dtype == np.float16:\n return 5e-3\n elif dtype in (np.float32, np.complex64):\n return 1e-3\n elif dtype in (np.float64, np.complex128):\n return 1e-5\n else:\n return None # Fail fast for unexpected types\n\n\nclass ComparisonOpTest(test.TestCase):\n\n def _compareScalar(self, func, x, y, dtype):\n with test_util.use_gpu():\n out = func(\n ops.convert_to_tensor(np.array([x]).astype(dtype)),\n ops.convert_to_tensor(np.array([y]).astype(dtype)))\n ret = self.evaluate(out)\n return ret[0]\n\n def testScalarCompareScalar(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n data = [-1, 0, 1]\n for t in dtypes:\n for x in data:\n for y in data:\n self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)\n self.assertEqual(\n self._compareScalar(math_ops.less_equal, x, y, t), x <= y)\n self.assertEqual(\n self._compareScalar(math_ops.greater, x, y, t), x > y)\n self.assertEqual(\n 
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)\n self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)\n self.assertEqual(\n self._compareScalar(math_ops.not_equal, x, y, t), x != y)\n data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]\n for t in [np.complex64, np.complex128]:\n for x in data:\n for y in data:\n self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)\n self.assertEqual(\n self._compareScalar(math_ops.not_equal, x, y, t), x != y)\n\n def _compare(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with test_util.use_gpu():\n out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))\n tf_ans = self.evaluate(out)\n self.assertAllEqual(np_ans, tf_ans)\n\n def testTensorCompareTensor(self):\n x = np.linspace(-15, 15, 6).reshape(1, 3, 2)\n y = np.linspace(20, -10, 6).reshape(1, 3, 2)\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(xt, yt, np.less, math_ops.less)\n self._compare(xt, yt, np.less_equal, math_ops.less_equal)\n self._compare(xt, yt, np.greater, math_ops.greater)\n self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)\n self._compare(xt, yt, np.equal, math_ops.equal)\n self._compare(xt, yt, np.not_equal, math_ops.not_equal)\n # Complex types do not support ordering but do support equality tests.\n for t in [np.complex64, np.complex128]:\n xt = x.astype(t)\n xt -= 1j * xt\n yt = y.astype(t)\n yt -= 1j * yt\n self._compare(xt, yt, np.equal, math_ops.equal)\n self._compare(xt, yt, np.not_equal, math_ops.not_equal)\n\n def _compareBCast(self, xs, ys, dtype, np_func, tf_func):\n x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)\n y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)\n if dtype in (np.complex64, np.complex128):\n x -= 1j * x\n y -= 1j * y\n self._compare(x, y, np_func, tf_func)\n self._compare(y, x, np_func, tf_func)\n\n def _testBCastByFunc(self, np_func, tf_func, include_complex=False):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n dtypes = [\n np.float16,\n np.float32,\n np.float64,\n np.int32,\n np.int64,\n ]\n if include_complex:\n dtypes.extend([np.complex64, np.complex128])\n\n for (xs, ys) in shapes:\n for dtype in dtypes:\n self._compareBCast(xs, ys, dtype, np_func, tf_func)\n\n def testBCastLess(self):\n self._testBCastByFunc(np.less, math_ops.less)\n\n def testBCastLessEqual(self):\n self._testBCastByFunc(np.less_equal, math_ops.less_equal)\n\n def testBCastGreater(self):\n self._testBCastByFunc(np.greater, math_ops.greater)\n\n def testBCastGreaterEqual(self):\n self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)\n\n def testBCastEqual(self):\n self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)\n\n def testBCastNotEqual(self):\n self._testBCastByFunc(\n np.not_equal, math_ops.not_equal, include_complex=True)\n\n def testShapeMismatch(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n funcs = [\n math_ops.less, math_ops.less_equal, math_ops.greater,\n math_ops.greater_equal, math_ops.equal, math_ops.not_equal\n ]\n x = np.arange(0, 10).reshape([2, 5])\n y = np.arange(0, 10).reshape([5, 2])\n for t in dtypes:\n for f in funcs:\n with self.assertRaisesRegexp(\n (ValueError, 
errors.InvalidArgumentError),\n \"Incompatible shapes|Dimensions must be equal\"):\n f(x.astype(t), y.astype(t))\n\n\nclass LogicalOpTest(test.TestCase):\n\n def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):\n np_ans = np_func(x, y)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_val = self.evaluate(out)\n self.assertEqual(out.dtype, dtypes_lib.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def _not(self, x, use_gpu=False):\n np_ans = np.logical_not(x)\n with test_util.device(use_gpu=use_gpu):\n out = math_ops.logical_not(ops.convert_to_tensor(x))\n tf_val = self.evaluate(out)\n self.assertEqual(out.dtype, dtypes_lib.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def testScalar(self):\n data = [np.array([True]), np.array([False])]\n for use_gpu in [True, False]:\n for x in data:\n self._not(x, use_gpu)\n for x in data:\n for y in data:\n self._compareBinary(x, y, np.logical_and, math_ops.logical_and,\n use_gpu)\n self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,\n use_gpu)\n\n def testTensor(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n for use_gpu in [True, False]:\n self._not(x, use_gpu)\n self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)\n\n def testBCast(self):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n for (xs, ys) in shapes:\n x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)\n y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)\n for use_gpu in [True, False]:\n self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)\n for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Dimensions must\" in str(e)):\n f(x, y)\n\n @test_util.run_deprecated_v1\n def testUsingAsPythonValueFails(self):\n # Ensure that we raise an error when the user attempts to treat a\n # `Tensor` as a Python `bool`.\n b = constant_op.constant(False)\n with self.assertRaises(TypeError):\n if b:\n pass\n\n x = constant_op.constant(3)\n y = constant_op.constant(4)\n with self.assertRaises(TypeError):\n if x > y:\n pass\n\n z = constant_op.constant(7)\n\n # The chained comparison should fail because Python computes `x <\n # y` and short-circuits the comparison with `z` if it is `False`.\n with self.assertRaises(TypeError):\n _ = x < y < z\n\n\nclass SelectOpTest(test.TestCase):\n\n def _compare(self, fn, c, x, y, use_gpu):\n np_ans = np.where(c, x, y)\n with 
test_util.device(use_gpu=use_gpu):\n out = fn(c, x, y)\n tf_ans = self.evaluate(out)\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self,\n fn,\n c,\n x,\n y,\n numeric_gradient_type=None,\n x_init_value=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = fn(c, inx, iny)\n s = list(np.shape(c))\n if x_init_value is None:\n x_init_value = x\n if x.shape != y.shape:\n x_init_value = np.broadcast_to(y, x.shape)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, s, out, s, x_init_value=x_init_value)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = fn(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inxf, s, outf, s, x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = fn(c, inx, iny)\n s = list(np.shape(c))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n iny, s, out, s, x_init_value=x, delta=1.0)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = fn(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inyf, s, outf, s, x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _testScalar(self, fn):\n c = True\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testScalar(self):\n self._testScalar(array_ops.where)\n self._testScalar(array_ops.where_v2)\n\n def _testScalarBroadcast(self, fn, c, x, y):\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testScalarBroadcast(self):\n c = True\n # where_v2 only\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 2) * 100\n self._testScalarBroadcast(array_ops.where_v2, 
c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 2) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(3, 2) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n\n def _testBasic(self, fn):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testBasic(self):\n self._testBasic(array_ops.where)\n self._testBasic(array_ops.where_v2)\n\n def _testBasicBroadcast(self, fn, c, x, y):\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testBasicBroadcast(self):\n c0 = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n c1 = np.random.randint(0, 2, 2).astype(np.bool).reshape(1, 1, 2)\n c2 = np.random.randint(0, 2, 3).astype(np.bool).reshape(1, 3, 1)\n c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)\n for c in [c0, c1, c2, c3]:\n # where_v2 only\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 2) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 2) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(3, 2) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n\n def _testGradients(self, fn):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = 
y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(fn, c, xt, yt, np.float)\n self._compareGradientY(fn, c, xt, yt, np.float)\n else:\n self._compareGradientX(fn, c, xt, yt)\n self._compareGradientY(fn, c, xt, yt)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n self._testGradients(array_ops.where)\n self._testGradients(array_ops.where_v2)\n\n @test_util.run_deprecated_v1\n def testGradientsBroadcast(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n for t in [np.float32, np.float64]:\n # where_v2 only\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 2) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 2) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(3, 2) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n\n def _testShapeMismatch(self, fn):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(2, 5, 3) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n fn(c, xt, yt)\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n self._testShapeMismatch(array_ops.where)\n self._testShapeMismatch(array_ops.where_v2)\n\n def _testEmptyTensor(self, fn):\n c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)\n x = np.random.rand(1, 3, 0) * 100\n y = np.random.rand(1, 3, 0) * 100\n z_expected = np.zeros((1, 3, 0), dtype=np.float32)\n with self.cached_session():\n xt = x.astype(np.float32)\n yt = y.astype(np.float32)\n z = fn(c, xt, yt).eval()\n self.assertAllEqual(z_expected, z)\n\n @test_util.run_deprecated_v1\n def testEmptyTensor(self):\n self._testEmptyTensor(array_ops.where)\n self._testEmptyTensor(array_ops.where_v2)\n\n def _testNan(self, fn):\n with self.cached_session():\n for c in False, True:\n for a in 7.0, np.nan:\n for b in 5.0, np.nan:\n x = fn(c, a, b).eval()\n y = a if c else b\n self.assertEqual(np.isnan(x), np.isnan(y))\n\n @test_util.run_deprecated_v1\n def testNan(self):\n \"\"\"Verify that nans don't propagate where they shouldn't.\"\"\"\n self._testNan(array_ops.where)\n self._testNan(array_ops.where_v2)\n\n\nclass BatchSelectOpTest(test.TestCase):\n \"\"\"Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+.\"\"\"\n\n def _compare(self, c, x, y, use_gpu):\n np_ans = np.dstack(\n [x_i if 
c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(\n [2, 0, 1])\n with test_util.device(use_gpu=use_gpu):\n out = array_ops.where(c, x, y)\n tf_ans = self.evaluate(out)\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, c, x, y, numeric_gradient_type=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = array_ops.where(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, s, out, s, x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = array_ops.where(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inxf, s, outf, s, x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, c, x, y, numeric_gradient_type=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = array_ops.where(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n iny, s, out, s, x_init_value=y)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = array_ops.where(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inyf, s, outf, s, x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testBasic(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(c, xt, yt, use_gpu=True)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. 
This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(c, xt, yt, np.float)\n self._compareGradientY(c, xt, yt, np.float)\n else:\n self._compareGradientX(c, xt, yt)\n self._compareGradientY(c, xt, yt)\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n c = np.random.randint(0, 2, 8).astype(np.bool)\n x = np.random.rand(16, 3, 2) * 100\n y = np.random.rand(16, 3, 2) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n array_ops.where(c, xt, yt)\n\n\nclass MinMaxOpTest(test.TestCase):\n\n def _compare(self, x, y, use_gpu):\n np_min, np_max = np.minimum(x, y), np.maximum(x, y)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)\n tf_min, tf_max = self.evaluate([omin, omax])\n self.assertAllEqual(np_min, tf_min)\n self.assertAllEqual(np_max, tf_max)\n\n def testBasic(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(1, 3, 2) * 100.\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testDifferentShapes(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(2) * 100. # should broadcast\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testScalar(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(1).item() * 100. 
# should broadcast\n # dropped np.float64, int64 because TF automatically converts to 32 bit\n for t in [np.float32, np.int32]:\n self._compare(x.astype(t), t(y), use_gpu=False)\n self._compare(x.astype(t), t(y), use_gpu=True)\n\n def _compareGradientX(self, func, x, y):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, s, out, s, x_init_value=x)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, func, x, y):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n iny, s, out, s, x_init_value=y)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n x = np.random.rand(1, 3, 2) * 100.\n # ensure x != y\n y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1\n self._compareGradientX(math_ops.maximum, x, y)\n self._compareGradientY(math_ops.maximum, x, y)\n self._compareGradientX(math_ops.minimum, x, y)\n self._compareGradientY(math_ops.minimum, x, y)\n\n\nclass MathOpsOverloadTest(test.TestCase):\n\n def _computeTensorAndLiteral(self, x, y, dtype, func):\n with test_util.force_cpu():\n inx = ops.convert_to_tensor(x, dtype=dtype)\n z = func(inx, y) # Should use __add__, __sub__, etc.\n return self.evaluate(z)\n\n def _computeLiteralAndTensor(self, x, y, dtype, func):\n with test_util.force_cpu():\n iny = ops.convert_to_tensor(y, dtype=dtype)\n z = func(x, iny) # Should use __radd__, __rsub__, etc.\n return self.evaluate(z)\n\n def _compareBinary(self, x, y, dtype, np_func, tf_func):\n np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)\n self.assertAllClose(np_ans,\n self._computeTensorAndLiteral(x, y, dtype, tf_func))\n self.assertAllClose(np_ans,\n self._computeLiteralAndTensor(x, y, dtype, tf_func))\n\n def _compareUnary(self, x, dtype, np_func, tf_func):\n np_ans = np_func(x).astype(dtype.as_numpy_dtype)\n with test_util.force_cpu():\n self.assertAllClose(\n np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))\n\n def testOverload(self):\n dtypes = [\n dtypes_lib.float16,\n dtypes_lib.float32,\n dtypes_lib.float64,\n dtypes_lib.int32,\n dtypes_lib.int64,\n dtypes_lib.complex64,\n dtypes_lib.complex128,\n ]\n funcs = [\n (np.add, _ADD),\n (np.subtract, _SUB),\n (np.multiply, _MUL),\n (np.power, _POW),\n (np.true_divide, _TRUEDIV),\n (np.floor_divide, _FLOORDIV),\n ]\n for dtype in dtypes:\n for np_func, tf_func in funcs:\n if dtype in (dtypes_lib.complex64,\n dtypes_lib.complex128) and tf_func == _FLOORDIV:\n continue # floordiv makes no sense for complex\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n # Mod only works for int32 and int64.\n for dtype in [dtypes_lib.int32, dtypes_lib.int64]:\n self._compareBinary(10, 3, dtype, np.mod, _MOD)\n\n def testOverloadComparisons(self):\n dtypes = [\n dtypes_lib.float16,\n 
dtypes_lib.float32,\n dtypes_lib.float64,\n dtypes_lib.int32,\n dtypes_lib.int64,\n ]\n funcs = [\n (np.less, _LT),\n (np.less_equal, _LE),\n (np.greater, _GT),\n (np.greater_equal, _GE),\n ]\n for dtype in dtypes:\n for np_func, tf_func in funcs:\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),\n (np.logical_xor, _XOR), (np.equal, math_ops.equal),\n (np.not_equal, math_ops.not_equal)]\n for np_func, tf_func in logical_funcs:\n self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary([True, True, False, False],\n [True, False, True, False], dtypes_lib.bool, np_func,\n tf_func)\n self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)\n self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)\n self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)\n\n\nclass IsFiniteInfNanTest(test.TestCase):\n\n def _compare(self, x, use_gpu):\n np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(x)\n ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(\n inx), math_ops.is_nan(inx)\n tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])\n self.assertAllEqual(np_inf, tf_inf)\n self.assertAllEqual(np_nan, tf_nan)\n self.assertAllEqual(np_finite, tf_finite)\n self.assertShapeEqual(np_inf, oinf)\n self.assertShapeEqual(np_nan, onan)\n self.assertShapeEqual(np_finite, ofinite)\n\n def _testDtype(self, dtype):\n fi = np.finfo(dtype)\n data = np.array([\n 0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,\n np.inf, np.nan\n ]).astype(dtype)\n self._compare(data, use_gpu=False)\n self._compare(data, use_gpu=True)\n\n def testHalf(self):\n self._testDtype(np.float16)\n\n def testFloat(self):\n self._testDtype(np.float32)\n\n def testDouble(self):\n self._testDtype(np.float64)\n\n def testSqrt(self):\n for dtype in [np.float16, np.float32, np.float64]:\n fi = np.finfo(dtype)\n for size in [1, 3, 4, 7, 8, 63, 64, 65]:\n # For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.\n # It is not accurate for very large arguments, so we test for\n # fi.max/100 instead of fi.max here.\n for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:\n x = np.full((size,), value, dtype=dtype)\n np_y = np.sqrt(x)\n np_nan = np.isnan(np_y)\n with test_util.use_gpu():\n tf_y = math_ops.sqrt(x)\n tf_nan = math_ops.is_nan(tf_y)\n if value < 0:\n self.assertAllEqual(np_nan, self.evaluate(tf_nan))\n else:\n self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))\n\n\nclass RoundingTest(test.TestCase):\n\n def _compare_values(self, x, y=None):\n y = np.rint(x) if y is None else np.asarray(y)\n\n tf_rint = math_ops.rint(x)\n np_rint = self.evaluate(tf_rint)\n\n self.assertAllEqual(y, np_rint)\n self.assertShapeEqual(y, tf_rint)\n\n def _compare(self, x):\n np_floor, np_ceil = np.floor(x), np.ceil(x)\n\n inx = ops.convert_to_tensor(x)\n ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)\n tf_floor, tf_ceil = self.evaluate([ofloor, oceil])\n\n self.assertAllEqual(np_floor, tf_floor)\n self.assertAllEqual(np_ceil, tf_ceil)\n self.assertShapeEqual(np_floor, ofloor)\n self.assertShapeEqual(np_ceil, oceil)\n\n def _testDtype(self, dtype):\n data = 
(np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)\n self._compare(data)\n # TODO: rint op is not supported for float16\n if dtype is np.float16:\n return\n self._compare_values(data)\n x = [0.5, 0.5000001]\n y = [0.0, 1.0]\n self._compare_values(x, y=y)\n\n # numpy example\n x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]\n y = [-2., -2., -0., 0., 2., 2., 2.]\n self._compare_values(x, y=y)\n\n def testTypes(self):\n self.skipTest(\"b/131162241\")\n for dtype in [np.float16, np.float32, np.float64]:\n self._testDtype(dtype)\n\n\nclass ComplexMakeRealImagTest(test.TestCase):\n\n def _compareMake(self, real, imag, use_gpu):\n np_ans = real + (1j) * imag\n\n with test_util.device(use_gpu=use_gpu):\n real = ops.convert_to_tensor(real)\n imag = ops.convert_to_tensor(imag)\n tf_ans = math_ops.complex(real, imag)\n out = self.evaluate(tf_ans)\n\n self.assertAllEqual(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def testMake(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n for use_gpu in [False, True]:\n self._compareMake(real, imag, use_gpu)\n self._compareMake(real, 12.0, use_gpu)\n self._compareMake(23.0, imag, use_gpu)\n\n def testRealImagNumericType(self):\n for use_gpu in [True, False]:\n for value in [1., 1j, 1. + 1j]:\n np_real, np_imag = np.real(value), np.imag(value)\n with test_util.device(use_gpu=use_gpu):\n tf_real = math_ops.real(value)\n tf_imag = math_ops.imag(value)\n self.assertAllEqual(np_real, self.evaluate(tf_real))\n self.assertAllEqual(np_imag, self.evaluate(tf_imag))\n\n def _compareRealImag(self, cplx, use_gpu):\n np_real, np_imag = np.real(cplx), np.imag(cplx)\n np_zeros = np_real * 0\n\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(cplx)\n tf_real = math_ops.real(inx)\n tf_imag = math_ops.imag(inx)\n tf_real_real = math_ops.real(tf_real)\n tf_imag_real = math_ops.imag(tf_real)\n self.assertAllEqual(np_real, self.evaluate(tf_real))\n self.assertAllEqual(np_imag, self.evaluate(tf_imag))\n self.assertAllEqual(np_real, self.evaluate(tf_real_real))\n self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))\n\n def testRealImag64(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + 1j * imag\n self._compareRealImag(cplx, use_gpu=False)\n self._compareRealImag(cplx, use_gpu=True)\n\n def testRealImag128(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)\n cplx = real + 1j * imag\n self._compareRealImag(cplx, use_gpu=False)\n self._compareRealImag(cplx, use_gpu=True)\n\n def _compareAngle(self, cplx, use_gpu):\n np_angle = np.angle(cplx)\n\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(cplx)\n tf_angle = math_ops.angle(inx)\n tf_angle_val = self.evaluate(tf_angle)\n\n self.assertAllClose(np_angle, tf_angle_val)\n self.assertShapeEqual(np_angle, tf_angle)\n\n def testAngle64(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + 1j * imag\n self._compareAngle(cplx, use_gpu=False)\n self._compareAngle(cplx, use_gpu=True)\n\n def testAngle(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)\n cplx = real + 1j * 
imag\n self._compareAngle(cplx, use_gpu=False)\n self._compareAngle(cplx, use_gpu=True)\n\n @test_util.run_deprecated_v1\n def testRealReal(self):\n for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,\n dtypes_lib.float64):\n x = array_ops.placeholder(dtype)\n y = math_ops.real(x)\n self.assertEqual(x, y)\n\n def _compareConj(self, cplx, use_gpu):\n np_ans = np.conj(cplx)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(cplx)\n tf_conj = math_ops.conj(inx)\n tf_ans = self.evaluate(tf_conj)\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, tf_conj)\n\n def testConj64(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + 1j * imag\n self._compareConj(cplx, use_gpu=False)\n self._compareConj(cplx, use_gpu=True)\n\n def testConj128(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)\n cplx = real + 1j * imag\n self._compareConj(cplx, use_gpu=False)\n self._compareConj(cplx, use_gpu=True)\n\n @test_util.run_deprecated_v1\n def testConjReal(self):\n for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,\n dtypes_lib.float32, dtypes_lib.float64):\n x = array_ops.placeholder(dtype)\n y = math_ops.conj(x)\n self.assertEqual(x, y)\n\n @test_util.run_deprecated_v1\n def testConjString(self):\n x = array_ops.placeholder(dtypes_lib.string)\n with self.assertRaisesRegexp(TypeError,\n r\"Expected numeric or variant tensor\"):\n math_ops.conj(x)\n\n def _compareGradient(self, x):\n # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into\n # complex numbers. Then, we extract real and imag parts and\n # computes the squared sum. This is obviously the same as sum(real\n # * real) + sum(imag * imag). We just want to make sure the\n # gradient function is checked.\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)\n real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])\n cplx = math_ops.complex(real, imag)\n cplx = math_ops.conj(cplx)\n loss = math_ops.reduce_sum(math_ops.square(\n math_ops.real(cplx))) + math_ops.reduce_sum(\n math_ops.square(math_ops.imag(cplx)))\n epsilon = 1e-3\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n def _compareBroadcastGradient(self, x):\n x_ = ops.convert_to_tensor(x)\n epsilon = 1e-3\n with self.cached_session():\n for args in [(x_, 0.), (0., x_)]:\n z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n # complex64\n data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)\n self._compareGradient(data)\n self._compareBroadcastGradient(data)\n # complex128\n data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)\n self._compareGradient(data)\n\n def _compareMulGradient(self, data):\n # data is a float matrix of shape [n, 4]. 
data[:, 0], data[:, 1],\n # data[:, 2], data[:, 3] are real parts of x, imaginary parts of\n # x, real parts of y and imaginary parts of y.\n with self.cached_session():\n inp = ops.convert_to_tensor(data)\n xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)\n\n def vec(x): # Reshape to a vector\n return array_ops.reshape(x, [-1])\n\n xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)\n\n def cplx(r, i): # Combine to a complex vector\n return math_ops.complex(r, i)\n\n x, y = cplx(xr, xi), cplx(yr, yi)\n # z is x times y in complex plane.\n z = x * y\n # Defines the loss function as the sum of all coefficients of z.\n loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))\n epsilon = 0.005\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n @test_util.run_deprecated_v1\n def testMulGradient(self):\n data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)\n self._compareMulGradient(data)\n\n\nclass PolyvalTest(test.TestCase):\n\n def _runtest(self, dtype, degree):\n x = np.random.rand(2, 2).astype(dtype)\n coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]\n np_val = np.polyval(coeffs, x)\n with self.cached_session():\n tf_val = math_ops.polyval(coeffs, x)\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n def testSimple(self):\n for dtype in [\n np.int32, np.float32, np.float64, np.complex64, np.complex128\n ]:\n for degree in range(5):\n self._runtest(dtype, degree)\n\n def testBroadcast(self):\n dtype = np.float32\n degree = 3\n shapes = [(1,), (2, 1), (1, 2), (2, 2)]\n for x_shape in shapes:\n for coeff_shape in shapes:\n x = np.random.rand(*x_shape).astype(dtype)\n coeffs = [\n np.random.rand(*coeff_shape).astype(dtype)\n for _ in range(degree + 1)\n ]\n np_val = np.polyval(coeffs, x)\n with self.cached_session():\n tf_val = math_ops.polyval(coeffs, x)\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n def testEmpty(self):\n x = np.random.rand(2, 2).astype(np.float32)\n coeffs = []\n np_val = np.polyval(coeffs, x)\n with self.cached_session():\n tf_val = math_ops.polyval(coeffs, x)\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n\nclass SingularGradientOpTest(test.TestCase):\n\n @test_util.run_deprecated_v1\n def testGradientAtSingularity(self):\n if not compat.forward_compatible(2020, 3, 14):\n self.skipTest(\"Skipping test for future functionality.\")\n\n ops_and_singularity = [\n (gen_math_ops.reciprocal, (0.,)),\n (gen_math_ops.rsqrt, (0.,)),\n (gen_math_ops.sqrt, (0.,)),\n (gen_math_ops.sqrt_grad, (\n 0.,\n 0.,\n )),\n (gen_math_ops.reciprocal_grad, (\n 1.,\n 0.,\n )),\n (gen_math_ops.tan, (np.pi / 2,)),\n (gen_math_ops.log, (0.,)),\n (gen_math_ops.log1p, (-1.,)),\n (gen_math_ops.acosh, (0.,)),\n (gen_math_ops.asin, (1.,)),\n (gen_math_ops.acos, (1.,)),\n (gen_math_ops.atan2, (0., 0.)),\n (gen_math_ops.div, (1., 0.)),\n (gen_math_ops.div_no_nan, (1., 0.)),\n (gen_math_ops.real_div, (1., 0.)),\n (math_ops.pow, (0., -1.)),\n ]\n for op, singularity in ops_and_singularity:\n for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,\n dtypes_lib.complex64, dtypes_lib.complex128):\n if dtype.is_complex and op in [\n gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2\n ]:\n continue\n if dtype == dtypes_lib.half and op in [\n gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,\n gen_math_ops.atan2\n ]:\n continue\n with 
self.cached_session():\n print(\"op = \", op, \", singularity = \", singularity, \", type = \",\n dtype)\n args = [constant_op.constant(s, dtype=dtype) for s in singularity]\n grad_y = constant_op.constant(0, dtype=dtype)\n y = op(*args)\n g = gradients_impl.gradients(y, args, grad_ys=grad_y)\n g_val = self.evaluate(g)\n self.assertAllEqual(g_val, np.zeros(len(singularity)))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline statistics gathering ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base\nfrom tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base\nfrom tensorflow.python.data.experimental.ops import batching\nfrom tensorflow.python.data.experimental.ops import stats_aggregator\nfrom tensorflow.python.data.experimental.ops import stats_ops\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\n# TODO(jsimsa): Figure out why are graph tests failing.\nclass StatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.eager_only_combinations())\n def testBytesProduced(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply(\n stats_ops.bytes_produced_stats(\"bytes_produced\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n expected_sum = 0.0\n for i in range(100):\n self.assertAllEqual(\n np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"bytes_produced\", float(i + 1),\n i + 2)\n expected_sum += i * 8.0\n self.assertStatisticsHasSum(handle, \"bytes_produced\", expected_sum, i + 2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"bytes_produced\", 100.0, 101)\n self.assertStatisticsHasSum(handle, \"bytes_produced\", expected_sum, 101)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testLatencyStats(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", float(i + 1),\n i + 2)\n with 
self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", 100.0, 101)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testPrefetchBufferUtilization(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(-1)\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n for i in range(100):\n self.assertAllEqual(\n np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_utilization\"),\n float(i + 1),\n 3 * i + 4,\n offset=2)\n self.assertStatisticsContains(\n handle, self.regexForNodeName(\"PrefetchDataset\", \"buffer_capacity\"),\n 3 * i + 4)\n self.assertStatisticsContains(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_size\"),\n 3 * i + 4,\n offset=1)\n self.assertStatisticsHasRange(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_utilization\"),\n 0,\n 1,\n 3 * i + 4,\n offset=2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_utilization\"),\n 100,\n 301,\n offset=2)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testPrefetchBufferScalars(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(10).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(1)\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(10):\n self.assertAllEqual(\n np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"PrefetchDataset\", \"buffer_capacity\"),\n 1, 3 * i + 4)\n self.assertStatisticsHasScalarValue(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_size\"),\n 1,\n 3 * i + 4,\n offset=1)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n\n @combinations.generate(test_base.eager_only_combinations())\n def testFilteredElementsStats(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(101).filter(\n lambda x: math_ops.equal(math_ops.mod(x, 3), 0))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(34):\n self.assertEqual(i * 3, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n if i != 0:\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"dropped_elements\"),\n float(i * 2))\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"filtered_elements\"),\n float(i + 1))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"dropped_elements\"),\n 67.0)\n self.assertStatisticsHasScalarValue(\n handle, 
self.regexForNodeName(\"FilterDataset\", \"filtered_elements\"),\n 34.0)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testReinitialize(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n\n for j in range(5):\n next_element = self.getNext(dataset, requires_initialization=True)\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\",\n float((j * 100) + i + 1),\n (j * 100) + i + 2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", (j + 1) * 100.0,\n (j * 100) + 101)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testNoAggregatorRegistered(self):\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultipleTags(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\")).apply(\n stats_ops.latency_stats(\"record_latency_2\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"record_latency\", float(i + 1), 2 * i + 3, offset=1)\n self.assertStatisticsHasCount(handle, \"record_latency_2\", float(i + 1),\n 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"record_latency\", 100.0, 201, offset=1)\n self.assertStatisticsHasCount(handle, \"record_latency_2\", 100.0, 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testRepeatedTags(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\")).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\",\n float(2 * (i + 1)), 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", 200.0, 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultipleIteratorsSameAggregator(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element1 = self.getNext(dataset, 
requires_initialization=True)\n next_element2 = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\",\n float(2 * (i + 1)), 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element1())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element2())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", 200.0, 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultipleDatasetWithPrefixes(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(\n dataset, aggregator, prefix=\"dataset1\")\n dataset2 = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset2 = self.datasetExperimentalStats(\n dataset2, aggregator, prefix=\"dataset2\")\n next_element1 = self.getNext(dataset, requires_initialization=True)\n next_element2 = self.getNext(dataset2, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"dataset1::record_latency\", float(i + 1), 2 * i + 3, offset=1)\n self.assertStatisticsHasCount(handle, \"dataset2::record_latency\",\n float(i + 1), 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element1())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element2())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"dataset1::record_latency\", 100.0, 201, offset=1)\n self.assertStatisticsHasCount(handle, \"dataset2::record_latency\", 100.0,\n 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultiplePrefetchStats(self):\n\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(10).prefetch(\n 2).filter(lambda x: math_ops.equal(math_ops.mod(x, 2), 0)).prefetch(1)\n\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(5):\n self.assertEqual(i * 2, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n # TODO(shivaniagarwal): using exact name of prefetch node than the regex,\n # to differentiate between two prefetch. 
This might break in future, at\n # which point, it would be best to disable this test.\n self.assertStatisticsHasScalarValue(\n handle, \"PrefetchDataset/_5::buffer_capacity\", 2)\n self.assertStatisticsContains(handle, \"PrefetchDataset/_5::buffer_size\")\n self.assertStatisticsHasScalarValue(\n handle, \"PrefetchDataset/_8::buffer_capacity\", 1)\n self.assertStatisticsContains(handle, \"PrefetchDataset/_8::buffer_size\")\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n\n\nclass ThreadUtilizationStatsTest(stats_dataset_test_base.StatsDatasetTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMapBufferUtilization(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),\n num_parallel_calls=4)\n\n self.parallelCallsStats(\n dataset_fn, {\"ParallelMapDataset\"}, 10, function_processing_time=True)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMapAutoTuneBufferUtilization(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),\n num_parallel_calls=dataset_ops.AUTOTUNE)\n\n self.parallelCallsStats(\n dataset_fn, {\"ParallelMapDataset\"}, 10, function_processing_time=True)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testInterleaveAutoTuneBufferUtilization(self):\n\n def dataset_fn():\n\n def interleave_fn(_):\n return dataset_ops.Dataset.range(\n 10).map(lambda x: array_ops.tile([x], ops.convert_to_tensor([x])))\n\n return dataset_ops.Dataset.range(1).interleave(\n interleave_fn,\n cycle_length=1,\n num_parallel_calls=dataset_ops.AUTOTUNE)\n\n self.parallelCallsStats(dataset_fn, {\"ParallelInterleaveDatasetV2\"}, 10)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMapAndBatchAutoTuneBufferUtilization(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.range(100).apply(\n batching.map_and_batch(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([2])),\n num_parallel_calls=dataset_ops.AUTOTUNE,\n batch_size=16))\n\n num_output = 100 // 16 + 1\n self.parallelCallsStats(\n dataset_fn, {\"MapAndBatchDataset\"},\n num_output,\n check_elements=False,\n function_processing_time=True)\n\n\nclass FeatureStatsDatasetTest(\n stats_dataset_test_base.StatsDatasetTestBase,\n reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.eager_only_combinations())\n def testFeaturesStats(self):\n num_epochs = 5\n total_records = num_epochs * self._num_records\n batch_size = 2\n\n def dataset_fn():\n return self.make_batch_feature(\n filenames=self.test_filenames[0],\n num_epochs=num_epochs,\n batch_size=batch_size,\n shuffle=True,\n shuffle_seed=5,\n drop_final_batch=False)\n\n num_output = total_records // batch_size\n if total_records % batch_size:\n num_output = total_records // batch_size + 1\n\n self.parallelCallsStats(\n dataset_fn, {\"ParseExampleDataset\"},\n num_output,\n check_elements=False)\n\n aggregator = stats_aggregator.StatsAggregator()\n dataset = self.datasetExperimentalStats(\n dataset_fn(), aggregator, prefix=\"record_stats\")\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for _ in range(num_output):\n self.evaluate(next_element())\n\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n 
self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"features_count\"), total_records)\n self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"feature_values_count\"), total_records)\n self.assertStatisticsHasSum(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"features_count\"), total_records * 4)\n self.assertStatisticsHasSum(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"feature_values_count\"),\n self._sum_keywords(1) * num_epochs + 3 * total_records)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"numpy.ones",
"tensorflow.python.keras.regularizers.l1_l2",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"tensorflow.python.keras.layers.Average",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.keras.Model",
"tensorflow.python.keras.Input",
"tensorflow.python.keras.testing_utils.get_test_data",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.keras.regularizers.l2",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.utils.np_utils.to_categorical",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.keras.backend.mean",
"tensorflow.python.keras.models.Model",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.regularizers.l1",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.testing_utils.should_run_tf_function",
"tensorflow.python.keras.layers.Input"
],
[
"tensorflow.python.ops.math_ops.maximum",
"numpy.asarray",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.full",
"tensorflow.python.ops.gradient_checker.compute_gradient",
"numpy.polyval",
"tensorflow.python.ops.math_ops.is_finite",
"tensorflow.python.framework.constant_op.constant",
"numpy.isfinite",
"numpy.vstack",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.math_ops.angle",
"numpy.logical_not",
"numpy.random.rand",
"numpy.isnan",
"tensorflow.python.ops.math_ops.is_inf",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"numpy.where",
"tensorflow.python.ops.math_ops.imag",
"tensorflow.python.compat.compat.forward_compatible",
"numpy.linspace",
"tensorflow.python.ops.math_ops.polyval",
"numpy.minimum",
"numpy.sqrt",
"numpy.ceil",
"numpy.zeros",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.framework.test_util.device",
"numpy.arange",
"tensorflow.python.ops.math_ops.rint",
"tensorflow.python.framework.test_util.force_cpu",
"tensorflow.python.ops.math_ops.ceil",
"tensorflow.python.ops.math_ops.real",
"numpy.prod",
"numpy.maximum",
"tensorflow.python.ops.math_ops.floor",
"numpy.finfo",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.broadcast_to",
"numpy.rint",
"numpy.conj",
"numpy.isinf",
"numpy.floor",
"tensorflow.python.ops.math_ops.complex",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.platform.test.main",
"numpy.shape",
"numpy.angle",
"tensorflow.python.framework.test_util.use_gpu",
"tensorflow.python.ops.array_ops.where",
"numpy.array",
"tensorflow.python.ops.math_ops.is_nan",
"numpy.random.randint",
"tensorflow.python.ops.math_ops.conj",
"numpy.real",
"numpy.imag"
],
[
"tensorflow.python.ops.math_ops.mod",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.kernel_tests.test_base.eager_only_combinations",
"numpy.array",
"tensorflow.python.data.experimental.ops.stats_ops.bytes_produced_stats",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.data.experimental.ops.stats_aggregator.StatsAggregator",
"tensorflow.python.data.experimental.ops.stats_ops.latency_stats"
]
] |
ersilia-os/osm-series4-candidates | [
"2d06ae0a5c26efea70d2a21f06a376625977b8b7"
] | [
"postprocess/_5_1_chemprop.py"
] | [
"from tqdm import tqdm\nimport pandas as pd\nfrom __init__ import FILE\n\ndf = pd.read_csv(FILE)\nsmiles = list(df[\"Smiles\"])\n\nwith open(\"_chemprop.csv\", \"w\") as f:\n f.write(\"smiles\\n\")\n for smi in smiles:\n f.write(\"{0}\\n\".format(smi))\n"
] | [
[
"pandas.read_csv"
]
] |
jaketae/pytorch | [
"e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25",
"5654e6339879e438efb7cf50e88e356472eb0545"
] | [
"torch/distributed/_shard/sharded_tensor/__init__.py",
"test/test_public_bindings.py"
] | [
"# coding=utf-8\n\nimport copy\nimport functools\nfrom typing import List\n\nimport torch\nimport torch.distributed._shard.sharding_spec as shard_spec\n\nfrom .api import (\n _register_sharded_op,\n Shard,\n ShardedTensor,\n ShardedTensorMetadata,\n TensorProperties,\n)\nfrom .metadata import ShardMetadata # noqa: F401\nfrom .partial_tensor import _PartialTensor\n\n\ndef empty(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Returns a :class:`ShardedTensor` filled with uninitialized data.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a sequence of integers defining the shape of the output\n tensor. Can be a variable number of arguments or a collection like a list or tuple.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n memory_format (:class:`torch.memory_format`, optional): the desired memory format of\n returned Tensor. Default: ``torch.contiguous_format``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n return ShardedTensor(\n sharding_spec,\n *size,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs,\n )\n\ndef ones(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Returns a :class:`ShardedTensor` with the scalar value 1.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a sequence of integers defining the shape of the output\n tensor. Can be a variable number of arguments or a collection like a list or tuple.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. 
Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n return full(\n sharding_spec,\n size,\n fill_value=1,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs\n )\n\ndef zeros(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Returns a :class:`ShardedTensor` filled with the scalar value 0.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a sequence of integers defining the shape of the output\n tensor. Can be a variable number of arguments or a collection like a list or tuple.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n return full(\n sharding_spec,\n size,\n fill_value=0,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs\n )\n\ndef full(sharding_spec: shard_spec.ShardingSpec,\n size,\n fill_value=torch.types.Number,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype\n is inferred from fill_value. If dtype is specified, it will override the\n inferred type from fill_value. 
Needs to be called on all ranks in an SPMD fashion.\n Args:\n sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the\n output tensor.\n fill_value (Scalar) – the value to fill the output tensor with.\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n sharded_tensor = ShardedTensor(\n sharding_spec,\n *size,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs,\n )\n torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type]\n return sharded_tensor\n\ndef rand(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype\n is inferred from fill_value. If dtype is specified, it will override the\n inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the\n output tensor.\n fill_value (Scalar) – the value to fill the output tensor with.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. 
If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n sharded_tensor = ShardedTensor(\n sharding_spec,\n *size,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs,\n )\n torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type]\n return sharded_tensor\n\ndef init_from_local_shards(\n local_shards: List[Shard],\n *global_size,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Creates an :class:`ShardedTensor` from local shards and the global metadata.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list\n of shards that represent the local shards on this rank.\n global_size (int...): a list, tuple, or `torch.Size` of integers defining the\n shape of the overall sharded tensor.\n\n Keyword args:\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object handle on this rank\n\n\n Examples:\n Suppose we want construct a sharded tensor on two ranks, global size = (10, 5),\n each shard have a (5, 5) local tensor, we can do it like below:\n\n on rank 0:\n >>> local_shard_metadata = ShardMetadata(\n >>> shard_offsets=[0, 0]\n >>> shard_lengths=[5, 5]\n >>> placement=\"rank:0/cuda:0\"\n >>> )\n >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]\n >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])\n\n on rank 1:\n >>> local_shard_metadata = ShardMetadata(\n >>> shard_offsets=[5, 0]\n >>> shard_lengths=[5, 5]\n >>> placement=\"rank:1/cuda:1\"\n >>> )\n >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]\n >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])\n \"\"\"\n return ShardedTensor._init_from_local_shards(\n local_shards,\n *global_size,\n process_group=process_group,\n init_rrefs=init_rrefs\n )\n\ndef state_dict_hook(module, destination, prefix, local_metadata):\n \"\"\"\n Hook to add ShardedTensor to Module's ``state_dict``. Needs to be\n registered to the Module using\n :meth:`torch.nn.Module._register_state_dict_hook`.\n \"\"\"\n for submodule_name, submodule in module.named_modules():\n for attr_name, attr in submodule.__dict__.items():\n if isinstance(attr, ShardedTensor):\n destination[prefix + submodule_name + '.' + attr_name] = attr\n\ndef pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n \"\"\"\n Pre-load state dict hook to add ShardedTensor to the module.\n \"\"\"\n for submodule_name, submodule in module.named_modules():\n for attr_name, attr in submodule.__dict__.items():\n key = prefix + submodule_name + '.' 
+ attr_name\n if key in state_dict:\n if isinstance(state_dict[key], ShardedTensor):\n setattr(submodule, attr_name, state_dict[key])\n\ndef sharded_op_impl(func):\n \"\"\"\n Provides a way for users to write their own custom sharded operator. This\n can be used to override existing ShardedTensor operators or write a new\n one not supported by ShardedTensor. If the operator in question is covered\n by ``__torch_function__`` dispatch and has a ShardedTensor as any of its\n parameters, the function provided will be invoked for that operator.\n\n Example::\n >>> @sharded_op_impl(torch.nn.functional.linear)\n >>> def my_custom_sharded_linear(types, args, kwargs, process_group):\n >>> ....\n >>>\n >>> input = torch.rand(10, 32)\n >>> weight = sharded_tensor.rand(32, 16)\n >>> bias = torch.rand(16)\n >>> # This will call 'my_custom_sharded_linear'\n >>> torch.nn.functional.linear(input, weight, bias)\n\n The types, args and kwargs parameters are the same parameters that are\n passed to ``__torch_function__`` dispatch API\n (https://pytorch.org/docs/stable/notes/extending.html#extending-torch).\n There is an additional ``process_group`` parameter which is the\n process_group used for the ShardedTensor and can be used by\n implementations for communications within a sharded implementation.\n\n Args:\n func(Callable): Torch function for which we want to provide a sharded\n implementation (ex: torch.nn.functional.linear)\n \"\"\"\n def decorator_sharded_func(wrapped_func):\n _register_sharded_op(func, wrapped_func)\n\n @functools.wraps(wrapped_func)\n def wrapper(*args, **kwargs):\n return wrapped_func(*args, **kwargs)\n return wrapper\n return decorator_sharded_func\n\n# Import all builtin sharded ops\nfrom ._ops import * # noqa: F403\n\ndef _reshard_output(\n module: torch.nn.Module,\n resharding_spec: shard_spec.ShardingSpec) -> torch.nn.Module:\n \"\"\"\n Hook a module with local shards collection in the forward pass according\n to the given ``resharding_spec``.\n\n Args:\n module (:class:`torch.nn.Module`): Module whose output needs to be resharded.\n resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):\n The specification describing how the output of the module will be resharded.\n\n Returns:\n A :class:`torch.nn.Module` object with collection API hooked.\n \"\"\"\n def hook_func(_module, _input, output):\n if isinstance(output, ShardedTensor) or isinstance(output, _PartialTensor):\n return output.reshard(resharding_spec)\n return output\n module.register_forward_hook(hook_func)\n return module\n\n\ndef _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:\n \"\"\"\n Hook a module with local shards collection in the forward pass.\n\n This API is typically used to convert a sharded representation back to data parallel\n representation. In particular, it returns the local tensor for this Shard. If the\n size along the sharding dimension for the local tensor is 1, this dimension is removed\n from the final result. 
For example a [4, 16] ShardedTensor across 4 ranks is typically\n a local Tensor of size [16] across each rank and not [1, 16] across each rank.\n\n Args:\n module (:class:`torch.nn.Module`): Module whose output needs to be resharded.\n\n Returns:\n A :class:`torch.nn.Module` object with collection API hooked.\n \"\"\"\n\n def hook_func(_module, _input, output):\n if isinstance(output, ShardedTensor):\n local_tensor = output.local_tensor()\n # Squeeze the # of dimensions manually.\n if local_tensor.size(output._sharding_spec.dim) == 1: # type: ignore[attr-defined]\n local_tensor = local_tensor.squeeze(\n output._sharding_spec.dim # type: ignore[attr-defined]\n )\n return local_tensor\n module.register_forward_hook(hook_func)\n return module\n",
"# -*- coding: utf-8 -*-\n# Owner(s): [\"module: autograd\"]\n\nfrom torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS\nimport pkgutil\nimport torch\nimport sys\nfrom typing import Callable\nimport inspect\nimport json\nimport os\nimport unittest\n\nclass TestPublicBindings(TestCase):\n def test_no_new_bindings(self):\n \"\"\"\n This test aims to stop the introduction of new JIT bindings into torch._C\n whose names do not start with _. Such bindings are made available as\n torch.XXX, which may not be desirable.\n\n If your change causes this test to fail, add your new binding to a relevant\n submodule of torch._C, such as torch._C._jit (or other relevant submodule of\n torch._C). If your binding really needs to be available as torch.XXX, add it\n to torch._C and add it to the allowlist below.\n\n If you have removed a binding, remove it from the allowlist as well.\n \"\"\"\n # This allowlist contains every binding in torch._C that is copied into torch at\n # the time of writing. It was generated with\n #\n # {elem for elem in dir(torch._C) if not elem.startswith(\"_\")}\n #\n torch_C_allowlist_superset = {\n \"AggregationType\",\n \"AliasDb\",\n \"AnyType\",\n \"Argument\",\n \"ArgumentSpec\",\n \"autocast_decrement_nesting\",\n \"autocast_increment_nesting\",\n \"AVG\",\n \"BenchmarkConfig\",\n \"BenchmarkExecutionStats\",\n \"BFloat16StorageBase\",\n \"Block\",\n \"BoolStorageBase\",\n \"BoolType\",\n \"BufferDict\",\n \"ByteStorageBase\",\n \"CallStack\",\n \"Capsule\",\n \"CharStorageBase\",\n \"ClassType\",\n \"clear_autocast_cache\",\n \"Code\",\n \"CompilationUnit\",\n \"CompleteArgumentSpec\",\n \"ComplexDoubleStorageBase\",\n \"ComplexFloatStorageBase\",\n \"ComplexType\",\n \"ConcreteModuleType\",\n \"ConcreteModuleTypeBuilder\",\n \"CONV_BN_FUSION\",\n \"cpp\",\n \"CudaBFloat16StorageBase\",\n \"CudaBFloat16TensorBase\",\n \"CudaBFloat16TensorBase\",\n \"CudaBoolStorageBase\",\n \"CudaBoolTensorBase\",\n \"CudaBoolTensorBase\",\n \"CudaByteStorageBase\",\n \"CudaByteTensorBase\",\n \"CudaByteTensorBase\",\n \"CudaCharStorageBase\",\n \"CudaCharTensorBase\",\n \"CudaCharTensorBase\",\n \"CudaComplexDoubleStorageBase\",\n \"CudaComplexDoubleTensorBase\",\n \"CudaComplexDoubleTensorBase\",\n \"CudaComplexFloatStorageBase\",\n \"CudaComplexFloatTensorBase\",\n \"CudaComplexFloatTensorBase\",\n \"CudaDoubleStorageBase\",\n \"CudaDoubleTensorBase\",\n \"CudaDoubleTensorBase\",\n \"CudaFloatStorageBase\",\n \"CudaFloatTensorBase\",\n \"CudaHalfStorageBase\",\n \"CudaHalfTensorBase\",\n \"CudaIntStorageBase\",\n \"CudaIntTensorBase\",\n \"CudaIntTensorBase\",\n \"CudaLongStorageBase\",\n \"CudaLongTensorBase\",\n \"CudaLongTensorBase\",\n \"CudaShortStorageBase\",\n \"CudaShortTensorBase\",\n \"CudaShortTensorBase\",\n \"DeepCopyMemoTable\",\n \"default_generator\",\n \"DeserializationStorageContext\",\n \"device\",\n \"DeviceObjType\",\n \"DictType\",\n \"DisableTorchFunction\",\n \"DoubleStorageBase\",\n \"dtype\",\n \"EnumType\",\n \"ErrorReport\",\n \"ExecutionPlan\",\n \"FatalError\",\n \"FileCheck\",\n \"finfo\",\n \"FloatStorageBase\",\n \"FloatType\",\n \"fork\",\n \"FunctionSchema\",\n \"FUSE_ADD_RELU\",\n \"Future\",\n \"FutureType\",\n \"Generator\",\n \"get_autocast_cpu_dtype\",\n \"get_default_dtype\",\n \"get_num_interop_threads\",\n \"get_num_threads\",\n \"Gradient\",\n \"Graph\",\n \"GraphExecutorState\",\n \"HalfStorageBase\",\n \"has_cuda\",\n \"has_cudnn\",\n \"has_lapack\",\n \"has_mkl\",\n \"has_mkldnn\",\n \"has_mlc\",\n \"has_openmp\",\n 
\"has_spectral\",\n \"HOIST_CONV_PACKED_PARAMS\",\n \"iinfo\",\n \"import_ir_module_from_buffer\",\n \"import_ir_module\",\n \"InferredType\",\n \"init_num_threads\",\n \"INSERT_FOLD_PREPACK_OPS\",\n \"InterfaceType\",\n \"IntStorageBase\",\n \"IntType\",\n \"SymIntType\",\n \"IODescriptor\",\n \"is_anomaly_enabled\",\n \"is_autocast_cache_enabled\",\n \"is_autocast_cpu_enabled\",\n \"is_autocast_enabled\",\n \"is_grad_enabled\",\n \"is_inference_mode_enabled\",\n \"JITException\",\n \"layout\",\n \"ListType\",\n \"LiteScriptModule\",\n \"LockingLogger\",\n \"LoggerBase\",\n \"LongStorageBase\",\n \"memory_format\",\n \"merge_type_from_type_comment\",\n \"MobileOptimizerType\",\n \"ModuleDict\",\n \"Node\",\n \"NoneType\",\n \"NoopLogger\",\n \"NumberType\",\n \"OperatorInfo\",\n \"OptionalType\",\n \"ParameterDict\",\n \"parse_ir\",\n \"parse_schema\",\n \"parse_type_comment\",\n \"PyObjectType\",\n \"PyTorchFileReader\",\n \"PyTorchFileWriter\",\n \"QInt32StorageBase\",\n \"QInt8StorageBase\",\n \"qscheme\",\n \"QUInt4x2StorageBase\",\n \"QUInt2x4StorageBase\",\n \"QUInt8StorageBase\",\n \"read_vitals\",\n \"REMOVE_DROPOUT\",\n \"RRefType\",\n \"ScriptClass\",\n \"ScriptClassFunction\",\n \"ScriptDict\",\n \"ScriptDictIterator\",\n \"ScriptDictKeyIterator\",\n \"ScriptList\",\n \"ScriptListIterator\",\n \"ScriptFunction\",\n \"ScriptMethod\",\n \"ScriptModule\",\n \"ScriptModuleSerializer\",\n \"ScriptObject\",\n \"ScriptObjectProperty\",\n \"SerializationStorageContext\",\n \"set_anomaly_enabled\",\n \"set_autocast_cache_enabled\",\n \"set_autocast_cpu_dtype\",\n \"set_autocast_cpu_enabled\",\n \"set_autocast_enabled\",\n \"set_flush_denormal\",\n \"set_num_interop_threads\",\n \"set_num_threads\",\n \"set_vital\",\n \"ShortStorageBase\",\n \"Size\",\n \"StaticModule\",\n \"Stream\",\n \"StreamObjType\",\n \"StringType\",\n \"SUM\",\n \"TensorType\",\n \"ThroughputBenchmark\",\n \"TracingState\",\n \"TupleType\",\n \"Type\",\n \"unify_type_list\",\n \"UnionType\",\n \"Use\",\n \"Value\",\n \"autocast_decrement_nesting\",\n \"autocast_increment_nesting\",\n \"clear_autocast_cache\",\n \"cpp\",\n \"default_generator\",\n \"device\",\n \"dtype\",\n \"finfo\",\n \"fork\",\n \"get_default_dtype\",\n \"get_num_interop_threads\",\n \"get_num_threads\",\n \"has_cuda\",\n \"has_cudnn\",\n \"has_lapack\",\n \"has_mkl\",\n \"has_mkldnn\",\n \"has_mlc\",\n \"has_openmp\",\n \"iinfo\",\n \"import_ir_module\",\n \"import_ir_module_from_buffer\",\n \"init_num_threads\",\n \"is_anomaly_enabled\",\n \"is_autocast_enabled\",\n \"is_grad_enabled\",\n \"layout\",\n \"memory_format\",\n \"merge_type_from_type_comment\",\n \"parse_ir\",\n \"parse_schema\",\n \"parse_type_comment\",\n \"qscheme\",\n \"set_anomaly_enabled\",\n \"set_autocast_enabled\",\n 'set_autocast_gpu_dtype',\n 'get_autocast_gpu_dtype',\n \"set_flush_denormal\",\n \"set_num_interop_threads\",\n \"set_num_threads\",\n \"unify_type_list\",\n \"vitals_enabled\",\n\n \"wait\",\n }\n torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith(\"_\")}\n\n # Check that the torch._C bindings are all in the allowlist. Since\n # bindings can change based on how PyTorch was compiled (e.g. 
with/without\n # CUDA), the two may not be an exact match but the bindings should be\n # a subset of the allowlist.\n difference = torch_C_bindings.difference(torch_C_allowlist_superset)\n msg = f\"torch._C had bindings that are not present in the allowlist:\\n{difference}\"\n self.assertTrue(torch_C_bindings.issubset(torch_C_allowlist_superset), msg)\n\n # AttributeError: module 'torch.distributed' has no attribute '_shard'\n @unittest.skipIf(IS_WINDOWS, \"Distributed Attribute Error\")\n def test_correct_module_names(self):\n '''\n An API is considered public, if its `__module__` starts with `torch.`\n and there is no name in `__module__` or the object itself that starts with “_”.\n Each public package should either:\n - (preferred) Define `__all__` and all callables and classes in there must have their\n `__module__` start with the current submodule's path. Things not in `__all__` should\n NOT have their `__module__` start with the current submodule.\n - (for simple python-only modules) Not define `__all__` and all the elements in `dir(submod)` must have their\n `__module__` that start with the current submodule.\n '''\n failure_list = []\n with open(os.path.join(os.path.dirname(__file__), 'allowlist_for_publicAPI.json')) as json_file:\n # no new entries should be added to this allow_dict.\n # New APIs must follow the public API guidelines.\n allow_dict = json.load(json_file)\n\n def test_module(modname):\n split_strs = modname.split('.')\n mod = sys.modules.get(modname)\n for elem in split_strs:\n if elem.startswith(\"_\"):\n return\n\n def add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module):\n if modname in allow_dict and elem in allow_dict[modname]:\n return\n failure_list.append((modname, elem, elem_module))\n\n # verifies that each public API has the correct module name and naming semantics\n def looks_public_or_not(elem, modname, mod, is_public=True):\n obj = getattr(mod, elem)\n if not (isinstance(obj, Callable) or inspect.isclass(obj)):\n return\n elem_module = getattr(obj, '__module__', None)\n elem_modname_starts_with_mod = elem_module is not None and \\\n elem_module.startswith(modname) and '._' not in elem_module\n # elem's name must NOT begin with an `_` and it's module name\n # SHOULD start with it's current module since it's a public API\n looks_public = not elem.startswith('_') and elem_modname_starts_with_mod\n if is_public != looks_public:\n add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module)\n\n if hasattr(modname, '__all__'):\n public_api = mod.__all__\n all_api = dir(modname)\n for elem in all_api:\n looks_public_or_not(elem, modname, is_public=elem in public_api)\n\n else:\n all_api = dir(mod)\n for elem in all_api:\n if not elem.startswith('_'):\n looks_public_or_not(elem, modname, mod, is_public=True)\n\n for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'):\n test_module(modname)\n\n test_module('torch')\n msg = \"Following new APIs ( displayed in the form (module, element, element module) )\" \\\n \" were added that do not meet our guidelines for public API\" \\\n \" Please review https://docs.google.com/document/d/10yx2-4gs0gTMOimVS403MnoAWkqitS8TUHX73PN8EjE/edit?pli=1#\" \\\n \" for more information:\\n\" + \"\\n\".join(map(str, failure_list))\n\n # empty lists are considered false in python\n self.assertTrue(not failure_list, msg)\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.nn.init.uniform_",
"torch.nn.init.constant_"
],
[
"torch.testing._internal.common_utils.run_tests"
]
] |
ofirpress/shortformer | [
"0281f7618fb3833c8ac99f3e8e0512aed95fa2a1"
] | [
"fairseq/data/iterators.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport logging\nimport math\nimport operator\nimport os\nimport queue\nimport time\nfrom threading import Thread\n\nimport numpy as np\nimport torch\n\nfrom fairseq.data import data_utils\n\n\nlogger = logging.getLogger(__name__)\n\n# Object used by _background_consumer to signal the source is exhausted\n# to the main thread.\n_sentinel = object()\n\n\nclass CountingIterator(object):\n \"\"\"Wrapper around an iterable that maintains the iteration count.\n\n Args:\n iterable (iterable): iterable to wrap\n start (int): starting iteration count. Note that this doesn't\n actually advance the iterator.\n total (int): override the iterator length returned by\n ``__len__``. This can be used to truncate *iterator*.\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, start=None, total=None):\n self.iterable = iterable\n self.itr = iter(self)\n\n if start is None:\n self.n = getattr(iterable, 'n', 0)\n else:\n self.n = start\n\n if total is None:\n self.total = self.n + len(iterable)\n else:\n self.total = total\n\n def __len__(self):\n return self.total\n\n def __iter__(self):\n for x in self.iterable:\n if self.n >= self.total:\n raise RuntimeError(\n 'Mismatch between actual and expected iterable length. '\n 'Please report this to the fairseq developers.'\n )\n self.n += 1\n yield x\n\n def __next__(self):\n return next(self.itr)\n\n def has_next(self):\n \"\"\"Whether the iterator has been exhausted.\"\"\"\n return self.n < len(self)\n\n def skip(self, num_to_skip):\n \"\"\"Fast-forward the iterator by skipping *num_to_skip* elements.\"\"\"\n next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)\n return self\n\n def take(self, n):\n \"\"\"\n Truncates the iterator to n elements at most.\n \"\"\"\n self.total = min(self.total, n)\n\n # Propagate this change to the underlying iterator\n # Only take after what we have already consumed (i.e. after restarting\n # from checkpoint mid epoch, we have to subtract self.n which is the\n # starting point)\n #\n # This to maintain the invariant self.total = self.n + len(iterable),\n # before calling __next__ or __iter__\n propagated_take = max(n - self.n, 0)\n if hasattr(self.iterable, \"take\"):\n self.iterable.take(propagated_take)\n else:\n self.iterable = itertools.islice(self.iterable, propagated_take)\n\n\nclass EpochBatchIterating(object):\n def __len__(self) -> int:\n raise NotImplementedError\n\n @property\n def next_epoch_idx(self):\n raise NotImplementedError\n\n def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):\n \"\"\"Return a new iterator over the dataset.\n\n Args:\n shuffle (bool, optional): shuffle batches before returning the\n iterator (default: True).\n fix_batches_to_gpus: ensure that batches are always\n allocated to the same shards across epochs. 
Requires\n that :attr:`dataset` supports prefetching (default: False).\n \"\"\"\n raise NotImplementedError\n\n def end_of_epoch(self) -> bool:\n \"\"\"Returns whether the most recent epoch iterator has been exhausted\"\"\"\n raise NotImplementedError\n\n @property\n def iterations_in_epoch(self) -> int:\n \"\"\"The number of consumed batches in the current epoch.\"\"\"\n raise NotImplementedError\n\n def state_dict(self):\n \"\"\"Returns a dictionary containing a whole state of the iterator.\"\"\"\n raise NotImplementedError\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies the state of the iterator from the given *state_dict*.\"\"\"\n raise NotImplementedError\n\n\nclass StreamingEpochBatchIterator(EpochBatchIterating):\n def __init__(\n self, dataset, epoch=1, num_shards=1, shard_id=0,\n ):\n assert isinstance(dataset, torch.utils.data.IterableDataset)\n self.dataset = dataset\n self.epoch = max(epoch, 1) # we use 1-based indexing for epochs\n self._current_epoch_iterator = None\n self.num_shards = num_shards\n self.shard_id = shard_id\n\n @property\n def next_epoch_idx(self):\n \"\"\"Return the epoch index after *next_epoch_itr* is called.\"\"\"\n if self._current_epoch_iterator is not None and self.end_of_epoch():\n return self.epoch + 1\n else:\n return self.epoch\n\n def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):\n self.epoch = self.next_epoch_idx\n self.dataset.set_epoch(self.epoch)\n self._current_epoch_iterator = CountingIterator(\n iterable=ShardedIterator(\n iterable=self.dataset,\n num_shards=self.num_shards,\n shard_id=self.shard_id,\n ),\n )\n return self._current_epoch_iterator\n\n def end_of_epoch(self) -> bool:\n return not self._current_epoch_iterator.has_next()\n\n @property\n def iterations_in_epoch(self) -> int:\n if self._current_epoch_iterator is not None:\n return self._current_epoch_iterator.n\n return 0\n\n def state_dict(self):\n return {\n 'epoch': self.epoch,\n }\n\n def load_state_dict(self, state_dict):\n self.epoch = state_dict['epoch']\n\n\nclass EpochBatchIterator(EpochBatchIterating):\n \"\"\"A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.\n\n Compared to :class:`torch.utils.data.DataLoader`, this iterator:\n\n - can be reused across multiple epochs with the :func:`next_epoch_itr`\n method (optionally shuffled between epochs)\n - can be serialized/deserialized with the :func:`state_dict` and\n :func:`load_state_dict` methods\n - supports sharding with the *num_shards* and *shard_id* arguments\n\n Args:\n dataset (~torch.utils.data.Dataset): dataset from which to load the data\n collate_fn (callable): merges a list of samples to form a mini-batch\n batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of\n indices, or a callable to create such an iterator (~torch.utils.data.Sampler).\n A callable batch_sampler will be called for each epoch to enable per epoch dynamic\n batch iterators defined by this callable batch_sampler.\n seed (int, optional): seed for random number generator for\n reproducibility (default: 1).\n num_shards (int, optional): shard the data iterator into N\n shards (default: 1).\n shard_id (int, optional): which shard of the data iterator to\n return (default: 0).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 
0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 1).\n buffer_size (int, optional): the number of batches to keep ready in the\n queue. Helps speeding up dataloading. When buffer_size is zero, the\n default torch.utils.data.DataLoader preloading is used.\n timeout (int, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: ``0``)\n \"\"\"\n\n def __init__(\n self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,\n num_workers=0, epoch=1, buffer_size=0, timeout=0,\n ):\n assert isinstance(dataset, torch.utils.data.Dataset)\n self.dataset = dataset\n self.collate_fn = collate_fn\n self.batch_sampler = batch_sampler\n self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None\n self.seed = seed\n self.num_shards = num_shards\n self.shard_id = shard_id\n self.num_workers = num_workers\n # This upper limit here is to prevent people from abusing this feature\n # in a shared computing environment.\n self.buffer_size = min(buffer_size, 20)\n self.timeout = timeout\n\n self.epoch = max(epoch, 1) # we use 1-based indexing for epochs\n self.shuffle = True\n self._cur_epoch_itr = None\n self._next_epoch_itr = None\n self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)\n\n @property\n def frozen_batches(self):\n if self._frozen_batches is None:\n self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))\n return self._frozen_batches\n\n def __len__(self):\n return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))\n\n @property\n def n(self):\n return self.iterations_in_epoch\n\n @property\n def next_epoch_idx(self):\n \"\"\"Return the epoch index after *next_epoch_itr* is called.\"\"\"\n if self._next_epoch_itr is not None:\n return self.epoch\n elif self._cur_epoch_itr is not None and self.end_of_epoch():\n return self.epoch + 1\n else:\n return self.epoch\n\n def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):\n \"\"\"Return a new iterator over the dataset.\n\n Args:\n shuffle (bool, optional): shuffle batches before returning the\n iterator (default: True).\n fix_batches_to_gpus: ensure that batches are always\n allocated to the same shards across epochs. 
Requires\n that :attr:`dataset` supports prefetching (default: False).\n \"\"\"\n self.epoch = self.next_epoch_idx\n self.dataset.set_epoch(self.epoch)\n if self._next_epoch_itr is not None:\n self._cur_epoch_itr = self._next_epoch_itr\n self._next_epoch_itr = None\n else:\n if callable(self.batch_sampler):\n # reset _frozen_batches to refresh the next epoch\n self._frozen_batches = None\n self._cur_epoch_itr = self._get_iterator_for_epoch(\n self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus,\n )\n self.shuffle = shuffle\n return self._cur_epoch_itr\n\n def end_of_epoch(self) -> bool:\n \"\"\"Returns whether the most recent epoch iterator has been exhausted\"\"\"\n return not self._cur_epoch_itr.has_next()\n\n @property\n def iterations_in_epoch(self):\n \"\"\"The number of consumed batches in the current epoch.\"\"\"\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.n\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.n\n return 0\n\n def state_dict(self):\n \"\"\"Returns a dictionary containing a whole state of the iterator.\"\"\"\n if self.end_of_epoch():\n epoch = self.epoch + 1\n iter_in_epoch = 0\n else:\n epoch = self.epoch\n iter_in_epoch = self.iterations_in_epoch\n return {\n 'version': 2,\n 'epoch': epoch,\n 'iterations_in_epoch': iter_in_epoch,\n 'shuffle': self.shuffle,\n }\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies the state of the iterator from the given *state_dict*.\"\"\"\n self.epoch = state_dict['epoch']\n itr_pos = state_dict.get('iterations_in_epoch', 0)\n version = state_dict.get('version', 1)\n if itr_pos > 0:\n # fast-forward epoch iterator\n self._next_epoch_itr = self._get_iterator_for_epoch(\n self.epoch,\n shuffle=state_dict.get('shuffle', True),\n offset=itr_pos,\n )\n if self._next_epoch_itr is None:\n if version == 1:\n # legacy behavior: we finished the epoch, increment epoch counter\n self.epoch += 1\n else:\n raise RuntimeError(\n 'Cannot resume training due to dataloader mismatch, please '\n 'report this to the fairseq developers. 
You can relaunch '\n 'training with `--reset-dataloader` and it should work.'\n )\n else:\n self._next_epoch_itr = None\n\n def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):\n\n def shuffle_batches(batches, seed):\n with data_utils.numpy_seed(seed):\n np.random.shuffle(batches)\n return batches\n\n if self._supports_prefetch:\n batches = self.frozen_batches\n\n if shuffle and not fix_batches_to_gpus:\n batches = shuffle_batches(list(batches), self.seed + epoch)\n\n batches = list(ShardedIterator(\n batches, self.num_shards, self.shard_id, fill_value=[]\n ))\n self.dataset.prefetch([i for s in batches for i in s])\n\n if shuffle and fix_batches_to_gpus:\n batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)\n else:\n if shuffle:\n batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)\n else:\n batches = self.frozen_batches\n batches = list(ShardedIterator(\n batches, self.num_shards, self.shard_id, fill_value=[]\n ))\n\n if offset > 0 and offset >= len(batches):\n return None\n\n if self.num_workers > 0:\n os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'\n\n # Create data loader\n itr = torch.utils.data.DataLoader(\n self.dataset,\n collate_fn=self.collate_fn,\n batch_sampler=batches[offset:],\n num_workers=self.num_workers,\n timeout=self.timeout,\n )\n\n # Wrap with a BufferedIterator if needed\n if self.buffer_size > 0:\n itr = BufferedIterator(self.buffer_size, itr)\n\n # Wrap with CoutingIterator\n itr = CountingIterator(itr, start=offset)\n return itr\n\n\nclass GroupedIterator(CountingIterator):\n \"\"\"Wrapper around an iterable that returns groups (chunks) of items.\n\n Args:\n iterable (iterable): iterable to wrap\n chunk_size (int): size of each chunk\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, chunk_size):\n itr = _chunk_iterator(iterable, chunk_size)\n super().__init__(\n itr,\n start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))),\n total=int(math.ceil(len(iterable) / float(chunk_size))),\n )\n self.chunk_size = chunk_size\n\n\ndef _chunk_iterator(itr, chunk_size):\n chunk = []\n for x in itr:\n chunk.append(x)\n if len(chunk) == chunk_size:\n yield chunk\n chunk = []\n if len(chunk) > 0:\n yield chunk\n\n\nclass ShardedIterator(CountingIterator):\n \"\"\"A sharded wrapper around an iterable, padded to length.\n\n Args:\n iterable (iterable): iterable to wrap\n num_shards (int): number of shards to split the iterable into\n shard_id (int): which shard to iterator over\n fill_value (Any, optional): padding value when the iterable doesn't\n evenly divide *num_shards* (default: None).\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, num_shards, shard_id, fill_value=None):\n if shard_id < 0 or shard_id >= num_shards:\n raise ValueError('shard_id must be between 0 and num_shards')\n sharded_len = int(math.ceil(len(iterable) / float(num_shards)))\n\n batch_size = len(list(iterable)[0])\n last = max( list(map(max, *list(iterable))))\n\n # This function receives a list [1,2,3,...., last] where each number represents one of the input subsequences\n # In the unmodified fairseq, if you have 4 GPUS, fairseq will give the first GPU subsequences [1,5,9,13,...],\n # the second GPU will get [2,6,10,14,..], the third GPU will get [3,7,11,15] and so on...\n # If we want to do caching, we can't use that. 
We need each GPU to get a continuous list of input subsequences (like [1,2,3,4,5,...]).\n # So what the following code does, is it splits the input into *continuous* chunks of subsequences. For example, if we have\n # 4 GPUs and 100,000 input subsequences, the first GPU will get [1,2,3,...,25000], the second GPU will get [25001,25002,25003,...],\n # and so on.\n # The above description was written with the assumption that batch_size is 1. This function also works when batch_size is greater than 1.\n\n iterable = range(0, last)\n all_itrs = []\n for i in range(shard_id*batch_size, (shard_id+1)*batch_size):\n itr = list(itertools.islice(iterable, i * sharded_len,\n (i +1 )* sharded_len ))\n\n\n all_itrs.append(itr)\n\n itr = [x for x in itertools.chain(*itertools.zip_longest(*all_itrs)) if x is not None]\n itr = [itr[i:i+batch_size] for i in range(0, len(itr), batch_size)] #split to batches\n\n\n if len(itr) != sharded_len: #this makes sure that we don't miss any input subsequences\n to_add = sharded_len - len(itr)\n to_add = [[e] for e in range(sharded_len-to_add, sharded_len)]\n itr = itr + to_add\n\n\n\n super().__init__(\n itr,\n start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),\n total=sharded_len,\n )\n\n\nclass BackgroundConsumer(Thread):\n def __init__(self, queue, source, max_len):\n Thread.__init__(self)\n\n self._queue = queue\n self._source = source\n self._max_len = max_len\n self.count = 0\n\n def run(self):\n try:\n for item in self._source:\n self._queue.put(item)\n\n # Stop if we reached the maximum length\n self.count += 1\n if self._max_len is not None and self.count >= self._max_len:\n break\n\n # Signal the consumer we are done.\n self._queue.put(_sentinel)\n except Exception as e:\n self._queue.put(e)\n\n\nclass BufferedIterator(object):\n def __init__(self, size, iterable):\n self._queue = queue.Queue(size)\n self._iterable = iterable\n self._consumer = None\n\n self.start_time = time.time()\n self.warning_time = None\n\n self.total = len(iterable)\n\n def _create_consumer(self):\n self._consumer = BackgroundConsumer(\n self._queue,\n self._iterable,\n self.total,\n )\n self._consumer.daemon = True\n self._consumer.start()\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return self.total\n\n def take(self, n):\n self.total = min(self.total, n)\n\n # Propagate this change to the underlying iterator\n if hasattr(self._iterable, \"take\"):\n self._iterable.take(n)\n else:\n self._iterable = itertools.islice(self._iterable, n)\n\n def __next__(self):\n # Create consumer if not created yet\n if self._consumer is None:\n self._create_consumer()\n\n # Notify the user if there is a data loading bottleneck\n if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):\n if time.time() - self.start_time > 5 * 60:\n if self.warning_time is None or time.time() - self.warning_time > 15 * 60:\n logger.debug(\n \"Data loading buffer is empty or nearly empty. This may \"\n \"indicate a data loading bottleneck, and increasing the \"\n \"number of workers (--num-workers) may help.\"\n )\n self.warning_time = time.time()\n\n # Get next example\n item = self._queue.get(True)\n if isinstance(item, Exception):\n raise item\n if item is _sentinel:\n raise StopIteration()\n return item\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.shuffle"
]
] |
berkott/SpaceInvadersAI | [
"0d1d095f60b06f09b337bd3abf7bb46a08a8ed70"
] | [
"NeuroEvolution/evolution.py"
] | [
"import gym\nimport keras as k\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\nfrom keras.optimizers import Adam\nimport numpy as np\nfrom datetime import datetime\nfrom matplotlib import pyplot as PLT\nimport time\nimport csv\nimport os\n\n# You can adjust these hyperparameters\nPOPULATION_SIZE = 50\nL1=20\nL2=10\nL3=50\nL4=4\n# L1=2\n# L2=3\n# L3=4\n# L4=5\nPOOLING_SIZE = (2,2)\nFILTER_SIZE_1 = (3,3)\nFILTER_SIZE_2 = (5,5)\nELITE_SET_SIZE = 5\nMUTATION_RATE = 0.5\n\nFRAME_SIZE = 210*160*1\nINPUT_DIM = 2*FRAME_SIZE\nINPUT_SHAPE = (210, 160, 2)\nFINAL_DIMENSION_X = int(((INPUT_SHAPE[0] - 2*int(FILTER_SIZE_1[0]/2))/2 - 2*int(FILTER_SIZE_2[0]/2))/2)\nFINAL_DIMENSION_Y = int(((INPUT_SHAPE[1] - 2*int(FILTER_SIZE_1[0]/2))/2 - 2*int(FILTER_SIZE_2[0]/2))/2)\n\n\nenv = gym.make('SpaceInvaders-v0')\nkeepTraining = True\nslack_logs = np.zeros((6,1))\n\ndef visualize(featureVector):\n regularImage = featureVector[0,:FRAME_SIZE].reshape((210,160))\n differenceImage = featureVector[0,FRAME_SIZE:].reshape((210,160))\n PLT.imshow(regularImage)\n PLT.show()\n PLT.imshow(differenceImage)\n PLT.show()\n\ndef writeCsv(index, data):\n slack_logs[index] = data\n\n # For slack_logs:\n # [0] Generation\n # [1] Highest Score\n # [2] Current Score\n # [3] Games Played\n # [4] Start Time\n # [5] All Time High Score\n\n with open(\"logs.csv\", \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerows(slack_logs)\n\ndef calculatePolicySize():\n # INPUT_DIM * L1+L1+L1 * L2+L2+L2 * L3+L3+L3 * L4+L4\n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + \n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2 + L2 + \n # final_dimension_x*final_dimension_y*L2*L3 + L3 + \n # L3*L4\n return FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2 + L2 + FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3 + L3 + L3 * L4 + L4\n\n# This function is called each time a new memeber of the population is created\ndef initPopulation():\n population = np.random.rand(POPULATION_SIZE, calculatePolicySize())\n population = population*2-1\n return population\n\ndef convert_prediction_to_action(prediction):\n index = np.argmax(prediction[0])\n # NOOP\n if (index == 0):\n return 0\n # FIRE\n elif (index == 1):\n return 1\n # RIGHT\n elif (index == 2):\n return 3\n # LEFT\n elif (index == 3):\n return 4\n return 0\n\ndef playGame(model):\n score=0\n done=False\n action=0\n frame = np.zeros((1,FRAME_SIZE))\n previous_frame = np.zeros((1,FRAME_SIZE))\n env.reset()\n observation_dim = list(INPUT_SHAPE)\n observation_dim.insert(0,1)\n observation_dim = tuple(observation_dim)\n while not done:\n env.render()\n observation, reward, done, _ = env.step(action)\n frame = np.reshape(observation[:,:,0],(1,FRAME_SIZE))\n frame = np.where(frame > 0, 1.0,0)\n difference = frame-previous_frame\n final_observation=np.zeros((1,INPUT_DIM))\n final_observation[0,:FRAME_SIZE]=frame\n final_observation[0,FRAME_SIZE:]=difference\n final_observation = np.reshape(final_observation, observation_dim)\n prediction = model.predict(final_observation)\n action = convert_prediction_to_action(prediction)\n score+=reward\n\n writeCsv(2, score)\n\n previous_frame = np.copy(frame)\n\n # print(\"Score:\",score)\n return score\n\n# This is where the weights are put into the neural net to see how well it goes\ndef evaluate(dnnmodel, population, gamesPlayed):\n scores=np.zeros(POPULATION_SIZE)\n for i in 
range(POPULATION_SIZE):\n nnFormatPolicyVector = applyPolicyVectorToNN(population[i])\n dnnmodel.set_weights(nnFormatPolicyVector)\n scores[i] = playGame(dnnmodel)\n gamesPlayed+=1\n writeCsv(3, gamesPlayed)\n return scores\n\n\n# Constructs the model that is to be used\ndef buildModel():\n model = Sequential()\n # layer1=Dense(L1, activation = 'relu', input_dim = INPUT_DIM, kernel_initializer='uniform')\n layer1=Conv2D(L1, FILTER_SIZE_1, activation='relu', input_shape = INPUT_SHAPE, kernel_initializer='uniform')\n model.add(layer1)\n model.add(MaxPooling2D(pool_size=POOLING_SIZE))\n \n layer2=Conv2D(L2, FILTER_SIZE_2, activation='relu', kernel_initializer='uniform')\n model.add(layer2)\n model.add(MaxPooling2D(pool_size=POOLING_SIZE))\n\n # model.add(Dropout(0.25))\n model.add(Flatten())\n\n layer3=Dense(L3, activation = 'relu', kernel_initializer='uniform')\n model.add(layer3)\n\n layer4=Dense(L4, activation ='softmax', kernel_initializer='uniform')\n model.add(layer4)\n\n adam = Adam(lr=0.01)\n model.compile(loss='mean_squared_error', optimizer=adam)\n weights=model.get_weights()\n print(len(weights))\n print(\"====================================\")\n return model\n\ndef applyPolicyVectorToNN(policyVector):\n # INPUT_DIM * L1+L1+L1 * L2+L2+L2 * L3+L3+L3 * L4+L4\n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + \n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2 + L2 + \n # final_dimension_x*final_dimension_y*L2*L3 + L3 + \n # L3*L4\n\n offset=FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1\n sec1 = policyVector[:offset].reshape(FILTER_SIZE_1[0], FILTER_SIZE_1[1], INPUT_SHAPE[2], L1)\n sec2 = policyVector[offset:offset+L1]\n offset+=L1\n sec3 = policyVector[offset:offset+FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2].reshape(FILTER_SIZE_2[0], FILTER_SIZE_2[1], L1, L2)\n offset+=FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2\n sec4 = policyVector[offset:offset+L2]\n offset+=L2\n sec5 = policyVector[offset:offset+FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3].reshape(FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2, L3)\n offset+=FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3\n sec6 = policyVector[offset:offset+L3]\n offset+=L3\n sec7 = policyVector[offset:offset+L3*L4].reshape(L3, L4)\n offset+=L3*L4\n sec8 = policyVector[offset:]\n\n nnFormat = []\n nnFormat.append(sec1)\n nnFormat.append(sec2)\n nnFormat.append(sec3)\n nnFormat.append(sec4)\n nnFormat.append(sec5)\n nnFormat.append(sec6)\n nnFormat.append(sec7)\n nnFormat.append(sec8)\n return nnFormat\n\n# This is where the members of the population are ranked\ndef selection(scores, population):\n eliteSet = np.zeros((ELITE_SET_SIZE,calculatePolicySize()))\n scoresTemp=np.copy(scores)\n for i in range(ELITE_SET_SIZE):\n index = np.argmax(scoresTemp)\n scoresTemp[index] = 0\n eliteSet[i] = population[index]\n return eliteSet\n\ndef cross(policy1, policy2):\n newPolicy = policy1.copy()\n mask = np.random.randint(2, size=newPolicy.shape).astype(np.bool)\n newPolicy[mask] = policy2[mask]\n # for i in range(calculatePolicySize()):\n # rand = np.random.uniform()\n # if rand > 0.5:\n # newPolicy[i] = policy2[i]\n return newPolicy\n\n# This is where crossover occurs based on the selection process\ndef crossover(scores, population):\n crossoverSet = np.zeros((POPULATION_SIZE,calculatePolicySize()))\n selectionProbability = np.array(scores)/np.sum(scores)\n for i in range(POPULATION_SIZE - ELITE_SET_SIZE):\n randomIndex = np.random.choice(range(POPULATION_SIZE), p=selectionProbability)\n policy1 = population[randomIndex]\n randomIndex = 
np.random.choice(range(POPULATION_SIZE), p=selectionProbability)\n policy2 = population[randomIndex]\n newPolicy = cross(policy1, policy2)\n crossoverSet[i]=newPolicy\n return crossoverSet\n\n# Lastly, the mutation is a point mutation that sometimes occurs\ndef mutation(crossoverPopulation):\n i = int((POPULATION_SIZE - ELITE_SET_SIZE) * np.random.random_sample())\n j = int(calculatePolicySize() * np.random.random_sample())\n\n for _ in range(int(i*j*MUTATION_RATE)):\n crossoverPopulation[i][j] = np.random.random_sample() * 2 - 1\n # for i in range(POPULATION_SIZE - ELITE_SET_SIZE):\n # for j in range(calculatePolicySize()):\n # rand = np.random.uniform()\n # if(rand < MUTATION_RATE):\n # crossoverPopulation[i][j] = np.random.random_sample() * 2 - 1\n return crossoverPopulation\n\ndef generateNewGeneration(scores, population):\n elitePopulation = selection(scores, population)\n crossoverPopulation = crossover(scores, population)\n mutationPopulation = mutation(crossoverPopulation)\n \n for i in range(ELITE_SET_SIZE):\n mutationPopulation[POPULATION_SIZE-ELITE_SET_SIZE+i] = elitePopulation[i] \n\n return mutationPopulation\n\ndef saveHighestScorePolicy(population, generation, scores):\n if (generation % 10 == 0):\n index = np.argmax(scores)\n filename='generation'+str(generation)+'HS'+str(scores[index])+'.npy'\n np.save(os.path.join('SavedScores', filename) ,population[index])\n print(\"Saved generation to file \"+filename)\n\ndef loadPolicy(filename, population, index):\n policy=np.load(filename)\n print(\"Loaded\\n\",policy)\n population[index]=policy\n\ndef measureTime():\n global lasttime\n currentTime=time.time()\n diff=currentTime-lasttime\n lasttime=currentTime\n return diff\n\n# test_selection()\n# quit()\n\nenv.reset()\npopulation = initPopulation()\n# loadPolicy('generation0.npy',population,0)\ndnnmodel = buildModel()\ngeneration = 0\nlasttime = time.time()\nall_time_high_score = 0\n\nwriteCsv(4, time.time())\n\nwhile (keepTraining):\n scores = evaluate(dnnmodel, population, generation*POPULATION_SIZE)\n print(int(measureTime()),\" sec Generation: \", generation, \" Highest Score: \", np.max(scores), \" Games Played: \", generation*POPULATION_SIZE+POPULATION_SIZE)\n\n writeCsv(0, generation)\n writeCsv(1, np.max(scores))\n if (np.max(scores) > all_time_high_score):\n all_time_high_score = np.max(scores)\n writeCsv(5, all_time_high_score)\n\n saveHighestScorePolicy(population, generation, scores)\n population = generateNewGeneration(scores, population)\n print(int(measureTime()),\" sec New generation created.\")\n generation+=1\n"
] | [
[
"numpy.random.random_sample",
"numpy.load",
"numpy.sum",
"numpy.zeros",
"numpy.reshape",
"numpy.argmax",
"numpy.copy",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.array",
"numpy.where",
"numpy.random.randint"
]
] |
BeibinLi/SSD | [
"2cd30f02c21b0a8731a34dca2a89d6e099ca3442"
] | [
"ssd/modeling/backbone/vgg.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ssd.layers import L2Norm\nfrom ssd.modeling import registry\nfrom ssd.utils.model_zoo import load_state_dict_from_url\n\nmodel_urls = {\n 'vgg': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',\n}\n\n\n# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py\ndef add_vgg(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6,\n nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n return layers\n\n\ndef add_extras(cfg, i, size=300):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n if size == 512:\n layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))\n layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))\n return layers\n\n\ndef add_header(vgg, extra_layers, boxes_per_location, num_classes):\n regression_headers = []\n classification_headers = []\n vgg_source = [21, -2]\n for k, v in enumerate(vgg_source):\n regression_headers += [nn.Conv2d(vgg[v].out_channels,\n boxes_per_location[k] * 4, kernel_size=3, padding=1)]\n classification_headers += [nn.Conv2d(vgg[v].out_channels,\n boxes_per_location[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n regression_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]\n * 4, kernel_size=3, padding=1)]\n classification_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]\n * num_classes, kernel_size=3, padding=1)]\n return regression_headers, classification_headers\n\n\nvgg_base = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n}\nextras_base = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],\n}\n\n\nclass VGG(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n size = cfg.INPUT.IMAGE_SIZE\n vgg_config = vgg_base[str(size)]\n extras_config = extras_base[str(size)]\n\n self.vgg = nn.ModuleList(add_vgg(vgg_config))\n self.extras = nn.ModuleList(add_extras(extras_config, i=1024, size=size))\n self.l2_norm = L2Norm(512, scale=20)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.extras.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n\n def init_from_pretrain(self, state_dict):\n self.vgg.load_state_dict(state_dict)\n\n def forward(self, x):\n features = []\n for i in range(23):\n x = self.vgg[i](x)\n s = self.l2_norm(x) # Conv4_3 L2 normalization\n 
features.append(s)\n\n # apply vgg up to fc7\n for i in range(23, len(self.vgg)):\n x = self.vgg[i](x)\n features.append(x)\n\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n features.append(x)\n\n return tuple(features)\n\n\n@registry.BACKBONES.register('vgg')\ndef vgg(cfg, pretrained=True):\n model = VGG(cfg)\n if pretrained:\n model.init_from_pretrain(load_state_dict_from_url(model_urls['vgg']))\n return model\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.init.xavier_uniform_",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.init.zeros_",
"torch.nn.ReLU"
]
] |
fedarko/songbird | [
"44827596bc9ca16d8046aeafee24ee1dd74dcc0b"
] | [
"songbird/util.py"
] | [
"import os\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import check_random_state\nfrom skbio.stats.composition import clr_inv as softmax\nfrom biom import Table\nfrom patsy import dmatrix\n\n\ndef random_multinomial_model(num_samples, num_features,\n reps=1,\n low=2, high=10,\n beta_mean=0,\n beta_scale=5,\n mu=1,\n sigma=1,\n seed=0):\n \"\"\" Generates a table using a random poisson regression model.\n\n Here we will be simulating microbial counts given the model, and the\n corresponding model priors.\n\n Parameters\n ----------\n num_samples : int\n Number of samples\n num_features : int\n Number of features\n tree : np.array\n Tree specifying orthonormal contrast matrix.\n low : float\n Smallest gradient value.\n high : float\n Largest gradient value.\n beta_mean : float\n Mean of beta prior (for regression coefficients)\n beta_scale : float\n Scale of beta prior (for regression coefficients)\n mu : float\n Mean sequencing depth (in log units)\n sigma : float\n Variance for sequencing depth\n\n Returns\n -------\n table : biom.Table\n Biom representation of the count table.\n metadata : pd.DataFrame\n DataFrame containing relevant metadata.\n beta : np.array\n Regression parameter estimates.\n \"\"\"\n N = num_samples\n\n # generate all of the coefficient using the random poisson model\n state = check_random_state(seed)\n beta = state.normal(beta_mean, beta_scale, size=(2, num_features-1))\n\n X = np.hstack([np.linspace(low, high, num_samples // reps)]\n for _ in range(reps))\n X = np.vstack((np.ones(N), X)).T\n phi = np.hstack((np.zeros((N, 1)), X @ beta))\n probs = softmax(phi)\n n = [mu] * N\n\n table = np.vstack(\n state.multinomial(n[i], probs[i, :])\n for i in range(N)\n ).T\n\n samp_ids = pd.Index(['S%d' % i for i in range(num_samples)],\n name='sampleid')\n feat_ids = ['F%d' % i for i in range(num_features)]\n balance_ids = ['L%d' % i for i in range(num_features-1)]\n\n table = Table(table, feat_ids, samp_ids)\n metadata = pd.DataFrame(X, columns=['Ones', 'X'], index=samp_ids)\n beta = pd.DataFrame(beta.T, columns=['Intercept', 'beta'],\n index=balance_ids)\n\n return table, metadata, beta\n\n\ndef _type_cast_to_float(df):\n \"\"\" Attempt to cast all of the values in dataframe to float.\n\n This will try to type cast all of the series within the\n dataframe into floats. 
If a column cannot be type casted,\n it will be kept as is.\n\n Parameters\n ----------\n df : pd.DataFrame\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n # TODO: Will need to improve this, as this is a very hacky solution.\n for c in df.columns:\n s = df[c]\n try:\n df[c] = s.astype(np.float64)\n except Exception:\n continue\n return df\n\n\ndef read_metadata(filepath):\n \"\"\" Reads in a sample metadata file\n\n Parameters\n ----------\n filepath: str\n The file path location of the sample metadata file\n\n Returns\n -------\n pd.DataFrame :\n The metadata table with inferred types.\n \"\"\"\n metadata = pd.read_table(\n filepath, dtype=object)\n cols = metadata.columns\n metadata = metadata.set_index(cols[0])\n metadata = _type_cast_to_float(metadata.copy())\n\n return metadata\n\n\ndef match_and_filter(table, metadata, formula,\n min_sample_count, min_feature_count):\n \"\"\" Matches and aligns biom and metadata tables.\n\n This will also return the patsy representation.\n\n Parameters\n ----------\n table : biom.Table\n Table of abundances\n metadata : pd.DataFrame\n Sample metadata\n\n Returns\n -------\n table : biom.Table\n Filtered biom table\n metadata : pd.DataFrame\n Sample metadata\n \"\"\"\n # match them\n\n def sample_filter(val, id_, md):\n return id_ in metadata.index and np.sum(val) > min_sample_count\n\n def read_filter(val, id_, md):\n return np.sum(val > 0) > min_feature_count\n\n table = table.filter(sample_filter, axis='sample', inplace=False)\n table = table.filter(read_filter, axis='observation', inplace=False)\n\n metadata = metadata.loc[table.ids(axis='sample')]\n metadata = metadata.loc[~metadata.index.duplicated(keep='first')]\n\n def sort_f(xs):\n return [xs[metadata.index.get_loc(x)] for x in xs]\n\n table = table.sort(sort_f=sort_f, axis='sample')\n design = dmatrix(formula, metadata, return_type='dataframe')\n design = design.dropna()\n\n def design_filter(val, id_, md):\n return id_ in design.index\n\n table = table.filter(design_filter, axis='sample')\n return table, metadata, design\n\n\ndef split_training(dense_table, metadata, design, training_column=None,\n num_random_test_examples=10, seed=None):\n\n if training_column is None:\n np.random.seed(seed)\n idx = np.random.random(design.shape[0])\n i = np.argsort(idx)[num_random_test_examples]\n\n threshold = idx[i]\n train_idx = ~(idx < threshold)\n else:\n train_idx = metadata.loc[design.index, training_column] == \"Train\"\n\n trainX = design.loc[train_idx].values\n testX = design.loc[~train_idx].values\n\n trainY = dense_table.loc[train_idx].values\n testY = dense_table.loc[~train_idx].values\n\n return trainX, testX, trainY, testY\n\n\ndef silence_output():\n # suppress profiling messages & compilation warnings\n # taken from:\n # https://stackoverflow.com/questions/47068709/your-cpu-supports-\n # instructions-that-this-tensorflow-binary-was-not-compiled-to-u\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n # suppress deprecation warnings\n # taken from https://github.com/tensorflow/tensorflow/issues/27023\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n"
] | [
[
"pandas.read_table",
"numpy.sum",
"numpy.ones",
"sklearn.utils.check_random_state",
"numpy.zeros",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.argsort",
"numpy.random.random",
"numpy.linspace",
"tensorflow.compat.v1.logging.set_verbosity"
]
] |
lsst-sitcom/spot_motion_monitor | [
"3d0242276198126240667ba13e95b7bdf901d053"
] | [
"tests/models/test_full_frame_model.py"
] | [
"# This file is part of spot_motion_monitor.\n#\n# Developed for LSST System Integration, Test and Commissioning.\n#\n# See the LICENSE file at the top-level directory of this distribution\n# for details of code ownership.\n#\n# Use of this source code is governed by a 3-clause BSD-style\n# license that can be found in the LICENSE file.\n\nimport numpy as np\nimport pytest\n\nfrom spot_motion_monitor.camera.gaussian_camera import GaussianCamera\nfrom spot_motion_monitor.models import FullFrameModel\nfrom spot_motion_monitor.utils import FrameRejected, TimeHandler\n\nclass TestFullFrameModel():\n\n def setup_class(cls):\n cls.model = FullFrameModel()\n cls.model.timeHandler = TimeHandler()\n\n def checkFrame(self, flux, maxAdc, comX, comY):\n return flux > 4000 and maxAdc > 130 and comX > 0 and comY > 0\n\n def test_parametersAfterConstruction(self):\n assert self.model.sigmaScale == 5.0\n assert self.model.minimumNumPixels == 10\n assert self.model.timeHandler is not None\n\n def test_frameCalculations(self):\n # This test requires the generation of a CCD frame which will be\n # provided by the GaussianCamera\n camera = GaussianCamera()\n camera.seed = 1000\n camera.startup()\n frame = camera.getFullFrame()\n info = self.model.calculateCentroid(frame)\n assert info.centerX == 288.47687644439395\n assert info.centerY == 224.45394404821826\n assert info.flux == 3235.9182163661176\n assert info.maxAdc == 135.83703259361937\n assert info.fwhm == 5.749039360993981\n assert info.stdNoObjects is None\n\n def test_badFrameCalculation(self):\n frame = np.ones((480, 640))\n with pytest.raises(FrameRejected):\n self.model.calculateCentroid(frame)\n\n def test_failedFrameCheck(self):\n # This test requires the generation of a CCD frame which will be\n # provided by the GaussianCamera\n self.model.frameCheck = self.checkFrame\n camera = GaussianCamera()\n camera.seed = 1000\n camera.startup()\n frame = camera.getFullFrame()\n with pytest.raises(FrameRejected):\n self.model.calculateCentroid(frame)\n self.model.frameCheck = None\n"
] | [
[
"numpy.ones"
]
] |
jmendozais/SDSSDepth | [
"7a4d0c5affef3eda7056876ccb2365ac883c08eb"
] | [
"loss/general_adaptive_loss.py"
] | [
"import sys\nimport math\nimport os\n\nimport torch\nimport torchvision\nimport numpy as np\n\nfrom pkg_resources import resource_stream\n\ndef interpolate1d(x, values, tangents):\n '''\n Returns:\n Returns the interpolated or extrapolated values for each query point,\n depending on whether or not the query lies within the span of the spline.\n '''\n assert torch.is_tensor(x)\n assert torch.is_tensor(values)\n assert torch.is_tensor(tangents)\n float_dtype = x.dtype\n assert values.dtype == float_dtype\n assert tangents.dtype == float_dtype\n assert len(values.shape) == 1\n assert len(tangents.shape) == 1\n assert values.shape[0] == tangents.shape[0]\n\n x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),\n values.shape[0] - 2)).type(torch.int64)\n x_hi = x_lo + 1\n\n # Compute the relative distance between each `x` and the knot below it.\n t = x - x_lo.type(float_dtype)\n\n # Compute the cubic hermite expansion of `t`.\n t_sq = t**2\n t_cu = t * t_sq\n h01 = -2. * t_cu + 3. * t_sq\n h00 = 1. - h01\n h11 = t_cu - t_sq\n h10 = h11 - t_sq + t\n\n # Linearly extrapolate above and below the extents of the spline for all\n # values.\n value_before = tangents[0] * t + values[0]\n value_after = tangents[-1] * (t - 1.) + values[-1]\n\n # Cubically interpolate between the knots below and above each query point.\n neighbor_values_lo = values[x_lo]\n neighbor_values_hi = values[x_hi]\n neighbor_tangents_lo = tangents[x_lo]\n neighbor_tangents_hi = tangents[x_hi]\n value_mid = (\n neighbor_values_lo * h00 + neighbor_values_hi * h01 +\n neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)\n\n return torch.where(t < 0., value_before,\n torch.where(t > 1., value_after, value_mid))\n\n\ndef log_safe(x):\n x = torch.as_tensor(x)\n return torch.log(torch.min(x, torch.tensor(33e37).to(x)))\n\n\ndef load_spline_params():\n dirname = os.path.dirname(__file__)\n with open(os.path.join(dirname, '../misc/partition_spline.npz'), \"rb\") as spline_file:\n with np.load(spline_file, allow_pickle=False) as f:\n spline_x_scale = torch.tensor(f['x_scale'])\n spline_values = torch.tensor(f['values'])\n spline_tangents = torch.tensor(f['tangents'])\n\n return spline_x_scale, spline_values, spline_tangents\n\n\ndef get_partition_init(shape):\n shape = torch.as_tensor(shape)\n\n base1 = (2.25 * shape - 4.5) / (torch.abs(shape - 2) + 0.25) + shape + 2\n base2 = 5. / 18. * log_safe(4 * shape - 15) + 8\n\n return torch.where(shape < 4, base1, base2)\n\n\ndef get_partition(shape):\n shape = torch.as_tensor(shape)\n assert (shape >= 0).all()\n\n init = get_partition_init(shape)\n\n x_scale, values, tangents = load_spline_params()\n\n return interpolate1d(init * x_scale.to(init), values.to(init), tangents.to(init))\n\n\ndef general_adaptive_loss(x, shape, bowl=1.):\n input_shape = x.shape\n shape = torch.as_tensor(shape).to(x.device)\n bowl = torch.as_tensor(bowl).to(x.device)\n\n b = x.size(0)\n x = x.view(b, -1)\n\n if len(shape.shape) == 0:\n shape = shape.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)\n else:\n shape = shape.view(b, -1)\n\n if len(bowl.shape) == 0:\n bowl = bowl.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)\n else:\n bowl = bowl.view(b, -1)\n\n partition = get_partition(shape)\n ans = (torch.abs(shape - 2)/shape) * (torch.pow((torch.square(x/bowl) /\n torch.abs(shape - 2) + 1), shape/2) - 1) + log_safe(bowl) + log_safe(partition)\n\n return ans.view(input_shape)\n"
] | [
[
"numpy.load",
"torch.square",
"torch.as_tensor",
"torch.tensor",
"torch.where",
"torch.is_tensor",
"torch.abs"
]
] |
dzungcamlang/noise_adversarial_tacotron | [
"7a7fda49eb8bf82f5139743d55639d48ff204e9e"
] | [
"dataset/cut_chime.py"
] | [
"import hp\nfrom pathlib import Path\nimport numpy as np\nfrom tqdm import tqdm\nimport librosa\nimport torch\nimport librosa.filters\nimport numpy as np\nimport scipy\nfrom random import randint\nfrom os import makedirs\n\n\ndef load_wav(path, sample_rate):\n return librosa.core.load(path, sr=sample_rate)[0]\n\n\ndef save_wav(wav, path, sample_rate):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n scipy.io.wavfile.write(path, sample_rate, wav.astype(np.int16))\n\n\ndef get_segments(source, length, count):\n begins = []\n l = len(source)\n for _ in range(count):\n begins.append(randint(0, l - length - 1))\n segments = []\n for begin in begins:\n segments.append(source[begin: begin + length])\n return segments\n\n\ndef process_chime(\n source=hp.whole_chime_path,\n target=hp.part_chime_path,\n sr=16000,\n duration=30,\n count=10\n):\n \"\"\"\n Randomly picking segments from CHiME dataset, since full dataset is not necessary in our case.\n :param source:\n :param target:\n :param sr:\n :param duration:\n :param count:\n :return:\n \"\"\"\n makedirs(str(target), exist_ok=True)\n for path in tqdm(source.glob(\"*.wav\")):\n wave = load_wav(path, sr)\n if len(wave) < sr * 30: continue\n waves = get_segments(wave, duration * sr, count)\n for i, wave in enumerate(waves, 1):\n save_wav(wave, str(target / f\"{path.stem}_{i}.wav\"), sr)\n\n\nif __name__ == '__main__':\n print(\"Beginning segmenting CHiME4 noises.\")\n process_chime()\n print(\"Processing Finished\")\n"
] | [
[
"numpy.abs"
]
] |
JaeyoonSSim/Design-Project | [
"8a0037bec50b44b3f5d92da5254e79964fdaf9cf"
] | [
"Detector_1/fusion_detecting.py"
] | [
"import cv2\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport time\r\n\r\n# Initialize the parameters\r\nconfThreshold = 0.5 # Confidence threshold\r\nnmsThreshold = 0.4 # Non-maximum suppression threshold\r\ninpWidth = 416 # Width of network's input image\r\ninpHeight = 416 # Height of network's input image\r\nstarting_time = 0\r\nframe_id = 0\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\n# Load names of classes\r\nclassesFile = \"coco.names\"\r\nclasses = None\r\nwith open(classesFile, 'rt') as f:\r\n classes = f.read().rstrip('\\n').split('\\n')\r\n\r\n# Give the configuration and weight files for the model and load the network using them.\r\nmodelConfiguration = \"yolov3.cfg\"\r\nmodelWeights = \"yolov3.weights\"\r\nnet = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\r\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\r\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\r\n\r\ninputFile = \"presen_T.mp4\"\r\ninputFile2 = \"presen_R.mp4\"\r\noutputFile = \"yolo_out_py.avi\"\r\n\r\n# Open the video file\r\nif not os.path.isfile(inputFile):\r\n print(\"Input video file \", inputFile, \" doesn't exist\")\r\n sys.exit(1)\r\ncap = cv2.VideoCapture(inputFile)\r\ncap2 = cv2.VideoCapture(inputFile2)\r\noutputFile = inputFile[:-4] + \"_yolo_out_py.avi\"\r\n\r\n# Get the video writer initialized to save the output video\r\nvid_writer = cv2.VideoWriter(outputFile, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,\r\n (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))\r\n\r\n# Get the names of the output layers\r\ndef getOutputsNames(net):\r\n # Get the names of all the layers in the network\r\n layersNames = net.getLayerNames()\r\n # Get the names of the output layers, i.e. the layers with unconnected outputs\r\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\n\r\n# Draw the predicted bounding box\r\ndef drawPred(classId, conf, left, top, right, bottom):\r\n # Draw a bounding box.\r\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))\r\n label = '%.2f' % conf\r\n\r\n # Get the label for the class name and its confidence\r\n if classes:\r\n assert (classId < len(classes))\r\n label = '%s:%s' % (classes[classId], label)\r\n\r\n # Display the label at the top of the bounding box\r\n labelSize, baseLine = cv2.getTextSize(label, font, 0.5, 1)\r\n top = max(top, labelSize[1])\r\n cv2.putText(frame, label, (left, top), font, 1, (0, 255, 0), 2)\r\n\r\n# Remove the bounding boxes with low confidence using non-maxima suppression\r\ndef postprocess(frame, outs):\r\n frameHeight = frame.shape[0]\r\n frameWidth = frame.shape[1]\r\n\r\n # Scan through all the bounding boxes output from the network and keep only the\r\n # ones with high confidence scores. 
Assign the box's class label as the class with the highest score.\r\n classIds = []\r\n confidences = []\r\n boxes = []\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n classId = np.argmax(scores)\r\n confidence = scores[classId]\r\n if confidence > confThreshold:\r\n center_x = int(detection[0] * frameWidth)\r\n center_y = int(detection[1] * frameHeight)\r\n width = int(detection[2] * frameWidth)\r\n height = int(detection[3] * frameHeight)\r\n left = int(center_x - width / 2)\r\n top = int(center_y - height / 2)\r\n classIds.append(classId)\r\n confidences.append(float(confidence))\r\n boxes.append([left, top, width, height])\r\n\r\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\r\n # lower confidences.\r\n indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\r\n for i in indices:\r\n i = i[0]\r\n box = boxes[i]\r\n left = box[0]\r\n top = box[1]\r\n width = box[2]\r\n height = box[3]\r\n drawPred(classIds[i], confidences[i], left, top, left + width, top + height)\r\n\r\n# Main\r\nwhile True:\r\n # get frame from the video\r\n hasFrame, frame = cap.read()\r\n hasFrame2, frame2 = cap2.read()\r\n\r\n frame = cv2.resize(frame, dsize=(600, 402))\r\n frame2 = cv2.resize(frame2, dsize=(600, 402))\r\n\r\n cv2.imshow(\"Camera\", frame)\r\n cv2.imshow(\"Thermal_Camera\", frame2)\r\n # Stop the program if reached end of video\r\n if not hasFrame:\r\n print(\"Done processing !!!\")\r\n cv2.waitKey(3000)\r\n break\r\n\r\n # Create a 4D blob from a frame.\r\n blob = cv2.dnn.blobFromImage(frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)\r\n\r\n # Sets the input to the network\r\n net.setInput(blob)\r\n\r\n # Runs the forward pass to get output of the output layers\r\n outs = net.forward(getOutputsNames(net))\r\n\r\n # Remove the bounding boxes with low confidence\r\n postprocess(frame, outs)\r\n\r\n # Print the FPS\r\n current_time = time.time()\r\n sec = current_time - starting_time\r\n starting_time = current_time\r\n fps = 1 / (sec)\r\n str2 = \"FPS : %0.1f\" % fps\r\n # cv2.putText(frame, str2, (10, 50), font, 2, (0, 255, 0), 2)\r\n\r\n # Write the frame with the detection boxes\r\n vid_writer.write(frame.astype(np.uint8))\r\n\r\n # CAMERA RESULT\r\n cv2.imshow(\"CAMERA_Detection\", frame)\r\n\r\n\r\n img2 = None\r\n fast = cv2.FastFeatureDetector_create(30)\r\n fast.setNonmaxSuppression(0)\r\n kp = fast.detect(frame2, None)\r\n img2 = cv2.drawKeypoints(frame2, kp, img2, (0, 255, 255))\r\n # cv2.imshow(\"THERMAL\", img2)\r\n\r\n\r\n hsv = cv2.cvtColor(frame2, cv2.COLOR_BGR2HSV)\r\n car_prediction = 30\r\n lower_white = np.array([0, 0, 255 - car_prediction], dtype=np.uint8)\r\n upper_white = np.array([255, car_prediction, 255], dtype=np.uint8)\r\n mask_white = cv2.inRange(hsv, lower_white, upper_white)\r\n res = cv2.bitwise_and(frame2, frame2, mask=mask_white)\r\n # cv2.imshow(\"THERMAL_CAR\", res)\r\n\r\n\r\n res2 = None\r\n res2 = res\r\n igray = cv2.cvtColor(res2, cv2.COLOR_BGR2GRAY)\r\n iret, ibinary = cv2.threshold(igray, 127, 255, cv2.THRESH_BINARY)\r\n contours, hierachy = cv2.findContours(ibinary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n for i in range(len(contours)):\r\n cv2.drawContours(res2, [contours[i]], 0, (255, 255, 255), 2)\r\n cv2.putText(res2, \"car\", tuple(contours[i][0][0]), font, 1, (0, 255, 0), 1)\r\n # cv2.imshow(\"THERMAL_CONTOUR\", res2)\r\n\r\n\r\n # THERMAL PROCESSING RESULT\r\n dst = cv2.addWeighted(res2, 1, frame2, 1, 0)\r\n #cv2.imshow('THERMAL_RES',dst)\r\n 
#cv2.imshow(\"THERMAL\",frame2)\r\n\r\n # FINAL RESULT\r\n dst2 = cv2.addWeighted(res2, 1, frame, 1, 0)\r\n cv2.imshow(\"RESULT\",dst2)\r\n\r\n\r\n # End the video with \"Esc\"\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()"
] | [
[
"numpy.array",
"numpy.argmax"
]
] |
grisoniFr/virtual_libraries | [
"0aac0ce249f6f3bc529abb3cbdf2d3f49be84388"
] | [
"experiments/do_data_generation.py"
] | [
"import os, sys\nimport time\nimport warnings\nimport argparse\nimport configparser\nimport ast\nimport numpy as np\nfrom math import log\nfrom rdkit import Chem\nfrom rdkit import rdBase\nrdBase.DisableLog('rdApp.*')\nfrom rdkit.Chem import Draw\n\nfrom keras.models import load_model\n\nsys.path.append('../src/')\nfrom python import helper as hp\nfrom python import fixed_parameters as FP\n\nparser = argparse.ArgumentParser(description='SMILES generation')\nparser.add_argument('-fn','--filename', type=str, help='Path to the fine-tuning txt file', required=True)\nparser.add_argument('-m','--model_path', type=str, help='Path to a pretrained model', required=True)\nparser.add_argument('-v','--verbose', type=bool, help='Verbose', required=True)\n\n\ndef int_to_smile(array, indices_token, pad_char):\n \"\"\" \n From an array of int, return a list of \n molecules in string smile format\n Note: remove the padding char\n \"\"\"\n all_mols = []\n for seq in array:\n new_mol = [indices_token[str(int(x))] for x in seq]\n all_mols.append(''.join(new_mol).replace(pad_char, ''))\n return all_mols\n\n\ndef one_hot_encode(token_lists, n_chars):\n \n output = np.zeros((len(token_lists), len(token_lists[0]), n_chars))\n for i, token_list in enumerate(token_lists):\n for j, token in enumerate(token_list):\n output[i, j, int(token)] = 1\n return output\n \ndef sample(model, temp, start_char, end_char, max_len, indices_token, token_indices):\n \n n_chars = len(indices_token)\n\n seed_token = [token_indices[start_char]]\n generated = indices_token[str(seed_token[0])]\n \n while generated[-1] != end_char and len(generated) < max_len:\n x_seed = one_hot_encode([seed_token], n_chars)\n full_preds = model.predict(x_seed, verbose=0)[0]\n logits = full_preds[-1]\n \n probas, next_char_ind = get_token_proba(logits, temp)\n \n next_char = indices_token[str(next_char_ind)]\n generated += next_char\n seed_token += [next_char_ind]\n \n return generated\n\ndef get_token_proba(preds, temp):\n \n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temp\n exp_preds = np.exp(preds)\n \n probas = exp_preds / np.sum(exp_preds)\n char_ind = np.argmax(np.random.multinomial(1, probas, 1))\n \n return probas, char_ind\n\ndef softmax(preds):\n return np.exp(preds)/np.sum(np.exp(preds))\n\n\nif __name__ == '__main__':\n \n start = time.time()\n \n ####################################\n # get back parameters\n args = vars(parser.parse_args())\n \n verbose = args['verbose']\n filename = args['filename']\n model_path = args['model_path']\n name_data = filename.split('/')[-1].replace('.txt','')\n config = configparser.ConfigParser()\n config.read('parameters.ini')\n \n if verbose: print('\\nSTART SAMPLING')\n ####################################\n \n \n \n ####################################\n # path to save data\n save_path = f'results/{name_data}/generated_data/'\n os.makedirs(save_path, exist_ok=True)\n \n # path to checkpoints\n dir_ckpts = f'results/{name_data}/models/'\n ####################################\n \n \n \n \n ####################################\n # Parameters to sample novo smiles\n temp = float(config['EXPERIMENTS']['temp'])\n n_sample = int(config['EXPERIMENTS']['n_sample'])\n if n_sample>5000:\n warnings.warn('You will sample more than 5000 SMILES; this will take a while')\n \n max_len = int(config['PROCESSING']['max_len'])\n pad_char = FP.PROCESSING_FIXED['pad_char']\n start_char = FP.PROCESSING_FIXED['start_char']\n end_char = FP.PROCESSING_FIXED['end_char']\n indices_token = FP.INDICES_TOKEN\n 
token_indices = FP.TOKEN_INDICES\n ####################################\n \n \n \n ####################################\n # start the sampling of new SMILES\n epoch = model_path.split('/')[-1].replace('.h5', '')\n if verbose: print(f'Sampling from model saved at epoch {epoch}')\n \n model = load_model(model_path)\n \n generated_smi = []\n for n in range(n_sample):\n generated_smi.append(sample(model, temp, \n start_char, end_char, max_len+1, \n indices_token, token_indices))\n hp.save_obj(generated_smi, f'{save_path}{epoch}_{temp}')\n \n end = time.time()\n if verbose: print(f'SAMPLING DONE for model from epoch {epoch} in {end-start:.2f} seconds') \n ####################################\n "
] | [
[
"numpy.sum",
"numpy.asarray",
"numpy.exp",
"numpy.log",
"numpy.random.multinomial"
]
] |
JosmarSuarez/yolact | [
"43b694603638562ffcdc81df7b04783c9990291c"
] | [
"yolact.py"
] | [
"import torch, torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.models.resnet import Bottleneck\nimport numpy as np\nfrom itertools import product\nfrom math import sqrt\nfrom typing import List\nfrom collections import defaultdict\n\nfrom data.config import cfg, mask_type\nfrom layers import Detect\nfrom layers.interpolate import InterpolateModule\nfrom backbone import construct_backbone\n\nimport torch.backends.cudnn as cudnn\nfrom utils import timer\nfrom utils.functions import MovingAverage, make_net\n\n# This is required for Pytorch 1.0.1 on Windows to initialize Cuda on some driver versions.\n# See the bug report here: https://github.com/pytorch/pytorch/issues/17108\ntorch.cuda.current_device()\n\n# As of March 10, 2019, Pytorch DataParallel still doesn't support JIT Script Modules\nuse_jit = torch.cuda.device_count() <= 1\nif not use_jit:\n print('Multiple GPUs detected! Turning off JIT.')\n\nScriptModuleWrapper = torch.jit.ScriptModule if use_jit else nn.Module\nscript_method_wrapper = torch.jit.script_method if use_jit else lambda fn, _rcn=None: fn\n\n\n\nclass Concat(nn.Module):\n def __init__(self, nets, extra_params):\n super().__init__()\n\n self.nets = nn.ModuleList(nets)\n self.extra_params = extra_params\n \n def forward(self, x):\n # Concat each along the channel dimension\n return torch.cat([net(x) for net in self.nets], dim=1, **self.extra_params)\n\nprior_cache = defaultdict(lambda: None)\n\nclass PredictionModule(nn.Module):\n \"\"\"\n The (c) prediction module adapted from DSSD:\n https://arxiv.org/pdf/1701.06659.pdf\n\n Note that this is slightly different to the module in the paper\n because the Bottleneck block actually has a 3x3 convolution in\n the middle instead of a 1x1 convolution. Though, I really can't\n be arsed to implement it myself, and, who knows, this might be\n better.\n\n Args:\n - in_channels: The input feature size.\n - out_channels: The output feature size (must be a multiple of 4).\n - aspect_ratios: A list of lists of priorbox aspect ratios (one list per scale).\n - scales: A list of priorbox scales relative to this layer's convsize.\n For instance: If this layer has convouts of size 30x30 for\n an image of size 600x600, the 'default' (scale\n of 1) for this layer would produce bounding\n boxes with an area of 20x20px. 
If the scale is\n .5 on the other hand, this layer would consider\n bounding boxes with area 10x10px, etc.\n - parent: If parent is a PredictionModule, this module will use all the layers\n from parent instead of from this module.\n \"\"\"\n \n def __init__(self, in_channels, out_channels=1024, aspect_ratios=[[1]], scales=[1], parent=None, index=0):\n super().__init__()\n\n self.num_classes = cfg.num_classes\n self.mask_dim = cfg.mask_dim # Defined by Yolact\n self.num_priors = sum(len(x)*len(scales) for x in aspect_ratios)\n self.parent = [parent] # Don't include this in the state dict\n self.index = index\n self.num_heads = cfg.num_heads # Defined by Yolact\n\n if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb:\n self.mask_dim = self.mask_dim // self.num_heads\n\n if cfg.mask_proto_prototypes_as_features:\n in_channels += self.mask_dim\n \n if parent is None:\n if cfg.extra_head_net is None:\n out_channels = in_channels\n else:\n self.upfeature, out_channels = make_net(in_channels, cfg.extra_head_net)\n\n if cfg.use_prediction_module:\n self.block = Bottleneck(out_channels, out_channels // 4)\n self.conv = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=True)\n self.bn = nn.BatchNorm2d(out_channels)\n\n self.bbox_layer = nn.Conv2d(out_channels, self.num_priors * 4, **cfg.head_layer_params)\n self.conf_layer = nn.Conv2d(out_channels, self.num_priors * self.num_classes, **cfg.head_layer_params)\n self.mask_layer = nn.Conv2d(out_channels, self.num_priors * self.mask_dim, **cfg.head_layer_params)\n \n if cfg.use_mask_scoring:\n self.score_layer = nn.Conv2d(out_channels, self.num_priors, **cfg.head_layer_params)\n\n if cfg.use_instance_coeff:\n self.inst_layer = nn.Conv2d(out_channels, self.num_priors * cfg.num_instance_coeffs, **cfg.head_layer_params)\n \n # What is this ugly lambda doing in the middle of all this clean prediction module code?\n def make_extra(num_layers):\n if num_layers == 0:\n return lambda x: x\n else:\n # Looks more complicated than it is. 
This just creates an array of num_layers alternating conv-relu\n return nn.Sequential(*sum([[\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.ReLU(inplace=True)\n ] for _ in range(num_layers)], []))\n\n self.bbox_extra, self.conf_extra, self.mask_extra = [make_extra(x) for x in cfg.extra_layers]\n \n if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_coeff_gate:\n self.gate_layer = nn.Conv2d(out_channels, self.num_priors * self.mask_dim, kernel_size=3, padding=1)\n\n self.aspect_ratios = aspect_ratios\n self.scales = scales\n\n self.priors = None\n self.last_conv_size = None\n self.last_img_size = None\n\n def forward(self, x):\n \"\"\"\n Args:\n - x: The convOut from a layer in the backbone network\n Size: [batch_size, in_channels, conv_h, conv_w])\n\n Returns a tuple (bbox_coords, class_confs, mask_output, prior_boxes) with sizes\n - bbox_coords: [batch_size, conv_h*conv_w*num_priors, 4]\n - class_confs: [batch_size, conv_h*conv_w*num_priors, num_classes]\n - mask_output: [batch_size, conv_h*conv_w*num_priors, mask_dim]\n - prior_boxes: [conv_h*conv_w*num_priors, 4]\n \"\"\"\n # In case we want to use another module's layers\n src = self if self.parent[0] is None else self.parent[0]\n \n conv_h = x.size(2)\n conv_w = x.size(3)\n \n if cfg.extra_head_net is not None:\n x = src.upfeature(x)\n \n if cfg.use_prediction_module:\n # The two branches of PM design (c)\n a = src.block(x)\n \n b = src.conv(x)\n b = src.bn(b)\n b = F.relu(b)\n \n # TODO: Possibly switch this out for a product\n x = a + b\n\n bbox_x = src.bbox_extra(x)\n conf_x = src.conf_extra(x)\n mask_x = src.mask_extra(x)\n\n bbox = src.bbox_layer(bbox_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, 4)\n conf = src.conf_layer(conf_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.num_classes)\n \n if cfg.eval_mask_branch:\n mask = src.mask_layer(mask_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim)\n else:\n mask = torch.zeros(x.size(0), bbox.size(1), self.mask_dim, device=bbox.device)\n\n if cfg.use_mask_scoring:\n score = src.score_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, 1)\n\n if cfg.use_instance_coeff:\n inst = src.inst_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, cfg.num_instance_coeffs) \n\n # See box_utils.decode for an explanation of this\n if cfg.use_yolo_regressors:\n bbox[:, :, :2] = torch.sigmoid(bbox[:, :, :2]) - 0.5\n bbox[:, :, 0] /= conv_w\n bbox[:, :, 1] /= conv_h\n\n if cfg.eval_mask_branch:\n if cfg.mask_type == mask_type.direct:\n mask = torch.sigmoid(mask)\n elif cfg.mask_type == mask_type.lincomb:\n mask = cfg.mask_proto_coeff_activation(mask)\n\n if cfg.mask_proto_coeff_gate:\n gate = src.gate_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim)\n mask = mask * torch.sigmoid(gate)\n\n if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb:\n mask = F.pad(mask, (self.index * self.mask_dim, (self.num_heads - self.index - 1) * self.mask_dim), mode='constant', value=0)\n \n priors = self.make_priors(conv_h, conv_w, x.device)\n\n preds = { 'loc': bbox, 'conf': conf, 'mask': mask, 'priors': priors }\n\n if cfg.use_mask_scoring:\n preds['score'] = score\n\n if cfg.use_instance_coeff:\n preds['inst'] = inst\n \n return preds\n\n def make_priors(self, conv_h, conv_w, device):\n \"\"\" Note that priors are [x,y,width,height] where (x,y) is the center of the box. 
\"\"\"\n global prior_cache\n size = (conv_h, conv_w)\n\n with timer.env('makepriors'):\n if self.last_img_size != (cfg._tmp_img_w, cfg._tmp_img_h):\n prior_data = []\n\n # Iteration order is important (it has to sync up with the convout)\n for j, i in product(range(conv_h), range(conv_w)):\n # +0.5 because priors are in center-size notation\n x = (i + 0.5) / conv_w\n y = (j + 0.5) / conv_h\n \n for ars in self.aspect_ratios:\n for scale in self.scales:\n for ar in ars:\n if not cfg.backbone.preapply_sqrt:\n ar = sqrt(ar)\n\n if cfg.backbone.use_pixel_scales:\n w = scale * ar / cfg.max_size\n h = scale / ar / cfg.max_size\n else:\n w = scale * ar / conv_w\n h = scale / ar / conv_h\n \n # This is for backward compatability with a bug where I made everything square by accident\n if cfg.backbone.use_square_anchors:\n h = w\n\n prior_data += [x, y, w, h]\n\n self.priors = torch.Tensor(prior_data, device=device).view(-1, 4).detach()\n self.priors.requires_grad = False\n self.last_img_size = (cfg._tmp_img_w, cfg._tmp_img_h)\n self.last_conv_size = (conv_w, conv_h)\n prior_cache[size] = None\n elif self.priors.device != device:\n # This whole weird situation is so that DataParalell doesn't copy the priors each iteration\n if prior_cache[size] is None:\n prior_cache[size] = {}\n \n if device not in prior_cache[size]:\n prior_cache[size][device] = self.priors.to(device)\n\n self.priors = prior_cache[size][device]\n \n return self.priors\n\nclass FPN(ScriptModuleWrapper):\n \"\"\"\n Implements a general version of the FPN introduced in\n https://arxiv.org/pdf/1612.03144.pdf\n\n Parameters (in cfg.fpn):\n - num_features (int): The number of output features in the fpn layers.\n - interpolation_mode (str): The mode to pass to F.interpolate.\n - num_downsample (int): The number of downsampled layers to add onto the selected layers.\n These extra layers are downsampled from the last selected layer.\n\n Args:\n - in_channels (list): For each conv layer you supply in the forward pass,\n how many features will it have?\n \"\"\"\n __constants__ = ['interpolation_mode', 'num_downsample', 'use_conv_downsample', 'relu_pred_layers',\n 'lat_layers', 'pred_layers', 'downsample_layers', 'relu_downsample_layers']\n\n def __init__(self, in_channels):\n super().__init__()\n\n self.lat_layers = nn.ModuleList([\n nn.Conv2d(x, cfg.fpn.num_features, kernel_size=1)\n for x in reversed(in_channels)\n ])\n\n # This is here for backwards compatability\n padding = 1 if cfg.fpn.pad else 0\n self.pred_layers = nn.ModuleList([\n nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=padding)\n for _ in in_channels\n ])\n\n if cfg.fpn.use_conv_downsample:\n self.downsample_layers = nn.ModuleList([\n nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=1, stride=2)\n for _ in range(cfg.fpn.num_downsample)\n ])\n \n self.interpolation_mode = cfg.fpn.interpolation_mode\n self.num_downsample = cfg.fpn.num_downsample\n self.use_conv_downsample = cfg.fpn.use_conv_downsample\n self.relu_downsample_layers = cfg.fpn.relu_downsample_layers\n self.relu_pred_layers = cfg.fpn.relu_pred_layers\n\n @script_method_wrapper\n def forward(self, convouts:List[torch.Tensor]):\n \"\"\"\n Args:\n - convouts (list): A list of convouts for the corresponding layers in in_channels.\n Returns:\n - A list of FPN convouts in the same order as x with extra downsample layers if requested.\n \"\"\"\n\n out = []\n x = torch.zeros(1, device=convouts[0].device)\n for i in range(len(convouts)):\n out.append(x)\n\n # For 
backward compatability, the conv layers are stored in reverse but the input and output is\n # given in the correct order. Thus, use j=-i-1 for the input and output and i for the conv layers.\n j = len(convouts)\n for lat_layer in self.lat_layers:\n j -= 1\n\n if j < len(convouts) - 1:\n _, _, h, w = convouts[j].size()\n x = F.interpolate(x, size=(h, w), mode=self.interpolation_mode, align_corners=False)\n \n x = x + lat_layer(convouts[j])\n out[j] = x\n \n # This janky second loop is here because TorchScript.\n j = len(convouts)\n for pred_layer in self.pred_layers:\n j -= 1\n out[j] = pred_layer(out[j])\n\n if self.relu_pred_layers:\n F.relu(out[j], inplace=True)\n\n cur_idx = len(out)\n\n # In the original paper, this takes care of P6\n if self.use_conv_downsample:\n for downsample_layer in self.downsample_layers:\n out.append(downsample_layer(out[-1]))\n else:\n for idx in range(self.num_downsample):\n # Note: this is an untested alternative to out.append(out[-1][:, :, ::2, ::2]). Thanks TorchScript.\n out.append(nn.functional.max_pool2d(out[-1], 1, stride=2))\n\n if self.relu_downsample_layers:\n for idx in range(len(out) - cur_idx):\n out[idx] = F.relu(out[idx + cur_idx], inplace=False)\n\n return out\n\nclass FastMaskIoUNet(ScriptModuleWrapper):\n\n def __init__(self):\n super().__init__()\n input_channels = 1\n last_layer = [(cfg.num_classes-1, 1, {})]\n self.maskiou_net, _ = make_net(input_channels, cfg.maskiou_net + last_layer, include_last_relu=True)\n\n def forward(self, x):\n x = self.maskiou_net(x)\n maskiou_p = F.max_pool2d(x, kernel_size=x.size()[2:]).squeeze(-1).squeeze(-1)\n\n return maskiou_p\n\n\n\nclass Yolact(nn.Module):\n \"\"\"\n\n\n ██╗ ██╗ ██████╗ ██╗ █████╗ ██████╗████████╗\n ╚██╗ ██╔╝██╔═══██╗██║ ██╔══██╗██╔════╝╚══██╔══╝\n ╚████╔╝ ██║ ██║██║ ███████║██║ ██║ \n ╚██╔╝ ██║ ██║██║ ██╔══██║██║ ██║ \n ██║ ╚██████╔╝███████╗██║ ██║╚██████╗ ██║ \n ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ \n\n\n You can set the arguments by changing them in the backbone config object in config.py.\n\n Parameters (in cfg.backbone):\n - selected_layers: The indices of the conv layers to use for prediction.\n - pred_scales: A list with len(selected_layers) containing tuples of scales (see PredictionModule)\n - pred_aspect_ratios: A list of lists of aspect ratios with len(selected_layers) (see PredictionModule)\n \"\"\"\n\n def __init__(self, only_last_layer=False):\n super().__init__()\n \n self.only_last_layer = only_last_layer\n self.backbone = construct_backbone(cfg.backbone)\n\n if cfg.freeze_bn:\n self.freeze_bn()\n\n # Compute mask_dim here and add it back to the config. 
Make sure Yolact's constructor is called early!\n if cfg.mask_type == mask_type.direct:\n cfg.mask_dim = cfg.mask_size**2\n elif cfg.mask_type == mask_type.lincomb:\n if cfg.mask_proto_use_grid:\n self.grid = torch.Tensor(np.load(cfg.mask_proto_grid_file))\n self.num_grids = self.grid.size(0)\n else:\n self.num_grids = 0\n\n self.proto_src = cfg.mask_proto_src\n \n if self.proto_src is None: in_channels = 3\n elif cfg.fpn is not None: in_channels = cfg.fpn.num_features\n else: in_channels = self.backbone.channels[self.proto_src]\n in_channels += self.num_grids\n\n # The include_last_relu=false here is because we might want to change it to another function\n self.proto_net, cfg.mask_dim = make_net(in_channels, cfg.mask_proto_net, include_last_relu=False)\n\n if cfg.mask_proto_bias:\n cfg.mask_dim += 1\n\n\n self.selected_layers = cfg.backbone.selected_layers\n src_channels = self.backbone.channels\n\n if cfg.use_maskiou:\n self.maskiou_net = FastMaskIoUNet()\n\n if cfg.fpn is not None:\n # Some hacky rewiring to accomodate the FPN\n self.fpn = FPN([src_channels[i] for i in self.selected_layers])\n self.selected_layers = list(range(len(self.selected_layers) + cfg.fpn.num_downsample))\n src_channels = [cfg.fpn.num_features] * len(self.selected_layers)\n\n\n self.prediction_layers = nn.ModuleList()\n cfg.num_heads = len(self.selected_layers)\n\n for idx, layer_idx in enumerate(self.selected_layers):\n # If we're sharing prediction module weights, have every module's parent be the first one\n parent = None\n if cfg.share_prediction_module and idx > 0:\n parent = self.prediction_layers[0]\n\n pred = PredictionModule(src_channels[layer_idx], src_channels[layer_idx],\n aspect_ratios = cfg.backbone.pred_aspect_ratios[idx],\n scales = cfg.backbone.pred_scales[idx],\n parent = parent,\n index = idx)\n self.prediction_layers.append(pred)\n\n # Extra parameters for the extra losses\n if cfg.use_class_existence_loss:\n # This comes from the smallest layer selected\n # Also note that cfg.num_classes includes background\n self.class_existence_fc = nn.Linear(src_channels[-1], cfg.num_classes - 1)\n \n if cfg.use_semantic_segmentation_loss:\n self.semantic_seg_conv = nn.Conv2d(src_channels[0], cfg.num_classes-1, kernel_size=1)\n\n # For use in evaluation\n self.detect = Detect(cfg.num_classes, bkg_label=0, top_k=cfg.nms_top_k,\n conf_thresh=cfg.nms_conf_thresh, nms_thresh=cfg.nms_thresh)\n\n def save_weights(self, path):\n \"\"\" Saves the model's weights using compression because the file sizes were getting too big. \"\"\"\n torch.save(self.state_dict(), path)\n \n def load_weights(self, path):\n \"\"\" Loads weights from a compressed save file. \"\"\"\n state_dict = torch.load(path)\n\n # For backward compatability, remove these (the new variable is called layers)\n for key in list(state_dict.keys()):\n if key.startswith('backbone.layer') and not key.startswith('backbone.layers'):\n del state_dict[key]\n \n # Also for backward compatibility with v1.0 weights, do this check\n if key.startswith('fpn.downsample_layers.'):\n if cfg.fpn is not None and int(key.split('.')[2]) >= cfg.fpn.num_downsample:\n del state_dict[key]\n # Uncomment this in normal conditions\n # self.load_state_dict(state_dict)\n # Added this for fine-tuning. Comment this in normal conditions.\n try:\n self.load_state_dict(state_dict)\n except RuntimeError as e:\n print('Ignoring \"' + str(e) + '\"')\n\n def init_weights(self, backbone_path):\n \"\"\" Initialize weights for training. 
\"\"\"\n # Initialize the backbone with the pretrained weights.\n self.backbone.init_backbone(backbone_path)\n\n conv_constants = getattr(nn.Conv2d(1, 1, 1), '__constants__')\n \n # Quick lambda to test if one list contains the other\n def all_in(x, y):\n for _x in x:\n if _x not in y:\n return False\n return True\n\n # Initialize the rest of the conv layers with xavier\n for name, module in self.named_modules():\n # See issue #127 for why we need such a complicated condition if the module is a WeakScriptModuleProxy\n # Broke in 1.3 (see issue #175), WeakScriptModuleProxy was turned into just ScriptModule.\n # Broke in 1.4 (see issue #292), where RecursiveScriptModule is the new star of the show.\n # Note that this might break with future pytorch updates, so let me know if it does\n is_script_conv = False\n if 'Script' in type(module).__name__:\n # 1.4 workaround: now there's an original_name member so just use that\n if hasattr(module, 'original_name'):\n is_script_conv = 'Conv' in module.original_name\n # 1.3 workaround: check if this has the same constants as a conv module\n else:\n is_script_conv = (\n all_in(module.__dict__['_constants_set'], conv_constants)\n and all_in(conv_constants, module.__dict__['_constants_set']))\n \n is_conv_layer = isinstance(module, nn.Conv2d) or is_script_conv\n\n if is_conv_layer and module not in self.backbone.backbone_modules:\n nn.init.xavier_uniform_(module.weight.data)\n\n if module.bias is not None:\n if cfg.use_focal_loss and 'conf_layer' in name:\n if not cfg.use_sigmoid_focal_loss:\n # Initialize the last layer as in the focal loss paper.\n # Because we use softmax and not sigmoid, I had to derive an alternate expression\n # on a notecard. Define pi to be the probability of outputting a foreground detection.\n # Then let z = sum(exp(x)) - exp(x_0). Finally let c be the number of foreground classes.\n # Chugging through the math, this gives us\n # x_0 = log(z * (1 - pi) / pi) where 0 is the background class\n # x_i = log(z / c) for all i > 0\n # For simplicity (and because we have a degree of freedom here), set z = 1. 
Then we have\n # x_0 = log((1 - pi) / pi) note: don't split up the log for numerical stability\n # x_i = -log(c) for all i > 0\n module.bias.data[0] = np.log((1 - cfg.focal_loss_init_pi) / cfg.focal_loss_init_pi)\n module.bias.data[1:] = -np.log(module.bias.size(0) - 1)\n else:\n module.bias.data[0] = -np.log(cfg.focal_loss_init_pi / (1 - cfg.focal_loss_init_pi))\n module.bias.data[1:] = -np.log((1 - cfg.focal_loss_init_pi) / cfg.focal_loss_init_pi)\n else:\n module.bias.data.zero_()\n \n def train(self, mode=True):\n super().train(mode)\n\n if cfg.freeze_bn:\n self.freeze_bn()\n\n def freeze_bn(self, enable=False):\n \"\"\" Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 \"\"\"\n for module in self.modules():\n if isinstance(module, nn.BatchNorm2d):\n module.train() if enable else module.eval()\n\n module.weight.requires_grad = enable\n module.bias.requires_grad = enable\n \n def forward(self, x):\n \"\"\" The input should be of size [batch_size, 3, img_h, img_w] \"\"\"\n _, _, img_h, img_w = x.size()\n cfg._tmp_img_h = img_h\n cfg._tmp_img_w = img_w\n \n with timer.env('backbone'):\n outs = self.backbone(x)\n\n if cfg.fpn is not None:\n with timer.env('fpn'):\n # Use backbone.selected_layers because we overwrote self.selected_layers\n outs = [outs[i] for i in cfg.backbone.selected_layers]\n outs = self.fpn(outs)\n\n proto_out = None\n if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:\n with timer.env('proto'):\n proto_x = x if self.proto_src is None else outs[self.proto_src]\n \n if self.num_grids > 0:\n grids = self.grid.repeat(proto_x.size(0), 1, 1, 1)\n proto_x = torch.cat([proto_x, grids], dim=1)\n\n proto_out = self.proto_net(proto_x)\n proto_out = cfg.mask_proto_prototype_activation(proto_out)\n\n if cfg.mask_proto_prototypes_as_features:\n # Clone here because we don't want to permute this, though idk if contiguous makes this unnecessary\n proto_downsampled = proto_out.clone()\n\n if cfg.mask_proto_prototypes_as_features_no_grad:\n proto_downsampled = proto_out.detach()\n \n # Move the features last so the multiplication is easy\n proto_out = proto_out.permute(0, 2, 3, 1).contiguous()\n\n if cfg.mask_proto_bias:\n bias_shape = [x for x in proto_out.size()]\n bias_shape[-1] = 1\n proto_out = torch.cat([proto_out, torch.ones(*bias_shape)], -1)\n\n\n with timer.env('pred_heads'):\n pred_outs = { 'loc': [], 'conf': [], 'mask': [], 'priors': [] }\n\n if cfg.use_mask_scoring:\n pred_outs['score'] = []\n\n if cfg.use_instance_coeff:\n pred_outs['inst'] = []\n \n for idx, pred_layer in zip(self.selected_layers, self.prediction_layers):\n pred_x = outs[idx]\n\n if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_prototypes_as_features:\n # Scale the prototypes down to the current prediction layer's size and add it as inputs\n proto_downsampled = F.interpolate(proto_downsampled, size=outs[idx].size()[2:], mode='bilinear', align_corners=False)\n pred_x = torch.cat([pred_x, proto_downsampled], dim=1)\n\n # A hack for the way dataparallel works\n if cfg.share_prediction_module and pred_layer is not self.prediction_layers[0]:\n pred_layer.parent = [self.prediction_layers[0]]\n \n if self.only_last_layer:\n p = pred_layer(pred_x.detach())\n else:\n p = pred_layer(pred_x)\n \n for k, v in p.items():\n pred_outs[k].append(v)\n\n for k, v in pred_outs.items():\n pred_outs[k] = torch.cat(v, -2)\n\n if proto_out is not None:\n pred_outs['proto'] = proto_out\n\n if self.training:\n # For the extra loss functions\n if 
cfg.use_class_existence_loss:\n pred_outs['classes'] = self.class_existence_fc(outs[-1].mean(dim=(2, 3)))\n\n if cfg.use_semantic_segmentation_loss:\n pred_outs['segm'] = self.semantic_seg_conv(outs[0])\n\n return pred_outs\n else:\n if cfg.use_mask_scoring:\n pred_outs['score'] = torch.sigmoid(pred_outs['score'])\n\n if cfg.use_focal_loss:\n if cfg.use_sigmoid_focal_loss:\n # Note: even though conf[0] exists, this mode doesn't train it so don't use it\n pred_outs['conf'] = torch.sigmoid(pred_outs['conf'])\n if cfg.use_mask_scoring:\n pred_outs['conf'] *= pred_outs['score']\n elif cfg.use_objectness_score:\n # See focal_loss_sigmoid in multibox_loss.py for details\n objectness = torch.sigmoid(pred_outs['conf'][:, :, 0])\n pred_outs['conf'][:, :, 1:] = objectness[:, :, None] * F.softmax(pred_outs['conf'][:, :, 1:], -1)\n pred_outs['conf'][:, :, 0 ] = 1 - objectness\n else:\n pred_outs['conf'] = F.softmax(pred_outs['conf'], -1)\n else:\n\n if cfg.use_objectness_score:\n objectness = torch.sigmoid(pred_outs['conf'][:, :, 0])\n \n pred_outs['conf'][:, :, 1:] = (objectness > 0.10)[..., None] \\\n * F.softmax(pred_outs['conf'][:, :, 1:], dim=-1)\n \n else:\n pred_outs['conf'] = F.softmax(pred_outs['conf'], -1)\n\n return self.detect(pred_outs, self)\n\n\n\n\n# Some testing code\nif __name__ == '__main__':\n from utils.functions import init_console\n init_console()\n\n # Use the first argument to set the config if you want\n import sys\n if len(sys.argv) > 1:\n from data.config import set_cfg\n set_cfg(sys.argv[1])\n\n net = Yolact()\n net.train()\n net.init_weights(backbone_path='weights/' + cfg.backbone.path)\n\n # GPU\n net = net.cuda()\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n x = torch.zeros((1, 3, cfg.max_size, cfg.max_size))\n y = net(x)\n\n for p in net.prediction_layers:\n print(p.last_conv_size)\n\n print()\n for k, a in y.items():\n print(k + ': ', a.size(), torch.sum(a))\n exit()\n \n net(x)\n # timer.disable('pass2')\n avg = MovingAverage()\n try:\n while True:\n timer.reset()\n with timer.env('everything else'):\n net(x)\n avg.add(timer.total_time())\n print('\\033[2J') # Moves console cursor to 0,0\n timer.print_stats()\n print('Avg fps: %.2f\\tAvg ms: %.2f ' % (1/avg.get_avg(), avg.get_avg()*1000))\n except KeyboardInterrupt:\n pass\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.softmax",
"torch.set_default_tensor_type",
"numpy.log",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.functional.pad",
"torch.cuda.device_count",
"torch.sigmoid",
"torch.Tensor",
"numpy.load",
"torch.ones",
"torch.load",
"torch.nn.functional.max_pool2d",
"torch.cuda.current_device",
"torch.sum",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.functional.interpolate"
]
] |
metabacchi/FuzzyClassificator | [
"f59c10364b872edce342403db6ef26e30d7f69b8"
] | [
"pybrain/tools/functions.py"
] | [
"__author__ = 'Tom Schaul, tom@idsia.ch'\n\nfrom scipy import array, exp, tanh, clip, log, dot, sqrt, power, pi, tan, diag, rand, real_if_close\nfrom scipy.linalg import inv, det, svd, logm, expm2\n\n\ndef semilinear(x):\n \"\"\" This function ensures that the values of the array are always positive. It is\n x+1 for x=>0 and exp(x) for x<0. \"\"\"\n try:\n # assume x is a numpy array\n shape = x.shape\n x.flatten()\n x = x.tolist()\n except AttributeError:\n # no, it wasn't: build shape from length of list\n shape = (1, len(x))\n def f(val):\n if val < 0:\n # exponential function for x<0\n return safeExp(val)\n else:\n # linear function for x>=0\n return val + 1.0\n return array(map(f, x)).reshape(shape)\n\n\ndef semilinearPrime(x):\n \"\"\" This function is the first derivative of the semilinear function (above).\n It is needed for the backward pass of the module. \"\"\"\n try:\n # assume x is a numpy array\n shape = x.shape\n x.flatten()\n x = x.tolist()\n except AttributeError:\n # no, it wasn't: build shape from length of list\n shape = (1, len(x))\n def f(val):\n if val < 0:\n # exponential function for x<0\n return safeExp(val)\n else:\n # linear function for x>=0\n return 1.0\n return array(map(f, x)).reshape(shape)\n\n\ndef safeExp(x):\n \"\"\" Bounded range for the exponential function (won't produce inf or NaN). \"\"\"\n return exp(clip(x, -500, 500))\n\n\ndef sigmoid(x):\n \"\"\" Logistic sigmoid function. \"\"\"\n return 1. / (1. + safeExp(-x))\n\n\ndef sigmoidPrime(x):\n \"\"\" Derivative of logistic sigmoid. \"\"\"\n tmp = sigmoid(x)\n return tmp * (1 - tmp)\n\n\ndef tanhPrime(x):\n \"\"\" Derivative of tanh. \"\"\"\n tmp = tanh(x)\n return 1 - tmp * tmp\n\n\ndef ranking(R):\n \"\"\" Produces a linear ranking of the values in R. \"\"\"\n l = sorted(list(enumerate(R)), cmp=lambda a, b: cmp(a[1], b[1]))\n l = sorted(list(enumerate(l)), cmp=lambda a, b: cmp(a[1], b[1]))\n return array(map(lambda kv: kv[0], l))\n\n\ndef expln(x):\n \"\"\" This continuous function ensures that the values of the array are always positive.\n It is ln(x+1)+1 for x >= 0 and exp(x) for x < 0. \"\"\"\n def f(val):\n if val < 0:\n # exponential function for x < 0\n return exp(val)\n else:\n # natural log function for x >= 0\n return log(val + 1.0) + 1\n try:\n result = array(map(f, x))\n except TypeError:\n result = array(f(x))\n\n return result\n\n\ndef explnPrime(x):\n \"\"\" This function is the first derivative of the expln function (above).\n It is needed for the backward pass of the module. \"\"\"\n def f(val):\n if val < 0:\n # exponential function for x<0\n return exp(val)\n else:\n # linear function for x>=0\n return 1.0 / (val + 1.0)\n try:\n result = array(map(f, x))\n except TypeError:\n result = array(f(x))\n\n return result\n\n\ndef multivariateNormalPdf(z, x, sigma):\n \"\"\" The pdf of a multivariate normal distribution (not in scipy).\n The sample z and the mean x should be 1-dim-arrays, and sigma a square 2-dim-array. \"\"\"\n assert len(z.shape) == 1 and len(x.shape) == 1 and len(x) == len(z) and sigma.shape == (len(x), len(z))\n tmp = -0.5 * dot(dot((z - x), inv(sigma)), (z - x))\n res = (1. / power(2.0 * pi, len(z) / 2.)) * (1. / sqrt(det(sigma))) * exp(tmp)\n return res\n\n\ndef simpleMultivariateNormalPdf(z, detFactorSigma):\n \"\"\" Assuming z has been transformed to a mean of zero and an identity matrix of covariances.\n Needs to provide the determinant of the factorized (real) covariance matrix. \"\"\"\n dim = len(z)\n return exp(-0.5 * dot(z, z)) / (power(2.0 * pi, dim / 2.) 
* detFactorSigma)\n\n\ndef multivariateCauchy(mu, sigma, onlyDiagonal=True):\n \"\"\" Generates a sample according to a given multivariate Cauchy distribution. \"\"\"\n if not onlyDiagonal:\n u, s, d = svd(sigma)\n coeffs = sqrt(s)\n else:\n coeffs = diag(sigma)\n r = rand(len(mu))\n res = coeffs * tan(pi * (r - 0.5))\n if not onlyDiagonal:\n res = dot(d, dot(res, u))\n return res + mu\n\n\ndef approxChiFunction(dim):\n \"\"\" Returns Chi (expectation of the length of a normal random vector)\n approximation according to: Ostermeier 1997. \"\"\"\n dim = float(dim)\n return sqrt(dim) * (1 - 1 / (4 * dim) + 1 / (21 * dim ** 2))\n\n\ndef sqrtm(M):\n \"\"\" Returns the symmetric semi-definite positive square root of a matrix. \"\"\"\n r = real_if_close(expm2(0.5 * logm(M)), 1e-8)\n return (r + r.T) / 2\n\n"
] | [
[
"scipy.sqrt",
"scipy.log",
"scipy.dot",
"scipy.exp",
"scipy.linalg.inv",
"scipy.tanh",
"scipy.clip",
"scipy.linalg.logm",
"scipy.tan",
"scipy.diag",
"scipy.power",
"scipy.linalg.det",
"scipy.linalg.svd"
]
] |
RQuispeC/opacus | [
"5c83d59fc169e93667946204f7a6859827a38ace"
] | [
"opacus/tests/ddp_hook_check.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\nimport sys\nimport unittest\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.optim as optim\nfrom opacus import PrivacyEngine\nfrom opacus.distributed import DifferentiallyPrivateDistributedDataParallel as DPDDP\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\n\nPRIVACY_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))\n\n\ndef setup_and_get_device(rank, world_size, nonce=0):\n \"\"\"\n Initialize the torch.distributed process group.\n If you run multiple groups in parallel or if you have zombie processes, you can add a nonce to avoid errors.\n \"\"\"\n device = 0\n if sys.platform == \"win32\":\n # Distributed package only covers collective communications with Gloo\n # backend and FileStore on Windows platform. Set init_method parameter\n # in init_process_group to a local file.\n # Example init_method=\"file:///f:/libtmp/some_file\"\n init_method = \"file:///{your local file path}\"\n\n # initialize the process group\n dist.init_process_group(\n \"gloo\", init_method=init_method, rank=rank, world_size=world_size\n )\n device = rank\n elif os.environ.get(\"SLURM_NTASKS\") is not None:\n # Running on a Slurm cluster\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = str(7440 + nonce)\n local_rank = int(os.environ.get(\"SLURM_LOCALID\"))\n dist.init_process_group(backend=\"gloo\", rank=rank, world_size=world_size)\n\n # The device is the local rank (if you have 2 nodes with 8 GPUs each, you will have two \"cuda:0\" devices)\n device = local_rank\n else:\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"12355\"\n\n os.environ[\"RANK\"] = str(rank)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n dist.init_process_group(\n init_method=\"env://\",\n backend=\"nccl\",\n )\n\n # Single node experiment\n device = rank\n return device\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10)\n self.relu = nn.ReLU()\n self.net2 = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.net2(self.relu(self.net1(x)))\n\n\ndef demo_basic(rank, world_size, weight, dp, noise_multiplier=0, max_grad_norm=1e8):\n # We don't want the 2 GPUs to work on the same examples/labels in parallel\n torch.manual_seed(rank)\n batch_size = 32\n withdp = \"with\" + (\"out \" if not dp else \"\")\n print(f\"Running basic DDP {withdp} differential privacy example on rank {rank}.\")\n\n device = setup_and_get_device(rank, world_size)\n\n # create model and move it to GPU with id rank\n model = ToyModel().to(device)\n print(f\"Initial weight: {model.net1.weight.data}\")\n\n # Freeze all the parameters except one, to ensure that the noise is the same\n # (the DDP hook does not browse the layers in the same order as the naive implementation)\n model.net1.bias.requires_grad = False\n model.net2.bias.requires_grad = False\n model.net2.weight.requires_grad = False\n\n if dp:\n ddp_model = DPDDP(model)\n engine = PrivacyEngine(\n ddp_model,\n batch_size=batch_size,\n sample_size=10 * batch_size,\n alphas=PRIVACY_ALPHAS,\n noise_multiplier=noise_multiplier,\n max_grad_norm=[max_grad_norm],\n )\n engine.random_number_generator = engine._set_seed(0)\n else:\n ddp_model = DDP(model, device_ids=[device])\n\n loss_fn = nn.MSELoss()\n optimizer = 
optim.SGD(ddp_model.parameters(), lr=1)\n if dp:\n engine.attach(optimizer)\n\n optimizer.zero_grad()\n labels = torch.randn(batch_size, 5).to(device)\n\n outputs = ddp_model(torch.randn(batch_size, 10).to(device))\n loss_fn(outputs, labels).backward()\n optimizer.step()\n\n weight.copy_(model.net1.weight.data.cpu())\n\n cleanup()\n\n\ndef demo_ddp_hook(rank, world_size, weight, dp, noise_multiplier, max_grad_norm):\n torch.manual_seed(rank)\n batch_size = 32\n withdp = \"with\" + (\"out \" if not dp else \"\")\n print(f\"Running DDP hook {withdp} differential privacy example on rank {rank}.\")\n\n device = setup_and_get_device(rank, world_size, nonce=1)\n\n # create model and move it to GPU with id rank\n model = ToyModel().to(device)\n\n model.net1.bias.requires_grad = False\n model.net2.bias.requires_grad = False\n model.net2.weight.requires_grad = False\n\n ddp_model = DDP(model, device_ids=[device])\n\n if dp:\n engine = PrivacyEngine(\n ddp_model,\n batch_size=batch_size,\n sample_size=10 * batch_size,\n alphas=PRIVACY_ALPHAS,\n noise_multiplier=noise_multiplier,\n max_grad_norm=[max_grad_norm],\n )\n engine.random_number_generator = engine._set_seed(0)\n\n loss_fn = nn.MSELoss()\n optimizer = optim.SGD(ddp_model.parameters(), lr=1)\n if dp:\n engine.attach(optimizer)\n\n optimizer.zero_grad()\n labels = torch.randn(batch_size, 5).to(device)\n\n outputs = ddp_model(torch.randn(batch_size, 10).to(device))\n loss_fn(outputs, labels).backward()\n optimizer.step()\n\n weight.copy_(model.net1.weight.data.cpu())\n\n del ddp_model\n cleanup()\n\n\ndef add_remove_ddp_hooks(\n rank, world_size, remaining_hooks, dp, noise_multiplier=0, max_grad_norm=1e8\n):\n device = setup_and_get_device(rank, world_size, nonce=2)\n\n model = ToyModel().to(device)\n ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])\n\n engine = PrivacyEngine(\n ddp_model,\n batch_size=1,\n sample_size=10,\n alphas=PRIVACY_ALPHAS,\n noise_multiplier=noise_multiplier,\n max_grad_norm=[max_grad_norm],\n )\n\n optimizer = optim.SGD(ddp_model.parameters(), lr=1)\n\n engine.attach(optimizer)\n\n remaining_hooks[\"attached\"] = {\n p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks\n }\n engine.detach()\n\n remaining_hooks[\"detached\"] = {\n p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks\n }\n\n cleanup()\n\n\ndef debug(rank, world_size, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):\n local_rank = setup_and_get_device(rank, world_size)\n print(f\"Rank: {rank},World size: {world_size}, local_rank: {local_rank}\")\n tensor = tensor.to(local_rank)\n print(f\"dp: {dp}\")\n print(tensor)\n\n cleanup()\n\n\ndef run_function(local_function, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):\n if os.environ.get(\"SLURM_NTASKS\") is not None:\n world_size = int(os.environ.get(\"SLURM_NTASKS\"))\n rank = int(os.environ.get(\"SLURM_PROCID\"))\n print(f\"Running on a Slurm cluster with {world_size} tasks.\")\n\n local_function(rank, world_size, tensor, dp, noise_multiplier, max_grad_norm)\n else:\n world_size = torch.cuda.device_count()\n print(f\"Spawning multiple processes on a local machine with {world_size} GPUs\")\n\n # The rank will be passed as the first argument\n mp.spawn(\n local_function,\n args=(\n world_size,\n tensor,\n dp,\n noise_multiplier,\n max_grad_norm,\n ),\n nprocs=world_size,\n join=True,\n )\n return world_size\n\n\nclass GradientComputationTest(unittest.TestCase):\n def test_connection(self):\n tensor = torch.zeros(10, 10)\n 
world_size = run_function(debug, tensor, dp=True)\n\n self.assertTrue(\n world_size >= 2, f\"Need at least 2 gpus but was provided only {world_size}.\"\n )\n\n def test_gradient_noclip_zeronoise(self):\n # Tests that gradient is the same with DP or with DDP\n weight_dp, weight_nodp = torch.zeros(10, 10), torch.zeros(10, 10)\n\n run_function(demo_basic, weight_dp, dp=True)\n run_function(demo_basic, weight_nodp, dp=False)\n\n self.assertTrue(torch.norm(weight_dp - weight_nodp) < 1e-7)\n\n def test_ddp_hook(self):\n # Tests that the DDP hook does the same thing as naive aggregation with per layer clipping\n weight_ddp_naive, weight_ddp_hook = torch.zeros(10, 10), torch.zeros(10, 10)\n\n run_function(\n demo_basic,\n weight_ddp_naive,\n dp=True,\n noise_multiplier=0.1,\n max_grad_norm=1.0,\n )\n\n run_function(\n demo_ddp_hook,\n weight_ddp_hook,\n dp=True,\n noise_multiplier=0.1,\n max_grad_norm=1.0,\n )\n\n self.assertTrue(\n torch.norm(weight_ddp_naive - weight_ddp_hook) < 1e-7,\n f\"DDP naive: {weight_ddp_naive}\\nDDP hook: {weight_ddp_hook}\",\n )\n\n def test_add_remove_ddp_hooks(self):\n\n remaining_hooks = {\n \"attached\": None,\n \"detached\": None,\n }\n\n run_function(\n add_remove_ddp_hooks,\n remaining_hooks,\n dp=True,\n noise_multiplier=0.1,\n max_grad_norm=1.0,\n )\n\n assert remaining_hooks[\"attached\"], \"There are no hooks.\"\n\n assert not remaining_hooks[\n \"detached\"\n ], f\"Some hooks remain after .remove_hooks(): {remaining_hooks}\"\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.multiprocessing.spawn",
"torch.randn",
"torch.manual_seed",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.norm",
"torch.nn.parallel.DistributedDataParallel",
"torch.zeros",
"torch.distributed.destroy_process_group",
"torch.nn.ReLU"
]
] |
scpepper69/ml | [
"13ad41dd7b22d3fa152cf3665fc4dc7c1c747917"
] | [
"mnist/app/app/mnist.py"
] | [
"from datetime import datetime\nimport cv2\nimport re\nimport base64\nfrom flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS\nimport numpy as np\n\nfrom io import BytesIO\nfrom PIL import Image, ImageOps\nimport os,sys\nimport requests\nfrom graphpipe import remote\nfrom matplotlib import pylab as plt\n\n\napp = Flask(__name__)\nCORS(app) # To Post by Ajax\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n ans,t1,t2,t3 = get_answer(request)\n return jsonify({'ans': ans, 't1': t1, 't2': t2, 't3': t3})\n else:\n return render_template('index.html')\n\ndef result(img):\n img = img.reshape(1, 784)\n img = img.astype(np.float32)\n img = np.multiply(img, 1.0 / 255.0)\n pred = remote.execute(\"http://localhost:9001\", img)\n r = np.argmax(pred, axis=1)\n pp = pred*100\n top1 = str(np.argsort(-pp)[0][0])+ \" (\" +str(int(np.sort(-pp)[0][0]*-1))+\"%)\"\n top2 = str(np.argsort(-pp)[0][1])+ \" (\" +str(int(np.sort(-pp)[0][1]*-1))+\"%)\"\n top3 = str(np.argsort(-pp)[0][2])+ \" (\" +str(int(np.sort(-pp)[0][2]*-1))+\"%)\"\n# return int(r)\n return r,top1,top2,top3\n\ndef get_answer(req):\n img_str = re.search(r'base64,(.*)', req.form['img']).group(1)\n nparr = np.fromstring(base64.b64decode(img_str), np.uint8)\n img_src = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n img_negaposi = 255 - img_src\n img_gray = cv2.cvtColor(img_negaposi, cv2.COLOR_BGR2GRAY)\n img_resize = cv2.resize(img_gray,(28,28))\n cv2.imwrite(f\"images/{datetime.now().strftime('%s')}.jpg\",img_resize)\n ans,t1,t2,t3 = result(img_resize)\n return int(ans),t1,t2,t3\n\nif __name__ == \"__main__\":\n app.run(debug=False, host='0.0.0.0', port=8001)\n"
] | [
[
"numpy.sort",
"numpy.multiply",
"numpy.argsort",
"numpy.argmax"
]
] |
Manish-rai21bit/deep_learning_for_camera_trap_images | [
"f9d9fd50824ece4743b39d5136f67235871cc0ef"
] | [
"phase2_recognition_only/architectures/vgg.py"
] | [
"import tensorflow as tf\nimport common\n\ndef inference(x, num_output, wd, dropout_rate, is_training, transfer_mode= False, model_type= 'A'):\n # Create tables describing VGG configurations A, B, D, E\n if model_type == 'A':\n config = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']\n elif model_type == 'B':\n config = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']\n elif model_type == 'D':\n config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']\n elif model_type == 'E':\n config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']\n else:\n print('Unknown model type: ' + model_type + ' | Please specify a modelType A or B or D or E')\n \n network= x\n\n for k,v in enumerate(config):\n if v == 'M':\n network= common.maxPool(network, 2, 2)\n else: \n with tf.variable_scope('conv'+str(k)):\n network = common.spatialConvolution(network, 3, 1, v, wd= wd)\n network = tf.nn.relu(network)\n\n network= common.flatten(network)\n\n with tf.variable_scope('fc1'): \n network = common.fullyConnected(network, 4096, wd= wd)\n network = tf.nn.relu(network)\n network = common.batchNormalization(network, is_training= is_training)\n network = tf.nn.dropout(network, dropout_rate)\n with tf.variable_scope('fc2'):\n network = common.fullyConnected(network, 4096, wd= wd)\n network = tf.nn.relu(network)\n network = common.batchNormalization(network, is_training= is_training)\n network = tf.nn.dropout(network, dropout_rate)\n if not transfer_mode:\n with tf.variable_scope('output'):\n network = common.fullyConnected(network, num_output, wd= wd)\n else:\n with tf.variable_scope('transfer_output'):\n network = common.fullyConnected(network, num_output, wd= wd)\n\n return network\n"
] | [
[
"tensorflow.nn.relu",
"tensorflow.variable_scope",
"tensorflow.nn.dropout"
]
] |
innovator-zero/Python | [
"f776eb081c6688c2f5a98b0050b33582c1769391"
] | [
"kmeans/fish.py"
] | [
"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\npoints=np.loadtxt('points.txt')\nherring_r = np.loadtxt('distribution.txt')\nherring=np.zeros((802,350))\nfor i in range(350):\n for j in range(802):\n herring[j,349-i]=herring_r[i,j]\n\n# s=np.zeros(10)\n#\n# for i in range(10):\n# x=int(round(points[i,0]))-1\n# y=int(round(points[i,1]))\n#\n# for xx in range(x-11,x+12):\n# for yy in range(y-11,y+12):\n# if herring[xx,yy]>0:\n# s[i]+=herring[xx,yy]\n#\n# f = open('fish_count.txt', 'w')\n# for i in range(10):\n# f.write(str(s[i])+'\\n')\n# f.close()\ns=0\nfor i in range(802):\n for j in range(350):\n if herring[i,j]>0:\n s+=herring[i,j]\n\nprint(s)\n\n\n"
] | [
[
"numpy.zeros",
"numpy.loadtxt"
]
] |
luoyi1hao/ACRN_Chest_X-ray_IA | [
"b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a",
"b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a"
] | [
"acregnet/train_acregnet.py",
"acregnet/data.py"
] | [
"from data import DataHandler\nfrom models import ACRegNet\nimport tensorflow as tf\nfrom utils import get_random_batch, read_config_file, create_dir\n\n\nRUN_IN_GPU = False\n\n\ndef train_acregnet_model(config):\n tf.reset_default_graph()\n tf_config = tf.ConfigProto()\n\n if RUN_IN_GPU:\n tf_config.gpu_options.allow_growth = True\n\n sess = tf.Session(config=tf_config)\n\n train_ims, _ = DataHandler.load_images(config['train_ims_file'])\n train_lbs, _ = DataHandler.load_labels(config['train_lbs_file'])\n print('Loading training data...done')\n\n acregnet = ACRegNet(sess, config, 'ACRegNet', is_train=True)\n print('Building AC-RegNet model...done')\n\n print('Training...')\n for i in range(config['iterations']):\n batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y = get_random_batch(\n train_ims, config['batch_size'], train_lbs)\n cur_loss = acregnet.fit(\n batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y)\n print('Iteration {:>8d}/{}: Loss: {}'.format(\n i + 1, config['iterations'], cur_loss))\n\n acregnet.save(config['ckpt_dir'])\n print('Saving current AC-RegNet model...done')\n\n print('Training...done')\n\n tf.reset_default_graph()\n sess.close()\n\n\nif __name__ == \"__main__\":\n config = read_config_file('./config/JSRT/ACRegNet.cfg')\n create_dir(config['ckpt_dir'])\n train_acregnet_model(config)\n",
"import os\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport cv2\n\n\nclass DataHandler(object):\n\n def _load_data(im_fnames, add_channel_dim=True):\n im0 = cv2.imread(im_fnames[0], 0)\n im_batch = np.zeros((len(im_fnames),) + im0.shape)\n im_batch[0] = im0\n for i, fname in enumerate(im_fnames[1:], 1):\n im_batch[i] = cv2.imread(fname, 0)\n\n if add_channel_dim:\n return np.expand_dims(im_batch, axis=-1)\n\n return im_batch\n\n @staticmethod\n def load_images(_file, normalize=True):\n im_fnames = list(np.loadtxt(_file, dtype='str'))\n im_batch = DataHandler._load_data(im_fnames).astype(np.float32)\n\n if normalize:\n im_batch = im_batch / 255.\n\n return im_batch, im_fnames\n\n @staticmethod\n def load_labels(_file):\n lb_fnames = list(np.loadtxt(_file, dtype='str'))\n lb_batch = DataHandler._load_data(lb_fnames).astype(np.int32)\n\n cur_labels = np.unique(lb_batch)\n new_labels = range(np.unique(lb_batch).shape[0])\n if not np.array_equal(cur_labels, new_labels):\n for cur_l, new_l in zip(cur_labels, new_labels):\n lb_batch[lb_batch == cur_l] = new_l\n\n return lb_batch, lb_fnames\n\n @staticmethod\n def train_test_split(data_dir, out_dir,\n test_size=0.2, seed=1):\n data_fnames = [\n os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]\n\n train_fnames, test_fnames = train_test_split(\n data_fnames, test_size, True, seed)\n\n np.savetxt(os.path.join(out_dir, 'train_fnames'),\n np.array(train_fnames), fmt='%s')\n np.savetxt(os.path.join(out_dir, 'test_fnames'),\n np.array(test_fnames), fmt='%s')\n\n @staticmethod\n def train_valid_test_split(data_dir, out_dir, valid_size=0.1,\n test_size=0.2, seed=1):\n data_fnames = [\n os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]\n\n train_fnames, test_fnames = train_test_split(\n data_fnames, test_size, True, seed)\n train_fnames, valid_fnames = train_test_split(\n train_fnames, valid_size/(1 - test_size), False, seed + 1)\n\n np.savetxt(os.path.join(out_dir, 'train_fnames'),\n np.array(train_fnames), fmt='%s')\n np.savetxt(os.path.join(out_dir, 'valid_fnames'),\n np.array(valid_fnames), fmt='%s')\n np.savetxt(os.path.join(out_dir, 'test_fnames'),\n np.array(test_fnames), fmt='%s')\n"
] | [
[
"tensorflow.reset_default_graph",
"tensorflow.ConfigProto",
"tensorflow.Session"
],
[
"numpy.unique",
"numpy.expand_dims",
"numpy.array_equal",
"numpy.array",
"sklearn.model_selection.train_test_split",
"numpy.loadtxt"
]
] |
NNstorm/MinkowskiEngine | [
"443b37a58c379b2482b5d160d9e874b356b4bf2f"
] | [
"examples/classification_modelnet40.py"
] | [
"# Copyright (c) 2020 NVIDIA CORPORATION.\n# Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport argparse\nimport sklearn.metrics as metrics\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport MinkowskiEngine as ME\nfrom examples.pointnet import (\n PointNet,\n MinkowskiPointNet,\n CoordinateTransformation,\n ModelNet40H5,\n stack_collate_fn,\n minkowski_collate_fn,\n)\nfrom examples.common import seed_all\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--voxel_size\", type=float, default=0.05)\nparser.add_argument(\"--max_steps\", type=int, default=100000)\nparser.add_argument(\"--val_freq\", type=int, default=1000)\nparser.add_argument(\"--batch_size\", default=32, type=int)\nparser.add_argument(\"--lr\", default=1e-1, type=float)\nparser.add_argument(\"--weight_decay\", type=float, default=1e-4)\nparser.add_argument(\"--num_workers\", type=int, default=2)\nparser.add_argument(\"--stat_freq\", type=int, default=100)\nparser.add_argument(\"--weights\", type=str, default=\"modelnet.pth\")\nparser.add_argument(\"--seed\", type=int, default=777)\nparser.add_argument(\"--translation\", type=float, default=0.2)\nparser.add_argument(\"--test_translation\", type=float, default=0.0)\nparser.add_argument(\n \"--network\",\n type=str,\n choices=[\"pointnet\", \"minkpointnet\", \"minkfcnn\", \"minksplatfcnn\"],\n default=\"minkfcnn\",\n)\n\n\nclass MinkowskiFCNN(ME.MinkowskiNetwork):\n def __init__(\n self,\n in_channel,\n out_channel,\n embedding_channel=1024,\n channels=(32, 48, 64, 96, 128),\n D=3,\n ):\n ME.MinkowskiNetwork.__init__(self, D)\n\n self.network_initialization(\n in_channel,\n out_channel,\n channels=channels,\n embedding_channel=embedding_channel,\n kernel_size=3,\n D=D,\n )\n self.weight_initialization()\n\n def get_mlp_block(self, in_channel, out_channel):\n return nn.Sequential(\n ME.MinkowskiLinear(in_channel, out_channel, bias=False),\n ME.MinkowskiBatchNorm(out_channel),\n ME.MinkowskiLeakyReLU(),\n )\n\n def get_conv_block(self, in_channel, out_channel, kernel_size, stride):\n return nn.Sequential(\n ME.MinkowskiConvolution(\n in_channel,\n out_channel,\n 
kernel_size=kernel_size,\n stride=stride,\n dimension=self.D,\n ),\n ME.MinkowskiBatchNorm(out_channel),\n ME.MinkowskiLeakyReLU(),\n )\n\n def network_initialization(\n self,\n in_channel,\n out_channel,\n channels,\n embedding_channel,\n kernel_size,\n D=3,\n ):\n self.mlp1 = self.get_mlp_block(in_channel, channels[0])\n self.conv1 = self.get_conv_block(\n channels[0],\n channels[1],\n kernel_size=kernel_size,\n stride=1,\n )\n self.conv2 = self.get_conv_block(\n channels[1],\n channels[2],\n kernel_size=kernel_size,\n stride=2,\n )\n\n self.conv3 = self.get_conv_block(\n channels[2],\n channels[3],\n kernel_size=kernel_size,\n stride=2,\n )\n\n self.conv4 = self.get_conv_block(\n channels[3],\n channels[4],\n kernel_size=kernel_size,\n stride=2,\n )\n self.conv5 = nn.Sequential(\n self.get_conv_block(\n channels[1] + channels[2] + channels[3] + channels[4],\n embedding_channel // 4,\n kernel_size=3,\n stride=2,\n ),\n self.get_conv_block(\n embedding_channel // 4,\n embedding_channel // 2,\n kernel_size=3,\n stride=2,\n ),\n self.get_conv_block(\n embedding_channel // 2,\n embedding_channel,\n kernel_size=3,\n stride=2,\n ),\n )\n\n self.pool = ME.MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)\n\n self.global_max_pool = ME.MinkowskiGlobalMaxPooling()\n self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()\n\n self.final = nn.Sequential(\n self.get_mlp_block(embedding_channel * 2, 512),\n ME.MinkowskiDropout(),\n self.get_mlp_block(512, 512),\n ME.MinkowskiLinear(512, out_channel, bias=True),\n )\n\n # No, Dropout, last 256 linear, AVG_POOLING 92%\n\n def weight_initialization(self):\n for m in self.modules():\n if isinstance(m, ME.MinkowskiConvolution):\n ME.utils.kaiming_normal_(m.kernel, mode=\"fan_out\", nonlinearity=\"relu\")\n\n if isinstance(m, ME.MinkowskiBatchNorm):\n nn.init.constant_(m.bn.weight, 1)\n nn.init.constant_(m.bn.bias, 0)\n\n def forward(self, x: ME.TensorField):\n x = self.mlp1(x)\n y = x.sparse()\n\n y = self.conv1(y)\n y1 = self.pool(y)\n\n y = self.conv2(y1)\n y2 = self.pool(y)\n\n y = self.conv3(y2)\n y3 = self.pool(y)\n\n y = self.conv4(y3)\n y4 = self.pool(y)\n\n x1 = y1.slice(x)\n x2 = y2.slice(x)\n x3 = y3.slice(x)\n x4 = y4.slice(x)\n\n x = ME.cat(x1, x2, x3, x4)\n\n y = self.conv5(x.sparse())\n x1 = self.global_max_pool(y)\n x2 = self.global_avg_pool(y)\n\n return self.final(ME.cat(x1, x2)).F\n\n\nclass GlobalMaxAvgPool(torch.nn.Module):\n def __init__(self):\n torch.nn.Module.__init__(self)\n self.global_max_pool = ME.MinkowskiGlobalMaxPooling()\n self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()\n\n def forward(self, tensor):\n x = self.global_max_pool(tensor)\n y = self.global_avg_pool(tensor)\n return ME.cat(x, y)\n\n\nclass MinkowskiSplatFCNN(MinkowskiFCNN):\n def __init__(\n self,\n in_channel,\n out_channel,\n embedding_channel=1024,\n channels=(32, 48, 64, 96, 128),\n D=3,\n ):\n MinkowskiFCNN.__init__(\n self, in_channel, out_channel, embedding_channel, channels, D\n )\n\n def forward(self, x: ME.TensorField):\n x = self.mlp1(x)\n y = x.splat()\n\n y = self.conv1(y)\n y1 = self.pool(y)\n\n y = self.conv2(y1)\n y2 = self.pool(y)\n\n y = self.conv3(y2)\n y3 = self.pool(y)\n\n y = self.conv4(y3)\n y4 = self.pool(y)\n\n x1 = y1.interpolate(x)\n x2 = y2.interpolate(x)\n x3 = y3.interpolate(x)\n x4 = y4.interpolate(x)\n\n x = ME.cat(x1, x2, x3, x4)\n y = self.conv5(x.sparse())\n\n x1 = self.global_max_pool(y)\n x2 = self.global_avg_pool(y)\n\n return self.final(ME.cat(x1, x2)).F\n\n\nSTR2NETWORK = dict(\n pointnet=PointNet,\n 
minkpointnet=MinkowskiPointNet,\n minkfcnn=MinkowskiFCNN,\n minksplatfcnn=MinkowskiSplatFCNN,\n)\n\n\ndef create_input_batch(batch, is_minknet, device=\"cuda\", quantization_size=0.05):\n if is_minknet:\n batch[\"coordinates\"][:, 1:] = batch[\"coordinates\"][:, 1:] / quantization_size\n return ME.TensorField(\n coordinates=batch[\"coordinates\"],\n features=batch[\"features\"],\n device=device,\n )\n else:\n return batch[\"coordinates\"].permute(0, 2, 1).to(device)\n\n\nclass CoordinateTranslation:\n def __init__(self, translation):\n self.trans = translation\n\n def __call__(self, coords):\n if self.trans > 0:\n coords += np.random.uniform(low=-self.trans, high=self.trans, size=[1, 3])\n return coords\n\n\ndef make_data_loader(phase, is_minknet, config):\n assert phase in [\"train\", \"val\", \"test\"]\n is_train = phase == \"train\"\n dataset = ModelNet40H5(\n phase=phase,\n transform=CoordinateTransformation(trans=config.translation)\n if is_train\n else CoordinateTranslation(config.test_translation),\n data_root=\"modelnet40_ply_hdf5_2048\",\n )\n return DataLoader(\n dataset,\n num_workers=config.num_workers,\n shuffle=is_train,\n collate_fn=minkowski_collate_fn if is_minknet else stack_collate_fn,\n batch_size=config.batch_size,\n )\n\n\ndef test(net, device, config, phase=\"val\"):\n is_minknet = isinstance(net, ME.MinkowskiNetwork)\n data_loader = make_data_loader(\n \"test\",\n is_minknet,\n config=config,\n )\n\n net.eval()\n labels, preds = [], []\n with torch.no_grad():\n for batch in data_loader:\n input = create_input_batch(\n batch,\n is_minknet,\n device=device,\n quantization_size=config.voxel_size,\n )\n logit = net(input)\n pred = torch.argmax(logit, 1)\n labels.append(batch[\"labels\"].cpu().numpy())\n preds.append(pred.cpu().numpy())\n torch.cuda.empty_cache()\n return metrics.accuracy_score(np.concatenate(labels), np.concatenate(preds))\n\n\ndef criterion(pred, labels, smoothing=True):\n \"\"\"Calculate cross entropy loss, apply label smoothing if needed.\"\"\"\n\n labels = labels.contiguous().view(-1)\n if smoothing:\n eps = 0.2\n n_class = pred.size(1)\n\n one_hot = torch.zeros_like(pred).scatter(1, labels.view(-1, 1), 1)\n one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)\n log_prb = F.log_softmax(pred, dim=1)\n\n loss = -(one_hot * log_prb).sum(dim=1).mean()\n else:\n loss = F.cross_entropy(pred, labels, reduction=\"mean\")\n\n return loss\n\n\ndef train(net, device, config):\n is_minknet = isinstance(net, ME.MinkowskiNetwork)\n optimizer = optim.SGD(\n net.parameters(),\n lr=config.lr,\n momentum=0.9,\n weight_decay=config.weight_decay,\n )\n scheduler = optim.lr_scheduler.CosineAnnealingLR(\n optimizer,\n T_max=config.max_steps,\n )\n print(optimizer)\n print(scheduler)\n\n train_iter = iter(make_data_loader(\"train\", is_minknet, config))\n best_metric = 0\n net.train()\n for i in range(config.max_steps):\n optimizer.zero_grad()\n try:\n data_dict = train_iter.next()\n except StopIteration:\n train_iter = iter(make_data_loader(\"train\", is_minknet, config))\n data_dict = train_iter.next()\n input = create_input_batch(\n data_dict, is_minknet, device=device, quantization_size=config.voxel_size\n )\n logit = net(input)\n loss = criterion(logit, data_dict[\"labels\"].to(device))\n loss.backward()\n optimizer.step()\n scheduler.step()\n torch.cuda.empty_cache()\n\n if i % config.stat_freq == 0:\n print(f\"Iter: {i}, Loss: {loss.item():.3e}\")\n\n if i % config.val_freq == 0 and i > 0:\n torch.save(\n {\n \"state_dict\": net.state_dict(),\n 
\"optimizer\": optimizer.state_dict(),\n \"scheduler\": scheduler.state_dict(),\n \"curr_iter\": i,\n },\n config.weights,\n )\n accuracy = test(net, device, config, phase=\"val\")\n if best_metric < accuracy:\n best_metric = accuracy\n print(f\"Validation accuracy: {accuracy}. Best accuracy: {best_metric}\")\n net.train()\n\n\nif __name__ == \"__main__\":\n config = parser.parse_args()\n seed_all(config.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"===================ModelNet40 Dataset===================\")\n print(f\"Training with translation {config.translation}\")\n print(f\"Evaluating with translation {config.test_translation}\")\n print(\"=============================================\\n\\n\")\n\n net = STR2NETWORK[config.network](\n in_channel=3, out_channel=40, embedding_channel=1024\n ).to(device)\n print(\"===================Network===================\")\n print(net)\n print(\"=============================================\\n\\n\")\n\n train(net, device, config)\n accuracy = test(net, device, config, phase=\"test\")\n print(f\"Test accuracy: {accuracy}\")\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.utils.data.DataLoader",
"torch.nn.functional.log_softmax",
"torch.cuda.empty_cache",
"numpy.random.uniform",
"torch.nn.Module.__init__",
"torch.nn.init.constant_",
"torch.argmax",
"torch.zeros_like",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.functional.cross_entropy",
"numpy.concatenate"
]
] |
brunomarct/archetypal | [
"ce8daf4e18ef3ec92967e5d6837b392199caf83b"
] | [
"archetypal/schedule.py"
] | [
"################################################################################\n# Module: schedule.py\n# Description: Functions for handling conversion of EnergyPlus schedule objects\n# License: MIT, see full license in LICENSE.txt\n# Web: https://github.com/samuelduchesne/archetypal\n################################################################################\n\nimport functools\nimport io\nimport logging as lg\nfrom datetime import datetime, timedelta\n\nimport archetypal\nimport numpy as np\nimport pandas as pd\nfrom archetypal import log\n\n\nclass Schedule(object):\n \"\"\"An object designed to handle any EnergyPlys schedule object\"\"\"\n\n def __init__(self, sch_name, idf=None, start_day_of_the_week=0,\n strict=False, base_year=2018, schType=None, **kwargs):\n \"\"\"\n\n Args:\n idf (IDF): IDF object\n sch_name (str): The schedule name in the idf file\n start_day_of_the_week (int): 0-based day of week (Monday=0)\n strict (bool): if True, schedules that have the Field-Sets such\n as Holidays and CustomDay will raise an error if they are absent\n from the IDF file. If False, any missing qualifiers will be\n ignored.\n base_year (int): The base year of the schedule. Defaults to 2018\n since the first day of that year is a Monday.\n \"\"\"\n super(Schedule, self).__init__(**kwargs)\n self.strict = strict\n self.idf = idf\n self.schName = sch_name\n self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)\n self.year = base_year\n self.startDate = self.start_date()\n self.count = 0\n self.startHOY = 1\n self.endHOY = 24\n self.unit = \"unknown\"\n\n self.index_ = None\n self.values = None\n self.schType = schType\n _type = kwargs.get('Type', None)\n if _type is None:\n self.schTypeLimitsName = self.get_schedule_type_limits_name(\n sch_type=self.schType)\n else:\n self.schTypeLimitsName = _type\n\n @classmethod\n def constant_schedule(cls, hourly_value=1, Name='AlwaysOn', **kwargs):\n idftxt = \"VERSION, 8.9;\" # Not an emplty string. 
has just the\n # version number\n # we can make a file handle of a string\n fhandle = io.StringIO(idftxt)\n # initialize the IDF object with the file handle\n idf_scratch = archetypal.IDF(fhandle)\n\n idf_scratch.add_object(ep_object='Schedule:Constant'.upper(),\n **dict(Name=Name,\n Schedule_Type_Limits_Name='',\n Hourly_Value=hourly_value),\n save=False)\n\n sched = Schedule(sch_name=Name, idf=idf_scratch, **kwargs)\n return sched\n\n @property\n def all_values(self):\n \"\"\"returns the values array\"\"\"\n if self.values is None:\n self.values = self.get_schedule_values(sch_name=self.schName,\n sch_type=self.schType)\n return self.values\n else:\n return self.values\n\n @property\n def max(self):\n return max(self.all_values)\n\n @property\n def min(self):\n return min(self.all_values)\n\n @property\n def mean(self):\n return np.mean(self.all_values)\n\n @property\n def series(self):\n \"\"\"Returns the schedule values as a pd.Series object with a\n DateTimeIndex\"\"\"\n index = pd.date_range(start=self.startDate, periods=len(\n self.all_values), freq='1H')\n return pd.Series(self.all_values, index=index)\n\n def get_schedule_type_limits_name(self, sch_name=None, sch_type=None):\n \"\"\"Return the Schedule Type Limits name associated to a schedule\n name\"\"\"\n if sch_name is None:\n sch_name = self.schName\n if sch_type is None:\n schedule_values = self.idf.get_schedule_data_by_name(sch_name,\n sch_type=sch_type)\n try:\n schedule_limit_name = schedule_values.Schedule_Type_Limits_Name\n except:\n return 'unknown'\n else:\n return schedule_limit_name\n\n def get_schedule_type_limits_data(self, sch_name=None):\n \"\"\"Returns Schedule Type Limits data from schedule name\"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n schedule_values = self.idf.get_schedule_data_by_name(sch_name)\n try:\n schedule_limit_name = schedule_values.Schedule_Type_Limits_Name\n except:\n # this schedule is probably a 'Schedule:Week:Daily' which does\n # not have a Schedule_Type_Limits_Name field\n return '', '', '', ''\n else:\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.idf.get_schedule_type_limits_data_by_name(\n schedule_limit_name)\n\n self.unit = unit_type\n if self.unit == \"unknown\":\n self.unit = numeric_type\n\n return lower_limit, upper_limit, numeric_type, unit_type\n\n def get_schedule_type(self, sch_name=None):\n \"\"\"Return the schedule type\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n schedule_values = self.idf.get_schedule_data_by_name(sch_name)\n sch_type = schedule_values.fieldvalues[0]\n\n return sch_type\n\n def start_date(self):\n \"\"\"The start date of the schedule. 
Satisfies `startDayOfTheWeek`\"\"\"\n import calendar\n c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)\n start_date = c.monthdatescalendar(self.year, 1)[0][0]\n return datetime(start_date.year, start_date.month, start_date.day)\n\n def plot(self, slice=None, **kwargs):\n hourlyvalues = self.all_values\n index = pd.date_range(self.startDate, periods=len(\n hourlyvalues),\n freq='1H')\n series = pd.Series(hourlyvalues, index=index, dtype=float)\n if slice is None:\n slice = pd.IndexSlice[:]\n elif len(slice) > 1:\n slice = pd.IndexSlice[slice[0]:slice[1]]\n ax = series.loc[slice].plot(**kwargs, label=self.schName)\n return ax\n\n def get_interval_day_ep_schedule_values(self, sch_name=None):\n \"\"\"'Schedule:Day:Interval\"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('Schedule:Day:Interval'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n number_of_day_sch = int((len(values.fieldvalues) - 3) / 2)\n\n hourly_values = np.arange(24)\n start_hour = 0\n for i in range(number_of_day_sch):\n value = float(values['Value_Until_Time_{}'.format(i + 1)])\n until_time = [int(s.strip()) for s in\n values['Time_{}'.format(i + 1)].split(\":\") if\n s.strip().isdigit()]\n end_hour = int(until_time[0] + until_time[1] / 60)\n for hour in range(start_hour, end_hour):\n hourly_values[hour] = value\n\n start_hour = end_hour\n\n if numeric_type.strip().lower() == \"discrete\":\n hourly_values = hourly_values.astype(int)\n\n return hourly_values\n\n def get_hourly_day_ep_schedule_values(self, sch_name=None):\n \"\"\"'Schedule:Day:Hourly'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('Schedule:Day:Hourly'.upper(), sch_name)\n\n fieldvalues_ = np.array(values.fieldvalues[3:])\n\n return fieldvalues_\n\n def get_compact_weekly_ep_schedule_values(self, sch_name=None,\n start_date=None, index=None):\n \"\"\"'schedule:week:compact'\"\"\"\n if start_date is None:\n start_date = self.startDate\n if index is None:\n idx = pd.date_range(start=start_date, periods=168, freq='1H')\n slicer_ = pd.Series([False] * (len(idx)), index=idx)\n else:\n slicer_ = pd.Series([False] * (len(index)), index=index)\n\n if sch_name is None:\n sch_name = self.schName\n values = self.idf.getobject('schedule:week:compact'.upper(), sch_name)\n\n weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)\n # update last day of schedule\n\n if self.count == 0:\n self.schType = values.key\n self.endHOY = 168\n\n num_of_daily_schedules = int(len(values.fieldvalues[2:]) / 2)\n\n for i in range(num_of_daily_schedules):\n day_type = values['DayType_List_{}'.format(i + 1)].lower()\n how = self.field_set(day_type, slicer_)\n if not weekly_schedules.loc[how].empty:\n # Loop through days and replace with day:schedule values\n days = []\n for name, day in weekly_schedules.loc[how].groupby(pd.Grouper(\n freq='D')):\n if not day.empty:\n ref = values.get_referenced_object(\n \"ScheduleDay_Name_{}\".format(i + 1))\n day.loc[:] = self.get_schedule_values(\n sch_name=ref.Name, sch_type=ref.key)\n days.append(day)\n new = pd.concat(days)\n slicer_.update(\n pd.Series([True] * len(new.index), index=new.index))\n slicer_ = slicer_.apply(lambda x: x == True)\n weekly_schedules.update(new)\n else:\n return weekly_schedules.values\n\n return weekly_schedules.values\n\n def get_daily_weekly_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:week:daily'\"\"\"\n if sch_name is None:\n 
sch_name = self.schName\n\n values = self.idf.getobject('schedule:week:daily'.upper(), sch_name)\n\n # 7 list for 7 days of the week\n hourly_values = []\n for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']:\n ref = values.get_referenced_object(\n '{}_ScheduleDay_Name'.format(day))\n h = self.get_schedule_values(sch_name=ref.Name, sch_type=ref.key)\n hourly_values.append(h)\n hourly_values = np.array(hourly_values)\n # shift days earlier by self.startDayOfTheWeek\n hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)\n\n return hourly_values.ravel()\n\n def get_list_day_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:day:list'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:day:list'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n import pandas as pd\n freq = int(values['Minutes_per_Item']) # Frequency of the values\n num_values = values.fieldvalues[5:] # List of values\n method = values['Interpolate_to_Timestep'] # How to resample\n\n # fill a list of available values and pad with zeros (this is safer\n # but should not occur)\n all_values = np.arange(int(24 * 60 / freq))\n for i in all_values:\n try:\n all_values[i] = num_values[i]\n except:\n all_values[i] = 0\n # create a fake index to help us with the resampling\n index = pd.date_range(start=self.startDate,\n periods=(24 * 60) / freq,\n freq='{}T'.format(freq))\n series = pd.Series(all_values, index=index)\n\n # resample series to hourly values and apply resampler function\n series = series.resample('1H').apply(_how(method))\n\n return series.values\n\n def get_constant_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:constant'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:constant'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n hourly_values = np.arange(8760)\n value = float(values['Hourly_Value'])\n for hour in hourly_values:\n hourly_values[hour] = value\n\n if numeric_type.strip().lower() == 'discrete':\n hourly_values = hourly_values.astype(int)\n\n return hourly_values\n\n def get_file_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:file'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:file'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n filename = values['File_Name']\n column = values['Column_Number']\n rows = values['Rows_to_Skip_at_Top']\n hours = values['Number_of_Hours_of_Data']\n sep = values['Column_Separator']\n interp = values['Interpolate_to_Timestep']\n\n import pandas as pd\n import os\n idfdir = os.path.dirname(self.idf.idfname)\n file = os.path.join(idfdir, filename)\n delimeter = _separator(sep)\n skip_rows = int(rows) - 1 # We want to keep the column\n col = [int(column) - 1] # zero-based\n values = pd.read_csv(file, delimiter=delimeter, skiprows=skip_rows,\n usecols=col)\n\n return values.iloc[:, 0].values\n\n def get_compact_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:compact'\"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:compact'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n field_sets = 
['through', 'for', 'interpolate', 'until', 'value']\n fields = values.fieldvalues[3:]\n\n index = pd.date_range(start=self.startDate, periods=8760, freq='H')\n zeros = np.zeros(len(index))\n\n slicer_ = pd.Series([False] * len(index), index=index)\n series = pd.Series(zeros, index=index)\n\n from_day = self.startDate\n ep_from_day = datetime(self.year, 1, 1)\n from_time = '00:00'\n how_interpolate = None\n for field in fields:\n if any([spe in field.lower() for spe in field_sets]):\n f_set, hour, minute, value = self.field_interpreter(field)\n\n if f_set.lower() == 'through':\n # main condition. All sub-conditions must obey a\n # `Through` condition\n\n # First, initialize the slice (all False for now)\n through_conditions = self.invalidate_condition(series)\n\n # reset from_time\n from_time = '00:00'\n\n # Prepare ep_to_day variable\n ep_to_day = self.date_field_interpretation(value) + \\\n timedelta(days=1)\n\n # Calculate Timedelta in days\n days = (ep_to_day - ep_from_day).days\n # Add timedelta to start_date\n to_day = from_day + timedelta(days=days) + timedelta(\n hours=-1)\n\n # slice the conditions with the range and apply True\n through_conditions.loc[from_day:to_day] = True\n\n from_day = to_day + timedelta(hours=1)\n ep_from_day = ep_to_day\n elif f_set.lower() == 'for':\n # slice specific days\n # reset from_time\n from_time = '00:00'\n\n for_condition = self.invalidate_condition(series)\n values = value.split()\n if len(values) > 1:\n # if multiple `For`. eg.: For: Weekends Holidays,\n # Combine both conditions\n for value in values:\n if value.lower() == 'allotherdays':\n # Apply condition to slice\n how = self.field_set(value, slicer_)\n # Reset though condition\n through_conditions = how\n for_condition = how\n else:\n how = self.field_set(value, slicer_)\n for_condition.loc[how] = True\n elif value.lower() == 'allotherdays':\n # Apply condition to slice\n how = self.field_set(value, slicer_)\n # Reset though condition\n through_conditions = how\n for_condition = how\n else:\n # Apply condition to slice\n how = self.field_set(value)\n for_condition.loc[how] = True\n\n # Combine the for_condition with all_conditions\n all_conditions = through_conditions & for_condition\n\n # update in memory slice\n # self.sliced_day_.loc[all_conditions] = True\n elif 'interpolate' in f_set.lower():\n # we need to upsample to series to 8760 * 60 values\n new_idx = pd.date_range(start=self.startDate,\n periods=525600, closed='left',\n freq='T')\n series = series.resample('T').pad()\n series = series.reindex(new_idx)\n series.fillna(method='pad', inplace=True)\n through_conditions = through_conditions.resample('T').pad()\n through_conditions = through_conditions.reindex(new_idx)\n through_conditions.fillna(method='pad', inplace=True)\n for_condition = for_condition.resample('T').pad()\n for_condition = for_condition.reindex(new_idx)\n for_condition.fillna(method='pad', inplace=True)\n how_interpolate = value.lower()\n elif f_set.lower() == 'until':\n until_condition = self.invalidate_condition(series)\n if series.index.freq.name == 'T':\n # until_time = str(int(hour) - 1) + ':' + minute\n until_time = timedelta(hours=int(hour),\n minutes=int(minute)) - timedelta(\n minutes=1)\n\n else:\n until_time = str(int(hour) - 1) + ':' + minute\n until_condition.loc[until_condition.between_time(from_time,\n str(\n until_time)).index] = True\n all_conditions = for_condition & through_conditions & \\\n until_condition\n\n from_time = str(int(hour)) + ':' + minute\n elif f_set.lower() == 'value':\n # If the 
therm `Value: ` field is used, we will catch it\n # here.\n # update in memory slice\n slicer_.loc[all_conditions] = True\n series[all_conditions] = value\n else:\n # Do something here before looping to the next Field\n pass\n else:\n # If the term `Value: ` is not used; the variable is simply\n # passed in the Field\n value = float(field)\n series[all_conditions] = value\n\n # update in memory slice\n slicer_.loc[all_conditions] = True\n if how_interpolate:\n return series.resample('H').mean().values\n else:\n return series.values\n\n def field_interpreter(self, field):\n \"\"\"dealing with a Field-Set (Through, For, Interpolate,\n # Until, Value) and return the parsed string\"\"\"\n\n if 'through' in field.lower():\n # deal with through\n if ':' in field.lower():\n # parse colon\n f_set, statement = field.split(':')\n hour = None\n minute = None\n value = statement.strip()\n else:\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n elif 'for' in field.lower():\n if ':' in field.lower():\n # parse colon\n f_set, statement = field.split(':')\n value = statement.strip()\n hour = None\n minute = None\n else:\n # parse without a colon\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n elif 'interpolate' in field.lower():\n msg = 'The schedule \"{sch}\" contains sub-hourly values (' \\\n 'Field-Set=\"{field}\"). The average over the hour is ' \\\n 'taken'.format(sch=self.schName, field=field)\n log(msg, lg.WARNING)\n f_set, value = field.split(':')\n hour = None\n minute = None\n elif 'until' in field.lower():\n if ':' in field.lower():\n # parse colon\n try:\n f_set, hour, minute = field.split(':')\n hour = hour.strip() # remove trailing spaces\n minute = minute.strip() # remove trailing spaces\n value = None\n except:\n f_set = 'until'\n hour, minute = field.split(':')\n hour = hour[-2:].strip()\n minute = minute.strip()\n value = None\n else:\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n elif 'value' in field.lower():\n if ':' in field.lower():\n # parse colon\n f_set, statement = field.split(':')\n value = statement.strip()\n hour = None\n minute = None\n else:\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n else:\n # deal with the data value\n f_set = field\n hour = None\n minute = None\n value = field[len(field) + 1:].strip()\n\n return f_set, hour, minute, value\n\n @staticmethod\n def invalidate_condition(series):\n index = series.index\n periods = len(series)\n return pd.Series([False] * periods, index=index)\n\n def get_yearly_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:year'\"\"\"\n # first week\n\n start_date = self.startDate\n idx = pd.date_range(start=start_date, periods=8760, freq='1H')\n hourly_values = pd.Series([0] * 8760, index=idx)\n\n # update last day of schedule\n self.endHOY = 8760\n\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:year'.upper(), sch_name)\n\n # generate weekly schedules\n num_of_weekly_schedules = int(len(values.fieldvalues[3:]) / 5)\n\n for i in range(num_of_weekly_schedules):\n ref = values.get_referenced_object(\n 
'ScheduleWeek_Name_{}'.format(i + 1))\n\n start_month = values['Start_Month_{}'.format(i + 1)]\n end_month = values['End_Month_{}'.format(i + 1)]\n start_day = values['Start_Day_{}'.format(i + 1)]\n end_day = values['End_Day_{}'.format(i + 1)]\n\n start = datetime.strptime(\n '{}/{}/{}'.format(self.year, start_month, start_day),\n '%Y/%m/%d')\n end = datetime.strptime(\n '{}/{}/{}'.format(self.year, end_month, end_day),\n '%Y/%m/%d')\n days = (end - start).days + 1\n\n end_date = start_date + timedelta(days=days) + timedelta(hours=23)\n how = pd.IndexSlice[start_date:end_date]\n\n weeks = []\n for name, week in hourly_values.loc[how].groupby(\n pd.Grouper(freq='168H')):\n if not week.empty:\n try:\n week.loc[:] = self.get_schedule_values(\n sch_name=ref.Name, start_date=week.index[0],\n index=week.index, sch_type=ref.key)\n except ValueError:\n week.loc[:] = self.get_schedule_values(\n ref.Name, week.index[0])[0:len(week)]\n finally:\n weeks.append(week)\n new = pd.concat(weeks)\n hourly_values.update(new)\n start_date += timedelta(days=days)\n\n return hourly_values.values\n\n def get_schedule_values(self, sch_name=None, start_date=None, index=None,\n sch_type=None):\n \"\"\"Main function that returns the schedule values\n\n Args:\n sch_type:\n index:\n start_date:\n \"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n if sch_type is None:\n schedule_values = self.idf.get_schedule_data_by_name(sch_name)\n self.schType = schedule_values.key.upper()\n sch_type = self.schType\n if self.count == 0:\n # This is the first time, get the schedule type and the type limits.\n self.schTypeLimitsName = self.get_schedule_type_limits_name()\n self.count += 1\n\n if sch_type.upper() == \"schedule:year\".upper():\n hourly_values = self.get_yearly_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:day:interval\".upper():\n hourly_values = self.get_interval_day_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:day:hourly\".upper():\n hourly_values = self.get_hourly_day_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:day:list\".upper():\n hourly_values = self.get_list_day_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:week:compact\".upper():\n hourly_values = self.get_compact_weekly_ep_schedule_values(\n sch_name, start_date, index)\n elif sch_type.upper() == \"schedule:week:daily\".upper():\n hourly_values = self.get_daily_weekly_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:constant\".upper():\n hourly_values = self.get_constant_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:compact\".upper():\n hourly_values = self.get_compact_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:file\".upper():\n hourly_values = self.get_file_ep_schedule_values(\n sch_name)\n else:\n log('Archetypal does not support \"{}\" currently'.format(\n self.schType), lg.WARNING)\n\n hourly_values = []\n\n return hourly_values\n\n def is_schedule(self, sch_name):\n \"\"\"Returns True if idfobject is one of 'schedule_types'\"\"\"\n if sch_name.upper() in self.idf.schedules_dict:\n return True\n else:\n return False\n\n def to_year_week_day(self):\n \"\"\"convert a Schedule Class to the 'Schedule:Year',\n 'Schedule:Week:Daily' and 'Schedule:Day:Hourly' representation\n\n Returns:\n 'Schedule:Year', list of ['Schedule:Week:Daily'],\n list of ['Schedule:Day:Hourly']\n \"\"\"\n\n full_year = np.array(self.all_values) # array of shape (8760,)\n values = full_year.reshape(-1, 24) # 
shape (365, 24)\n\n # create unique days\n unique_days, nds = np.unique(values, axis=0, return_inverse=True)\n\n ep_days = []\n dict_day = {}\n count_day = 0\n for unique_day in unique_days:\n name = 'd_' + self.schName + '_' + '%03d' % count_day\n name, count_day = archetypal.check_unique_name('d', count_day,\n name,\n archetypal.settings.unique_schedules,\n suffix=True)\n dict_day[name] = unique_day\n\n archetypal.settings.unique_schedules.append(name)\n\n # Create idf_objects for schedule:day:hourly\n ep_day = self.idf.add_object(\n ep_object='Schedule:Day:Hourly'.upper(),\n save=False,\n **dict(Name=name,\n Schedule_Type_Limits_Name=self.schType,\n **{'Hour_{}'.format(i + 1): unique_day[i]\n for i in range(24)})\n )\n ep_days.append(ep_day)\n\n # create unique weeks from unique days\n unique_weeks, nwsi, nws, count = np.unique(\n full_year[:364 * 24, ...].reshape(-1, 168), return_index=True,\n axis=0, return_inverse=True, return_counts=True)\n\n # Appending unique weeks in dictionary with name and values of weeks as\n # keys\n # {'name_week': {'dayName':[]}}\n dict_week = {}\n count_week = 0\n for unique_week in unique_weeks:\n week_id = 'w_' + self.schName + '_' + '%03d' % count_week\n week_id, count_week = archetypal.check_unique_name('w',\n count_week,\n week_id,\n archetypal.settings.unique_schedules,\n suffix=True)\n archetypal.settings.unique_schedules.append(week_id)\n\n dict_week[week_id] = {}\n for i in list(range(0, 7)):\n day_of_week = unique_week[..., i * 24:(i + 1) * 24]\n for key in dict_day:\n if (day_of_week == dict_day[key]).all():\n dict_week[week_id]['day_{}'.format(i)] = key\n\n # Create idf_objects for schedule:week:daily\n list_day_of_week = ['Sunday', 'Monday', 'Tuesday',\n 'Wednesday', 'Thursday', 'Friday', 'Saturday']\n ordered_day_n = np.array([6, 0, 1, 2, 3, 4, 5])\n ordered_day_n = np.roll(ordered_day_n, self.startDayOfTheWeek)\n ep_weeks = []\n for week_id in dict_week:\n ep_week = self.idf.add_object(\n ep_object='Schedule:Week:Daily'.upper(),\n save=False,\n **dict(Name=week_id,\n **{'{}_ScheduleDay_Name'.format(\n weekday): dict_week[week_id][\n 'day_{}'.format(i)] for\n i, weekday in\n zip(ordered_day_n, list_day_of_week)\n },\n Holiday_ScheduleDay_Name=\n dict_week[week_id]['day_6'],\n SummerDesignDay_ScheduleDay_Name=\n dict_week[week_id]['day_1'],\n WinterDesignDay_ScheduleDay_Name=\n dict_week[week_id]['day_1'],\n CustomDay1_ScheduleDay_Name=\n dict_week[week_id]['day_2'],\n CustomDay2_ScheduleDay_Name=\n dict_week[week_id]['day_5'])\n )\n ep_weeks.append(ep_week)\n\n import itertools\n blocks = {}\n from_date = datetime(self.year, 1, 1)\n bincount = [sum(1 for _ in group)\n for key, group in itertools.groupby(nws + 1) if key]\n week_order = {i: v for i, v in enumerate(np.array(\n [key for key, group in itertools.groupby(nws + 1) if key]) - 1)}\n for i, (week_n, count) in enumerate(\n zip(week_order, bincount)):\n week_id = list(dict_week)[week_order[i]]\n to_date = from_date + timedelta(days=int(count * 7), hours=-1)\n blocks[i] = {}\n blocks[i]['week_id'] = week_id\n blocks[i]['from_day'] = from_date.day\n blocks[i]['end_day'] = to_date.day\n blocks[i]['from_month'] = from_date.month\n blocks[i]['end_month'] = to_date.month\n from_date = to_date + timedelta(hours=1)\n\n # If this is the last block, force end of year\n if i == len(bincount) - 1:\n blocks[i]['end_day'] = 31\n blocks[i]['end_month'] = 12\n\n new_dict = dict(Name=self.schName + '_',\n Schedule_Type_Limits_Name=self.schTypeLimitsName)\n for i in blocks:\n 
new_dict.update({\"ScheduleWeek_Name_{}\".format(i + 1):\n blocks[i]['week_id'],\n \"Start_Month_{}\".format(i + 1):\n blocks[i]['from_month'],\n \"Start_Day_{}\".format(i + 1):\n blocks[i]['from_day'],\n \"End_Month_{}\".format(i + 1):\n blocks[i]['end_month'],\n \"End_Day_{}\".format(i + 1):\n blocks[i]['end_day']})\n\n ep_year = self.idf.add_object(ep_object='Schedule:Year'.upper(),\n save=False, **new_dict)\n return ep_year, ep_weeks, ep_days\n\n def date_field_interpretation(self, field):\n \"\"\"Date Field Interpretation\n\n Args:\n field (str): The EnergyPlus Field Contents\n\n Returns:\n (datetime): The datetime object\n\n Info:\n See EnergyPlus documentation for more details:\n 1.6.8.1.2 Field: Start Date (Table 1.4: Date Field Interpretation)\n \"\"\"\n # < number > Weekday in Month\n formats = ['%m/%d', '%d %B', '%B %d', '%d %b', '%b %d']\n date = None\n for format_str in formats:\n # Tru to parse using each defined formats\n try:\n date = datetime.strptime(field, format_str)\n except:\n pass\n else:\n date = datetime(self.year, date.month, date.day)\n if date is None:\n # if the defined formats did not work, try the fancy parse\n try:\n date = self.parse_fancy_string(field)\n except:\n msg = \"the schedule '{sch}' contains a \" \\\n \"Field that is not understood: '{field}'\".format(\n sch=self.schName,\n field=field)\n raise ValueError(msg)\n else:\n return date\n else:\n return date\n\n def parse_fancy_string(self, field):\n \"\"\"Will try to parse cases such as `3rd Monday in February` or `Last\n Weekday In Month`\n\n Args:\n field (str): The EnergyPlus Field Contents\n\n Returns:\n (datetime): The datetime object\n \"\"\"\n import re\n\n # split the string at the term ' in '\n time, month = field.lower().split(' in ')\n month = datetime.strptime(month, '%B').month\n\n # split the first part into nth and dayofweek\n nth, dayofweek = time.split(' ')\n if 'last' in nth:\n nth = -1 # Use the last one\n else:\n nth = re.findall(r'\\d+', nth) # use the nth one\n nth = int(nth[0]) - 1 # python is zero-based\n\n weekday = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,\n 'friday': 4, 'saturday': 5, 'sunday': 6}\n\n # parse the dayofweek eg. 
monday\n dayofweek = weekday.get(dayofweek, 6)\n\n # create list of possible days using Calendar\n import calendar\n c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)\n monthcal = c.monthdatescalendar(self.year, month)\n\n # iterate though the month and get the nth weekday\n date = [day for week in monthcal for day in week if \\\n day.weekday() == dayofweek and \\\n day.month == month][nth]\n return datetime(date.year, date.month, date.day)\n\n def field_set(self, field, slicer_=None):\n \"\"\"helper function to return the proper slicer depending on the\n field_set value.\n\n Available values are:\n Weekdays, Weekends, Holidays, Alldays, SummerDesignDay,\n WinterDesignDay, Sunday, Monday, Tuesday, Wednesday, Thursday,\n Friday, Saturday, CustomDay1, CustomDay2, AllOtherDays\n\n Args:\n field (str): The EnergyPlus field set value.\n slicer_ (pd.Series): The persistent slicer for this schedule\n\n Returns:\n (indexer-like): Returns the appropriate indexer for the series.\n \"\"\"\n\n if field.lower() == 'weekdays':\n # return only days of weeks\n return lambda x: x.index.dayofweek < 5\n elif field.lower() == 'weekends':\n # return only weekends\n return lambda x: x.index.dayofweek >= 5\n elif field.lower() == 'alldays':\n log('For schedule \"{}\", the field-set \"AllDays\" may be overridden '\n 'by the \"AllOtherDays\" field-set'.format(\n self.schName), lg.WARNING)\n # return all days := equivalenet to .loc[:]\n return pd.IndexSlice[:]\n elif field.lower() == 'allotherdays':\n # return unused days (including special days). Uses the global\n # variable `slicer_`\n import operator\n if slicer_ is not None:\n return _conjunction(*[self.special_day(field, slicer_),\n ~slicer_], logical=operator.or_)\n else:\n raise NotImplementedError\n elif field.lower() == 'sunday':\n # return only sundays\n return lambda x: x.index.dayofweek == 6\n elif field.lower() == 'monday':\n # return only mondays\n return lambda x: x.index.dayofweek == 0\n elif field.lower() == 'tuesday':\n # return only Tuesdays\n return lambda x: x.index.dayofweek == 1\n elif field.lower() == 'wednesday':\n # return only Wednesdays\n return lambda x: x.index.dayofweek == 2\n elif field.lower() == 'thursday':\n # return only Thursdays\n return lambda x: x.index.dayofweek == 3\n elif field.lower() == 'friday':\n # return only Fridays\n return lambda x: x.index.dayofweek == 4\n elif field.lower() == 'saturday':\n # return only Saturdays\n return lambda x: x.index.dayofweek == 5\n elif field.lower() == 'summerdesignday':\n # return design_day(self, field)\n return None\n elif field.lower() == 'winterdesignday':\n # return design_day(self, field)\n return None\n elif field.lower() == 'holiday' or field.lower() == 'holidays':\n field = 'holiday'\n return self.special_day(field, slicer_)\n elif not self.strict:\n # If not strict, ignore missing field-sets such as CustomDay1\n return pd.IndexSlice[:]\n else:\n raise NotImplementedError(\n 'Archetypal does not yet support The '\n 'Field_set \"{}\"'.format(field))\n\n def __len__(self):\n \"\"\"returns the length of all values of the schedule\"\"\"\n return len(self.all_values)\n\n def __eq__(self, other):\n \"\"\"Overrides the default implementation\"\"\"\n if isinstance(other, Schedule):\n return self.all_values == other.all_values\n else:\n raise NotImplementedError\n\n def __ne__(self, other):\n return ~(self.__eq__(other))\n\n def __add__(self, other):\n if isinstance(other, Schedule):\n return self.all_values + other.all_values\n elif isinstance(other, list):\n return 
self.all_values + other\n else:\n raise NotImplementedError\n\n def __sub__(self, other):\n if isinstance(other, Schedule):\n return self.all_values - other.all_values\n elif isinstance(other, list):\n return self.all_values - other\n else:\n raise NotImplementedError\n\n def __mul__(self, other):\n if isinstance(other, Schedule):\n return self.all_values * other.all_values\n elif isinstance(other, list):\n return self.all_values * other\n else:\n raise NotImplementedError\n\n def get_sdow(self, start_day_of_week):\n \"\"\"Returns the start day of the week\"\"\"\n if start_day_of_week is None:\n return self.idf.day_of_week_for_start_day\n else:\n return start_day_of_week\n\n def special_day(self, field, slicer_):\n \"\"\"try to get the RunPeriodControl:SpecialDays for the corresponding\n Day Type\"\"\"\n sp_slicer_ = slicer_.copy()\n sp_slicer_.loc[:] = False\n special_day_types = ['holiday', 'customday1', 'customday2']\n\n dds = self.idf.idfobjects['RunPeriodControl:SpecialDays'.upper()]\n dd = [dd for dd in dds if dd.Special_Day_Type.lower() == field\n or dd.Special_Day_Type.lower() in special_day_types]\n if len(dd) > 0:\n slice = []\n for dd in dd:\n # can have more than one special day types\n data = dd.Start_Date\n ep_start_date = self.date_field_interpretation(data)\n ep_orig = datetime(self.year, 1, 1)\n days_to_speciald = (ep_start_date - ep_orig).days\n duration = int(dd.Duration)\n from_date = self.startDate + timedelta(days=days_to_speciald)\n to_date = from_date + timedelta(days=duration) + timedelta(\n hours=-1)\n\n sp_slicer_.loc[from_date:to_date] = True\n return sp_slicer_\n elif not self.strict:\n return sp_slicer_\n else:\n msg = 'Could not find a \"SizingPeriod:DesignDay\" object ' \\\n 'needed for schedule \"{}\" with Day Type \"{}\"'.format(\n self.schName, field.capitalize()\n )\n raise ValueError(msg)\n\n\ndef design_day(schedule, field):\n # try to get the SizingPeriod:DesignDay for the corresponding Day Type\n dds = schedule.idf.idfobjects['SizingPeriod:DesignDay'.upper()]\n dd = [dd for dd in dds if dd.Day_Type.lower() == field]\n if len(dd) > 0:\n # should have found only one design day matching the Day Type\n\n data = [dd[0].Month, dd[0].Day_of_Month]\n date = '/'.join([str(item).zfill(2) for item in data])\n date = schedule.date_field_interpretation(date)\n return lambda x: x.index == date\n else:\n msg = 'Could not find a \"SizingPeriod:DesignDay\" object ' \\\n 'needed for schedule \"{}\" with Day Type \"{}\"'.format(\n schedule.schName, field.capitalize()\n )\n raise ValueError(msg)\n\n\ndef _conjunction(*conditions, logical=np.logical_and):\n \"\"\"Applies a logical function on n conditions\"\"\"\n return functools.reduce(logical, conditions)\n\n\ndef _separator(sep):\n \"\"\"helper function to return the correct delimiter\"\"\"\n if sep == 'Comma':\n return ','\n elif sep == 'Tab':\n return '\\t'\n elif sep == 'Fixed':\n return None\n elif sep == 'Semicolon':\n return ';'\n else:\n return ','\n\n\ndef _how(how):\n \"\"\"Helper function to return the correct resampler\"\"\"\n if how.lower() == 'average':\n return 'mean'\n elif how.lower() == 'linear':\n return 'interpolate'\n elif how.lower() == 'no':\n return 'max'\n else:\n return 'max'\n"
] | [
[
"numpy.roll",
"pandas.Series",
"pandas.date_range",
"pandas.read_csv",
"numpy.arange",
"pandas.Grouper",
"pandas.concat",
"numpy.array",
"numpy.unique",
"numpy.mean"
]
] |
sleep-yearning/magenta | [
"a03a14ef5a691ee9e3d336aa621281028dc5af32",
"a03a14ef5a691ee9e3d336aa621281028dc5af32"
] | [
"magenta/models/score2perf/music_encoders_test.py",
"magenta/models/drums_rnn/drums_rnn_generate.py"
] | [
"# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Score2Perf music encoders.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tempfile\n\nimport magenta\nfrom magenta.models.score2perf import music_encoders\nfrom magenta.music import testing_lib\nfrom magenta.music.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf\n\n\nclass MidiPerformanceEncoderTest(tf.test.TestCase):\n\n def testNumReservedIds(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n self.assertEqual(2, encoder.num_reserved_ids)\n\n def testEncodeEmptyNoteSequence(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n ids = encoder.encode_note_sequence(music_pb2.NoteSequence())\n self.assertEqual([], ids)\n\n def testEncodeEmptyNoteSequenceAddEos(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n add_eos=True)\n ids = encoder.encode_note_sequence(music_pb2.NoteSequence())\n self.assertEqual([1], ids)\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 45, # NOTE-ON(64)\n 277, # TIME-SHIFT(100)\n 309, # VELOCITY(32)\n 48, # NOTE-ON(67)\n 277, # TIME-SHIFT(100)\n 136, # NOTE-OFF(67)\n 277, # TIME-SHIFT(100)\n 133, # NOTE-OFF(64\n 277, # TIME-SHIFT(100)\n 129 # NOTE-OFF(60)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncodeNoteSequenceAddEos(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n add_eos=True)\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 45, # NOTE-ON(64)\n 277, # TIME-SHIFT(100)\n 309, # VELOCITY(32)\n 48, # NOTE-ON(67)\n 277, # TIME-SHIFT(100)\n 136, # NOTE-OFF(67)\n 277, # TIME-SHIFT(100)\n 133, # NOTE-OFF(64\n 277, # TIME-SHIFT(100)\n 129, # NOTE-OFF(60)\n 1 # EOS\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncodeNoteSequenceNGrams(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(41, 45), (277, 309, 300), (309, 48), (277, 129, 130)])\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 
2.0)])\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 310, # NOTE-ON(60), NOTE-ON(64)\n 277, # TIME-SHIFT(100)\n 312, # VELOCITY(32), NOTE-ON(67)\n 277, # TIME-SHIFT(100)\n 136, # NOTE-OFF(67)\n 277, # TIME-SHIFT(100)\n 133, # NOTE-OFF(64\n 277, # TIME-SHIFT(100)\n 129 # NOTE-OFF(60)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncode(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(277, 129)])\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(ns, 0, [(60, 97, 0.0, 1.0)])\n\n # Write NoteSequence to MIDI file as encoder takes in filename.\n with tempfile.NamedTemporaryFile(suffix='.mid') as f:\n magenta.music.sequence_proto_to_midi_file(ns, f.name)\n ids = encoder.encode(f.name)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 310 # TIME-SHIFT(100), NOTE-OFF(60)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testDecode(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(277, 129)])\n\n ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 310 # TIME-SHIFT(100), NOTE-OFF(60)\n ]\n\n # Decode method returns MIDI filename, read and convert to NoteSequence.\n filename = encoder.decode(ids)\n ns = magenta.music.midi_file_to_sequence_proto(filename)\n\n # Remove default tempo & time signature.\n del ns.tempos[:]\n del ns.time_signatures[:]\n\n expected_ns = music_pb2.NoteSequence(ticks_per_quarter=220)\n testing_lib.add_track_to_sequence(expected_ns, 0, [(60, 97, 0.0, 1.0)])\n\n # Add source info fields.\n expected_ns.source_info.encoding_type = (\n music_pb2.NoteSequence.SourceInfo.MIDI)\n expected_ns.source_info.parser = (\n music_pb2.NoteSequence.SourceInfo.PRETTY_MIDI)\n\n self.assertEqual(expected_ns, ns)\n\n def testVocabSize(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n self.assertEqual(310, encoder.vocab_size)\n\n def testVocabSizeNGrams(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(41, 45), (277, 309, 300), (309, 48), (277, 129, 130)])\n self.assertEqual(314, encoder.vocab_size)\n\n\nclass TextChordsEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.TextChordsEncoder(steps_per_quarter=1)\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_chords_to_sequence(\n ns, [('C', 1), ('Dm', 3), ('Bdim', 4)])\n ns.total_time = 5.0\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 2, # no-chord\n 3, # C major\n 3, # C major\n 17, # D minor\n 50 # B diminished\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncode(self):\n encoder = music_encoders.TextChordsEncoder(steps_per_quarter=1)\n\n ids = encoder.encode('C G Am F')\n expected_ids = [\n 3, # C major\n 10, # G major\n 24, # A minor\n 8 # F major\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testVocabSize(self):\n encoder = music_encoders.TextChordsEncoder(steps_per_quarter=1)\n self.assertEqual(51, encoder.vocab_size)\n\n\nclass TextMelodyEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n encoder_absolute = music_encoders.TextMelodyEncoderAbsolute(\n steps_per_second=4, 
min_pitch=21, max_pitch=108)\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_track_to_sequence(\n ns, 0,\n [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n ids_absolute = encoder_absolute.encode_note_sequence(ns)\n\n expected_ids = [\n 43, # ON(60)\n 45, # ON(62)\n 2, # HOLD(62)\n 3, # OFF(62)\n 2, # REST\n 47, # ON(64)\n 2, # HOLD(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_ids, ids)\n self.assertEqual(expected_ids, ids_absolute)\n\n def testEncode(self):\n encoder = music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n\n ids = encoder.encode('60 -2 62 -1 64 -2')\n expected_ids = [\n 43, # ON(60)\n 2, # HOLD(60)\n 45, # ON(62)\n 3, # OFF(62)\n 47, # ON(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testVocabSize(self):\n encoder = music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n self.assertEqual(92, encoder.vocab_size)\n\n\nclass FlattenedTextMelodyEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.FlattenedTextMelodyEncoderAbsolute(\n steps_per_second=4, num_velocity_bins=127)\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_track_to_sequence(\n ns, 0,\n [(60, 127, 0.0, 0.25), (62, 15, 0.25, 0.75), (64, 32, 1.25, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n expected_ids = [\n 130, # ON(vel=127)\n 18, # ON(vel=15)\n 2, # HOLD(62)\n 2, # REST\n 2, # REST\n 35, # ON(vel=32)\n 2, # HOLD(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testVocabSize(self):\n num_vel_bins = 12\n encoder = music_encoders.FlattenedTextMelodyEncoderAbsolute(\n steps_per_second=4, num_velocity_bins=num_vel_bins)\n expected = num_vel_bins + encoder.num_reserved_ids + 2\n self.assertEqual(expected, encoder.vocab_size)\n\n\nclass CompositeScoreEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.CompositeScoreEncoder([\n music_encoders.TextChordsEncoder(steps_per_quarter=4),\n music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n ])\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_chords_to_sequence(ns, [('C', 0.5), ('Dm', 1.0)])\n testing_lib.add_track_to_sequence(\n ns, 0,\n [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])\n chord_ids, melody_ids = zip(*encoder.encode_note_sequence(ns))\n\n expected_chord_ids = [\n 2, # no-chord\n 2, # no-chord\n 3, # C major\n 3, # C major\n 17, # D minor\n 17, # D minor\n 17, # D minor\n 17 # D minor\n ]\n\n expected_melody_ids = [\n 43, # ON(60)\n 45, # ON(62)\n 2, # HOLD(62)\n 3, # OFF(62)\n 2, # REST\n 47, # ON(64)\n 2, # HOLD(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_chord_ids, list(chord_ids))\n self.assertEqual(expected_melody_ids, list(melody_ids))\n\n # TODO(iansimon): also test MusicXML encoding\n\n def testVocabSize(self):\n encoder = music_encoders.CompositeScoreEncoder([\n music_encoders.TextChordsEncoder(steps_per_quarter=4),\n music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n ])\n self.assertEqual([51, 92], encoder.vocab_size)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generate drum tracks from a trained checkpoint of a drums RNN model.\n\nUses flags to define operation.\n\"\"\"\n\nimport ast\nimport os\nimport time\n\nimport magenta\nfrom magenta.models.drums_rnn import drums_rnn_config_flags\nfrom magenta.models.drums_rnn import drums_rnn_model\nfrom magenta.models.drums_rnn import drums_rnn_sequence_generator\nfrom magenta.models.shared import sequence_generator\nfrom magenta.models.shared import sequence_generator_bundle\nfrom magenta.music.protobuf import generator_pb2\nfrom magenta.music.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string(\n 'run_dir', None,\n 'Path to the directory where the latest checkpoint will be loaded from.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_file', None,\n 'Path to the checkpoint file. run_dir will take priority over this flag.')\ntf.app.flags.DEFINE_string(\n 'bundle_file', None,\n 'Path to the bundle file. If specified, this will take priority over '\n 'run_dir and checkpoint_file, unless save_generator_bundle is True, in '\n 'which case both this flag and either run_dir or checkpoint_file are '\n 'required')\ntf.app.flags.DEFINE_boolean(\n 'save_generator_bundle', False,\n 'If true, instead of generating a sequence, will save this generator as a '\n 'bundle file in the location specified by the bundle_file flag')\ntf.app.flags.DEFINE_string(\n 'bundle_description', None,\n 'A short, human-readable text description of the bundle (e.g., training '\n 'data, hyper parameters, etc.).')\ntf.app.flags.DEFINE_string(\n 'output_dir', '/tmp/drums_rnn/generated',\n 'The directory where MIDI files will be saved to.')\ntf.app.flags.DEFINE_integer(\n 'num_outputs', 10,\n 'The number of drum tracks to generate. One MIDI file will be created for '\n 'each.')\ntf.app.flags.DEFINE_integer(\n 'num_steps', 128,\n 'The total number of steps the generated drum tracks should be, priming '\n 'drum track length + generated steps. Each step is a 16th of a bar.')\ntf.app.flags.DEFINE_string(\n 'primer_drums', '',\n 'A string representation of a Python list of tuples containing drum pitch '\n 'values. For example: '\n '\"[(36,42),(),(),(),(42,),(),(),()]\". If specified, this drum track will '\n 'be used as the priming drum track. If a priming drum track is not '\n 'specified, drum tracks will be generated from scratch.')\ntf.app.flags.DEFINE_string(\n 'primer_midi', '',\n 'The path to a MIDI file containing a drum track that will be used as a '\n 'priming drum track. If a primer drum track is not specified, drum tracks '\n 'will be generated from scratch.')\ntf.app.flags.DEFINE_float(\n 'qpm', None,\n 'The quarters per minute to play generated output at. If a primer MIDI is '\n 'given, the qpm from that will override this flag. If qpm is None, qpm '\n 'will default to 120.')\ntf.app.flags.DEFINE_float(\n 'temperature', 1.0,\n 'The randomness of the generated drum tracks. 
1.0 uses the unaltered '\n 'softmax probabilities, greater than 1.0 makes tracks more random, less '\n 'than 1.0 makes tracks less random.')\ntf.app.flags.DEFINE_integer(\n 'beam_size', 1,\n 'The beam size to use for beam search when generating drum tracks.')\ntf.app.flags.DEFINE_integer(\n 'branch_factor', 1,\n 'The branch factor to use for beam search when generating drum tracks.')\ntf.app.flags.DEFINE_integer(\n 'steps_per_iteration', 1,\n 'The number of steps to take per beam search iteration.')\ntf.app.flags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n\ndef get_checkpoint():\n \"\"\"Get the training dir or checkpoint path to be used by the model.\"\"\"\n if ((FLAGS.run_dir or FLAGS.checkpoint_file) and\n FLAGS.bundle_file and not FLAGS.save_generator_bundle):\n raise sequence_generator.SequenceGeneratorError(\n 'Cannot specify both bundle_file and run_dir or checkpoint_file')\n if FLAGS.run_dir:\n train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')\n return train_dir\n elif FLAGS.checkpoint_file:\n return os.path.expanduser(FLAGS.checkpoint_file)\n else:\n return None\n\n\ndef get_bundle():\n \"\"\"Returns a generator_pb2.GeneratorBundle object based read from bundle_file.\n\n Returns:\n Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is\n not set or the save_generator_bundle flag is set.\n \"\"\"\n if FLAGS.save_generator_bundle:\n return None\n if FLAGS.bundle_file is None:\n return None\n bundle_file = os.path.expanduser(FLAGS.bundle_file)\n return sequence_generator_bundle.read_bundle_file(bundle_file)\n\n\ndef run_with_flags(generator):\n \"\"\"Generates drum tracks and saves them as MIDI files.\n\n Uses the options specified by the flags defined in this module.\n\n Args:\n generator: The DrumsRnnSequenceGenerator to use for generation.\n \"\"\"\n if not FLAGS.output_dir:\n tf.logging.fatal('--output_dir required')\n return\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n\n primer_midi = None\n if FLAGS.primer_midi:\n primer_midi = os.path.expanduser(FLAGS.primer_midi)\n\n if not tf.gfile.Exists(FLAGS.output_dir):\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n primer_sequence = None\n qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE\n if FLAGS.primer_drums:\n primer_drums = magenta.music.DrumTrack(\n [frozenset(pitches)\n for pitches in ast.literal_eval(FLAGS.primer_drums)])\n primer_sequence = primer_drums.to_sequence(qpm=qpm)\n elif primer_midi:\n primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)\n if primer_sequence.tempos and primer_sequence.tempos[0].qpm:\n qpm = primer_sequence.tempos[0].qpm\n else:\n tf.logging.warning(\n 'No priming sequence specified. 
Defaulting to a single bass drum hit.')\n primer_drums = magenta.music.DrumTrack([frozenset([36])])\n primer_sequence = primer_drums.to_sequence(qpm=qpm)\n\n # Derive the total number of seconds to generate based on the QPM of the\n # priming sequence and the num_steps flag.\n seconds_per_step = 60.0 / qpm / generator.steps_per_quarter\n total_seconds = FLAGS.num_steps * seconds_per_step\n\n # Specify start/stop time for generation based on starting generation at the\n # end of the priming sequence and continuing until the sequence is num_steps\n # long.\n generator_options = generator_pb2.GeneratorOptions()\n if primer_sequence:\n input_sequence = primer_sequence\n # Set the start time to begin on the next step after the last note ends.\n if primer_sequence.notes:\n last_end_time = max(n.end_time for n in primer_sequence.notes)\n else:\n last_end_time = 0\n generate_section = generator_options.generate_sections.add(\n start_time=last_end_time + seconds_per_step,\n end_time=total_seconds)\n\n if generate_section.start_time >= generate_section.end_time:\n tf.logging.fatal(\n 'Priming sequence is longer than the total number of steps '\n 'requested: Priming sequence length: %s, Generation length '\n 'requested: %s',\n generate_section.start_time, total_seconds)\n return\n else:\n input_sequence = music_pb2.NoteSequence()\n input_sequence.tempos.add().qpm = qpm\n generate_section = generator_options.generate_sections.add(\n start_time=0,\n end_time=total_seconds)\n generator_options.args['temperature'].float_value = FLAGS.temperature\n generator_options.args['beam_size'].int_value = FLAGS.beam_size\n generator_options.args['branch_factor'].int_value = FLAGS.branch_factor\n generator_options.args[\n 'steps_per_iteration'].int_value = FLAGS.steps_per_iteration\n tf.logging.debug('input_sequence: %s', input_sequence)\n tf.logging.debug('generator_options: %s', generator_options)\n\n # Make the generate request num_outputs times and save the output as midi\n # files.\n date_and_time = time.strftime('%Y-%m-%d_%H%M%S')\n digits = len(str(FLAGS.num_outputs))\n for i in range(FLAGS.num_outputs):\n generated_sequence = generator.generate(input_sequence, generator_options)\n\n midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))\n midi_path = os.path.join(FLAGS.output_dir, midi_filename)\n magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)\n\n tf.logging.info('Wrote %d MIDI files to %s',\n FLAGS.num_outputs, FLAGS.output_dir)\n\n\ndef main(unused_argv):\n \"\"\"Saves bundle or runs generator based on flags.\"\"\"\n tf.logging.set_verbosity(FLAGS.log)\n\n bundle = get_bundle()\n\n if bundle:\n config_id = bundle.generator_details.id\n config = drums_rnn_model.default_configs[config_id]\n config.hparams.parse(FLAGS.hparams)\n else:\n config = drums_rnn_config_flags.config_from_flags()\n # Having too large of a batch size will slow generation down unnecessarily.\n config.hparams.batch_size = min(\n config.hparams.batch_size, FLAGS.beam_size * FLAGS.branch_factor)\n\n generator = drums_rnn_sequence_generator.DrumsRnnSequenceGenerator(\n model=drums_rnn_model.DrumsRnnModel(config),\n details=config.details,\n steps_per_quarter=config.steps_per_quarter,\n checkpoint=get_checkpoint(),\n bundle=bundle)\n\n if FLAGS.save_generator_bundle:\n bundle_filename = os.path.expanduser(FLAGS.bundle_file)\n if FLAGS.bundle_description is None:\n tf.logging.warning('No bundle description provided.')\n tf.logging.info('Saving generator bundle to %s', bundle_filename)\n 
generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)\n else:\n run_with_flags(generator)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n"
] | [
[
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.app.flags.DEFINE_boolean",
"tensorflow.compat.v1.logging.debug",
"tensorflow.compat.v1.gfile.Exists",
"tensorflow.compat.v1.app.flags.DEFINE_float",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.app.flags.DEFINE_integer",
"tensorflow.compat.v1.app.flags.DEFINE_string",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.logging.fatal",
"tensorflow.compat.v1.logging.info"
]
] |
energyinpython/pre-pyrepo | [
"92e44594e12d1110247f011e51734e5ce1fe0b8e"
] | [
"tests/test_correlations.py"
] | [
"from pyrepo import correlations as corrs\nfrom scipy.stats import pearsonr\nimport unittest\nimport numpy as np\n\n\n# Test for Spearman rank correlation coefficient\nclass Test_Spearman(unittest.TestCase):\n\n def test_spearman(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.spearman(R, Q)\n real_result = 0.9\n self.assertEqual(test_result, real_result)\n\n\n# Test for Weighted Spearman rank correlation coefficient\nclass Test_Weighted_Spearman(unittest.TestCase):\n\n def test_weighted_spearman(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.weighted_spearman(R, Q)\n real_result = 0.8833\n self.assertEqual(np.round(test_result, 4), real_result)\n\n\n# Test for Similarity rank coefficient WS\nclass Test_WS(unittest.TestCase):\n\n def test_ws(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.WS_coeff(R, Q)\n real_result = 0.8542\n self.assertEqual(np.round(test_result, 4), real_result)\n\n\n# Test for Pearson correlation coefficient\nclass Test_Pearson(unittest.TestCase):\n\n def test_pearson(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.pearson_coeff(R, Q)\n real_result, _ = pearsonr(R, Q)\n self.assertEqual(test_result, real_result)\n\n\ndef main():\n test_spearman_coeff = Test_Spearman()\n test_spearman_coeff.test_spearman()\n\n test_weighted_spearman_coeff = Test_Weighted_Spearman()\n test_weighted_spearman_coeff.test_weighted_spearman()\n\n test_pearson_coeff = Test_Pearson()\n test_pearson_coeff.test_pearson()\n\n test_ws = Test_WS()\n test_ws.test_ws()\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.array",
"numpy.round",
"scipy.stats.pearsonr"
]
] |
YifanQie/Deep_Learning_for_Manufacturing | [
"9ba19e41f69c561b04b8573ab9c52c0969f45bfd"
] | [
"core/assembly_system.py"
] | [
"import numpy as np\nimport pandas as pd\n\"\"\" Contains core classes and methods for initializing a Assembly System, the inputs are provided in assemblyconfig file in utilities\"\"\"\n\nclass AssemblySystem:\n\t\"\"\"Assembly System Class\n\n\t\t:param assembly_type: Type of assembly Single-Station/Multi-Station\n\t\t:type assembly_system: str (required)\n\n\t\t:param assembly_kccs: Number of KCCs for the assembly\n\t\t:type assembly_kccs: int (required)\n\n\t\t:param assembly_kpis: Number of Kpis for the assembly\n\t\t:type assembly_kpis: int (required) \n\t\"\"\"\n\tdef __init__(self,assembly_type,assembly_kccs,assembly_kpis):\n\t\tself.assembly_type=assembly_type\n\t\tself.assembly_kccs=assembly_kccs\n\t\tself.assembly_kpis=assembly_kpis\n\nclass PartType(AssemblySystem):\n\t\"\"\"Part System Class, inherits the Assembly System Class, additional parameters for this class include\n\t\t\n\t\t:param voxel_dim: Dimension of the voxel\n\t\t:type assembly_system: int (required)\n\n\t\t:param voxel_dim: Dimension of the voxel Channel, single channel output - 1 or multi channel - 2,3 (use 1 for deviations in one direction, 2 or 3 if data for multiple deviation directions are present)\n\t\t:type assembly_system: int (required)\n\n\t\t:param voxel_dim: Dimension of the voxel\n\t\t:type assembly_system: int (required)\n\n\t\tThe class contains two functions - get_nominal_cop and get_nominal_cop_database\n\t\"\"\"\n\tdef __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim):\n\t\tsuper().__init__(assembly_type,assembly_kccs,assembly_kpis)\n\t\tself.part_name=part_name\n\t\tself.part_type=part_type\n\t\tself.voxel_dim=voxel_dim\n\t\tself.voxel_channels=voxel_channels\n\t\tself.point_dim=point_dim\n\t\t\n\n\tdef get_nominal_cop(self,file_name):\n\t\t\"\"\"Import nominal cloud-of-point of the assembly from a text/csv file\n\n\t\t\t:param file_name: Name of the input file\n\t\t\t:type file_name: str (required)\n\n\t\t\t:returns: numpy array of nominal COP\n\t\t\t:rtype: numpy.array [point_dim,3]\n\t\t\"\"\"\n\t\tdf=pd.read_csv(file_name, sep=',',header=None)\n\t\tnominal_cop=df.values\n\t\treturn nominal_cop\n\n\tdef get_nominal_cop_database(self,conn_str,table_name):\n\t\t\"\"\"Import nominal cloud-of-point of the assembly from a SQL database assumes the table only contains three columns of the nominal COPs in order of the Node IDs\t\t\n\t\t\t\n\t\t\t:param conn_str: Connection String for Database\n\t\t\t:type conn_str: str (required)\n\n\t\t\t:param table_name: Name of table in the database\n\t\t\t:type table_name: str (required)\n\n\t\t\t:returns: numpy array of dim points * 3\n\t\t\t:rtype: numpy.array [point_dim,3]\n\t\t\"\"\"\n\t\tengine = create_engine(conn_str)\n\t\tsquery ='select * from '+table_name\n\t\tdf_nom = pd.read_sql_query(squery,con=engine)\n\t\tdf_nom = df_nom.values\n\t\treturn df_nom\n\nclass VRMSimulationModel(PartType):\n\t\n\t\"\"\"VRM Simulation Model class inherits the part type class, additional parameters of this class include\n\n\t\t:param noise_level: The level of artificial noise to be added to simulated data, typically set to 0.1 mm from the measurement system class depending on the scanner\n\t\t:type noise_level: float (required)\n\n\t\t:param noise_type: The type of noise to be added, can be Gaussian or uniform , for Gaussian noise_level is set as standard deviation and mean as zero for uniform the min and max are set -noise_level and +noise_level respectively\n\t\t:type noise_type: str 
(optional)\n\n\t\t:param convergency_flag: Flag to denote if the simulation model had converged while simulating, is set to 1 by default\n\t\t:type convergency_flag: int (optional)\n\n\t\tThe class contains one function kpi_calculator that needs to be defined by the user depending on the assembly output\n\n\t\"\"\"\n\tdef __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,noise_level,noise_type='uniform',convergency_flag=1):\n\t\tsuper().__init__(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)\n\t\tself.noise_level=noise_level\n\t\tself.noise_type=noise_type\n\t\tself.convergency_flag=convergency_flag\n\n\tdef kpi_calculator(self,cop_data,kpi_params=[]):\n\t\t\"\"\" User defined function to calculate KPI from Cloud of Point Data [KPI]=f(Cop)\n\n\t\t\t:param cop_data: CoP data for a given sample\n\t\t\t:type cop_data: np_array [point_dim,3] (required)\n\n\t\t\t:param kpi_params: Various parameters required to calculate the KPI, can be blank if no parameters are required to calculate KPI from CoP\n\t\t\t:type kpi_params: list (optional)\n\n\t\t\t:returns: list of multivariate KPIs for the given CoP\n\t\t\t:rtype: list\n\n\t\t\"\"\"\n\t\t\n\t\tkpi=[None]*self.assembly_kpis\n\n\t\t#define function here \n\t\treturn kpi"
] | [
[
"pandas.read_csv",
"pandas.read_sql_query"
]
] |
dipdeb/DAT210x | [
"9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b"
] | [
"Module3/assignment2.py"
] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n# Look pretty...\n# matplotlib.style.use('ggplot')\nplt.style.use('ggplot')\n\n\n#\n# TODO: Load up the Seeds Dataset into a Dataframe\n# It's located at 'Datasets/wheat.data'\n# \nwheat_df = pd.read_csv('/home/dipanjan/DAT210x/Module3/Datasets/wheat.data', index_col=0);\n\n\n#\n# TODO: Create a 2d scatter plot that graphs the\n# area and perimeter features\n# \n# .. your code here ..\nwheat_df.plot.scatter(x='area', y='perimeter')\n\n#\n# TODO: Create a 2d scatter plot that graphs the\n# groove and asymmetry features\n# \n# .. your code here ..\nwheat_df.plot.scatter(x='groove', y='asymmetry')\n\n#\n# TODO: Create a 2d scatter plot that graphs the\n# compactness and width features\n# \n# .. your code here ..\nwheat_df.plot.scatter(x='compactness', y='width')\n\n\n# BONUS TODO:\n# After completing the above, go ahead and run your program\n# Check out the results, and see what happens when you add\n# in the optional display parameter marker with values of\n# either '^', '.', or 'o'.\nwheat_df.plot.scatter(x='compactness', y='width', marker='o')\n\nplt.show()\n\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show"
]
] |
hongzhouye/sigma-SCF | [
"62e2dce538d1e68c4dc3c72fdf27beb1911e544f"
] | [
"scf/scf_utils.py"
] | [
"import numpy as np\nimport os, sys\nsys.path.append(os.path.dirname(__file__))\nfrom diis_solver import diis_solver, diis_solver_uhf\nsys.path.pop()\nimport jk\nimport xform\n\n\ndef homo_lumo_mix(C, nocc, beta):\n \"\"\"\n Mix a portion of LUMO to HOMO.\n Used when generating spin-unrestricted guess.\n \"\"\"\n if beta < 0. or beta > 1.:\n raise Exception(\"Mixing beta must be in [0, 1]\")\n Cb = C.copy()\n homo = C[:, nocc - 1]\n lumo = C[:, nocc]\n Cb[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo\n return Cb\n\n\ndef get_dm(C, nel):\n D = C[:, :nel]\n D = D @ D.T\n return D\n\n\ndef get_JK(is_fitted, g, D):\n if(is_fitted):\n # FINISH LATER\n X = np.einsum(\"Pls,ls->P\", g, D)\n J = np.einsum(\"mnP,P->mn\", np.swapaxes(g, 0, 2), X)\n Z = np.einsum(\"Pns,ls->Pnl\", g, D)\n K = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Z)\n return (J, K)\n else:\n #J = np.einsum(\"pqrs,rs->pq\", g, D)\n #K = np.einsum(\"prqs,rs->pq\", g, D)\n J, K = jk.getJK_np_Dshift(g, D - np.diag(np.diag(D) * 0.5))\n return (J, K)\n\n\ndef get_JK_uhf(is_fitted, g, Ds):\n \"\"\"\n Ds = [Da, Db]\n \"\"\"\n Da, Db = Ds[0], Ds[1]\n Dtot = Da + Db\n if (is_fitted == True):\n X = np.einsum(\"Pls,ls->P\", g, Dtot)\n Jtot = np.einsum(\"mnP,P->mn\", np.swapaxes(g, 0, 2), X)\n Za = np.einsum(\"Pns,ls->Pnl\", g, Da)\n Ka = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Za)\n Zb = np.einsum(\"Pns,ls->Pnl\", g, Db)\n Kb = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Zb)\n return Jtot, Ka, Kb\n else:\n Jtot = np.einsum(\"pqrs, rs -> pq\", g, Dtot)\n Ka = np.einsum(\"prqs, rs -> pq\", g, Da)\n Kb = np.einsum(\"prqs, rs -> pq\", g, Db)\n return Jtot, Ka, Kb\n\n\ndef get_fock(H, g, D):\n J, K = get_JK(len(g.shape) == 3, g, D)\n return H + 2 * J - K\n\n\ndef diis_update(F_prev_list, r_prev_list):\n c = diis_solver(r_prev_list) # GET THE COEFFICIENTS!!\n out = 0 * F_prev_list[0]\n for i, element in enumerate(F_prev_list):\n out += c[i] * element\n return out\n\n\ndef oda_update(dF, dD, dE):\n \"\"\"\n ODA update:\n lbd = 0.5 - dE / E_deriv\n \"\"\"\n E_deriv = np.sum(dF * dD)\n lbd = 0.5 * (1. - dE / E_deriv)\n if lbd < 0 or lbd > 1:\n lbd = 0.9999 if dE < 0 else 1.e-4\n return lbd\n\n\ndef get_fock_uhf(H, g, Ds):\n \"\"\"\n DIIS update given previous Fock matrices and error vectors.\n Note that if there are less than two F's, return normal F.\n \"\"\"\n Jtot, Ka, Kb = get_JK_uhf(len(g.shape) == 3, g, Ds)\n return H + Jtot - Ka, H + Jtot - Kb\n\n\ndef diis_update_uhf(F_prev_lists, r_prev_lists):\n c = diis_solver_uhf(r_prev_lists[0], r_prev_lists[1])\n Fa = 0 * F_prev_lists[0][0]\n for i, element in enumerate(F_prev_lists[0]):\n Fa += c[i] * element\n Fb = 0 * F_prev_lists[0][0]\n for i, element in enumerate(F_prev_lists[1]):\n Fb += c[i] * element\n return Fa, Fb\n\n\ndef oda_update_uhf(dFs, dDs, dE):\n \"\"\"\n ODA update:\n lbd = 0.5 - dE / E_deriv\n \"\"\"\n if type(dFs) is not list:\n raise Exception(\"arg1 and arg2 are list of alpha/beta matrices.\")\n E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])\n lbd = 0.5 * (1. 
- dE / E_deriv)\n if lbd < 0 or lbd > 1:\n lbd = 0.9999 if dE < 0 else 1.e-4\n return lbd\n\n\ndef diag(F, A):\n Fp = A.T @ F @ A\n eps, Cp = np.linalg.eigh(Fp)\n C = A @ Cp\n return eps, C\n\n\ndef get_SCF_err(S, D, F):\n err_v = S @ D @ F - F @ D @ S\n err = np.mean(err_v ** 2) ** 0.5\n return err, err_v\n\n\ndef get_SCF_energy(H, F, D, unrestricted):\n \"\"\"\n Calculates the energy.\n \"\"\"\n if unrestricted == True:\n if type(F) is not list or type(D) is not list:\n raise Exception(\"For UHF, F and D must have type list.\")\n Fa, Fb = F[0], F[1]\n Da, Db = D[0], D[1]\n Dtot = Da + Db\n return np.sum(Dtot * H + Da * Fa + Db * Fb) * 0.5\n else:\n return np.sum((H + F) * D)\n\n\ndef xform_2(H, A):\n \"\"\"\n Basis xform for 2-tensor\n \"\"\"\n if len(H.shape) != 2:\n raise Exception(\"Dimension error: arg1 should be a matrix\")\n\n return A.T @ H @ A\n\n\ndef xform_4(g, A):\n \"\"\"\n Basis xform for 4-tensor\n \"\"\"\n if len(g.shape) != 4:\n raise Exception(\"\"\"\n Dimension error: arg1 should be a four-tensor.\n Note that you should set is_fitted to be False.\n \"\"\")\n\n #return np.einsum(\"pi, qj, pqrs, rk, sl -> ijkl\", A, A, g, A, A, optimize=True)\n return xform.xform_4_np(g, A)\n"
] | [
[
"numpy.sum",
"numpy.diag",
"numpy.swapaxes",
"numpy.linalg.eigh",
"numpy.einsum",
"numpy.mean"
]
] |
APrigarina/open_model_zoo | [
"b1ff98b64a6222cf6b5f3838dc0271422250de95"
] | [
"tools/accuracy_checker/accuracy_checker/annotation_converters/cvat_multilabel_recognition.py"
] | [
"\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nfrom .format_converter import FileBasedAnnotationConverter, ConverterReturn\nfrom ..representation import MultiLabelRecognitionAnnotation\nfrom ..utils import read_xml, check_file_existence\nfrom ..config import StringField, PathField, ConfigError\n\n\nclass CVATMultilabelAttributesRecognitionConverter(FileBasedAnnotationConverter):\n __provider__ = 'cvat_multilabel_binary_attributes_recognition'\n annotation_types = (MultiLabelRecognitionAnnotation, )\n\n @classmethod\n def parameters(cls):\n configuration_parameters = super().parameters()\n configuration_parameters.update({\n 'label': StringField(description='specific label for attribute collection'),\n 'images_dir': PathField(\n is_directory=True, optional=True,\n description='path to dataset images, used only for content existence check'\n )\n })\n return configuration_parameters\n\n def configure(self):\n super().configure()\n self.label = self.get_value_from_config('label')\n self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent\n\n def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):\n annotation = read_xml(self.annotation_file)\n meta = annotation.find('meta')\n size = int(meta.find('task').find('size').text)\n label = self.select_label(meta)\n label_to_id = {attribute.find('name').text: idx for idx, attribute in enumerate(label.iter('attribute'))}\n num_attributes = len(label_to_id)\n\n annotations = []\n content_errors = None if not check_content else []\n for image_id, image in enumerate(annotation.iter('image')):\n identifier = image.attrib['name'].split('/')[-1]\n if check_content:\n if not check_file_existence(self.images_dir / identifier):\n content_errors.append('{}: does not exist'.format(self.images_dir / identifier))\n for bbox in image:\n if 'label' not in bbox.attrib.keys() or bbox.attrib['label'] != self.label:\n continue\n bbox_rect = [\n float(bbox.attrib['xtl']), float(bbox.attrib['ytl']),\n float(bbox.attrib['xbr']), float(bbox.attrib['ybr'])\n ]\n attributes = -np.ones(num_attributes)\n for attribute in bbox.iter('attribute'):\n attribute_name = attribute.attrib['name']\n attribute_label = label_to_id[attribute_name]\n attributes[attribute_label] = 1 if attribute.text == 'T' else 0\n attributes_annotation = MultiLabelRecognitionAnnotation(identifier, attributes)\n attributes_annotation.metadata['rect'] = bbox_rect\n annotations.append(attributes_annotation)\n\n if progress_callback is not None and image_id % progress_interval == 0:\n progress_callback(image_id * 100 / size)\n\n return ConverterReturn(annotations, self.generate_meta(label_to_id), content_errors)\n\n @staticmethod\n def generate_meta(attribute_values_mapping):\n return {'label_map': {value: key for key, value in attribute_values_mapping.items()}}\n\n def select_label(self, meta):\n label = [label for label in meta.iter('label') if label.find('name').text == 
self.label]\n if not label:\n raise ConfigError('{} does not present in annotation'.format(self.label))\n return label[0]\n"
] | [
[
"numpy.ones"
]
] |
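The `cvat_multilabel_binary_attributes_recognition` record above maps each CVAT bounding box to a per-attribute vector (`-1` = attribute not annotated, `1` = text 'T', `0` = otherwise). A minimal standalone sketch of that parsing step, using only the standard library and numpy (the XML snippet, the `person` label, and the `has_hat`/`has_bag` attribute names are hypothetical illustrations, not part of the record above):

```python
# Illustrative sketch only: rebuild the -1/0/1 attribute vector that the
# converter record above produces for each annotated box in CVAT-style XML.
import xml.etree.ElementTree as ET
import numpy as np

CVAT_XML = """
<annotations>
  <meta>
    <task><size>1</size></task>
    <label>
      <name>person</name>
      <attribute><name>has_hat</name></attribute>
      <attribute><name>has_bag</name></attribute>
    </label>
  </meta>
  <image name="frames/0001.png">
    <box label="person" xtl="10" ytl="20" xbr="110" ybr="220">
      <attribute name="has_hat">T</attribute>
      <attribute name="has_bag">F</attribute>
    </box>
  </image>
</annotations>
"""

root = ET.fromstring(CVAT_XML)
meta = root.find('meta')

# pick the label of interest and index its attributes, as select_label() does
label = next(lbl for lbl in meta.iter('label') if lbl.find('name').text == 'person')
label_to_id = {a.find('name').text: i for i, a in enumerate(label.iter('attribute'))}

for image in root.iter('image'):
    identifier = image.attrib['name'].split('/')[-1]
    for box in image:
        if box.attrib.get('label') != 'person':
            continue
        rect = [float(box.attrib[k]) for k in ('xtl', 'ytl', 'xbr', 'ybr')]
        attributes = -np.ones(len(label_to_id))   # -1 marks unannotated attributes
        for attr in box.iter('attribute'):
            attributes[label_to_id[attr.attrib['name']]] = 1 if attr.text == 'T' else 0
        print(identifier, rect, attributes)       # 0001.png [10.0, 20.0, 110.0, 220.0] [1. 0.]
```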
AXATechLab/models | [
"c39ac760cfa6ce2339f5781f2a78d70db3ea5bb2"
] | [
"research/object_detection/meta_architectures/faster_rcnn_meta_arch_override_RPN.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Faster R-CNN meta-architecture definition.\n\nGeneral tensorflow implementation of Faster R-CNN detection models.\n\nSee Faster R-CNN: Ren, Shaoqing, et al.\n\"Faster R-CNN: Towards real-time object detection with region proposal\nnetworks.\" Advances in neural information processing systems. 2015.\n\nWe allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage,\nall of the user facing methods (e.g., predict, postprocess, loss) can be used as\nif the model consisted only of the RPN, returning class agnostic proposals\n(these can be thought of as approximate detections with no associated class\ninformation). In case of 2 stages, proposals are computed, then passed\nthrough a second stage \"box classifier\" to yield (multi-class) detections.\nFinally, in case of 3 stages which is only used during eval, proposals are\ncomputed, then passed through a second stage \"box classifier\" that will compute\nrefined boxes and classes, and then features are pooled from the refined and\nnon-maximum suppressed boxes and are passed through the box classifier again. If\nnumber of stages is 3 during training it will be reduced to two automatically.\n\nImplementations of Faster R-CNN models must define a new\nFasterRCNNFeatureExtractor and override three methods: `preprocess`,\n`_extract_proposal_features` (the first stage of the model), and\n`_extract_box_classifier_features` (the second stage of the model). Optionally,\nthe `restore_fn` method can be overridden. See tests for an example.\n\nA few important notes:\n+ Batching conventions: We support batched inference and training where\nall images within a batch have the same resolution. Batch sizes are determined\ndynamically via the shape of the input tensors (rather than being specified\ndirectly as, e.g., a model constructor).\n\nA complication is that due to non-max suppression, we are not guaranteed to get\nthe same number of proposals from the first stage RPN (region proposal network)\nfor each image (though in practice, we should often get the same number of\nproposals). For this reason we pad to a max number of proposals per image\nwithin a batch. This `self.max_num_proposals` property is set to the\n`first_stage_max_proposals` parameter at inference time and the\n`second_stage_batch_size` at training time since we subsample the batch to\nbe sent through the box classifier during training.\n\nFor the second stage of the pipeline, we arrange the proposals for all images\nwithin the batch along a single batch dimension. For example, the input to\n_extract_box_classifier_features is a tensor of shape\n`[total_num_proposals, crop_height, crop_width, depth]` where\ntotal_num_proposals is batch_size * self.max_num_proposals. 
(And note that per\nthe above comment, a subset of these entries correspond to zero paddings.)\n\n+ Coordinate representations:\nFollowing the API (see model.DetectionModel definition), our outputs after\npostprocessing operations are always normalized boxes however, internally, we\nsometimes convert to absolute --- e.g. for loss computation. In particular,\nanchors and proposal_boxes are both represented as absolute coordinates.\n\nImages are resized in the `preprocess` method.\n\nThe Faster R-CNN meta architecture has two post-processing methods\n`_postprocess_rpn` which is applied after first stage and\n`_postprocess_box_classifier` which is applied after second stage. There are\nthree different ways post-processing can happen depending on number_of_stages\nconfigured in the meta architecture:\n\n1. When number_of_stages is 1:\n `_postprocess_rpn` is run as part of the `postprocess` method where\n true_image_shapes is used to clip proposals, perform non-max suppression and\n normalize them.\n2. When number of stages is 2:\n `_postprocess_rpn` is run as part of the `_predict_second_stage` method where\n `resized_image_shapes` is used to clip proposals, perform non-max suppression\n and normalize them. In this case `postprocess` method skips `_postprocess_rpn`\n and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip\n detections, perform non-max suppression and normalize them.\n3. When number of stages is 3:\n `_postprocess_rpn` is run as part of the `_predict_second_stage` using\n `resized_image_shapes` to clip proposals, perform non-max suppression and\n normalize them. Subsequently, `_postprocess_box_classifier` is run as part of\n `_predict_third_stage` using `true_image_shapes` to clip detections, peform\n non-max suppression and normalize them. In this case, the `postprocess` method\n skips both `_postprocess_rpn` and `_postprocess_box_classifier`.\n\"\"\"\nfrom abc import abstractmethod\nfrom functools import partial\nimport tensorflow as tf\nimport json\nimport numpy as np\n\nfrom object_detection.anchor_generators import grid_anchor_generator\nfrom object_detection.builders import box_predictor_builder\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import box_predictor\nfrom object_detection.core import losses\nfrom object_detection.core import model\nfrom object_detection.core import post_processing\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.core import target_assigner\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nimport sys # for debug\nsys.path.append(\"/notebooks/text-renderer/\")\nimport data_util\n\nslim = tf.contrib.slim\n\n\nclass FasterRCNNFeatureExtractor(object):\n \"\"\"Faster R-CNN Feature Extractor definition.\"\"\"\n\n def __init__(self,\n is_training,\n first_stage_features_stride,\n batch_norm_trainable=False,\n reuse_weights=None,\n weight_decay=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: A boolean indicating whether the training version of the\n computation graph should be constructed.\n first_stage_features_stride: Output stride of extracted RPN feature map.\n batch_norm_trainable: Whether to update batch norm parameters during\n training or not. When training with a relative large batch size\n (e.g. 8), it could be desirable to enable batch norm update.\n reuse_weights: Whether to reuse variables. 
Default is None.\n weight_decay: float weight decay for feature extractor (default: 0.0).\n \"\"\"\n self._is_training = is_training\n self._first_stage_features_stride = first_stage_features_stride\n self._train_batch_norm = (batch_norm_trainable and is_training)\n self._reuse_weights = reuse_weights\n self._weight_decay = weight_decay\n\n @abstractmethod\n def preprocess(self, resized_inputs):\n \"\"\"Feature-extractor specific preprocessing (minus image resizing).\"\"\"\n pass\n\n def extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features.\n\n This function is responsible for extracting feature maps from preprocessed\n images. These features are used by the region proposal network (RPN) to\n predict proposals.\n\n Args:\n preprocessed_inputs: A [batch, height, width, channels] float tensor\n representing a batch of images.\n scope: A scope name.\n\n Returns:\n rpn_feature_map: A tensor with shape [batch, height, width, depth]\n activations: A dictionary mapping activation tensor names to tensors.\n \"\"\"\n with tf.variable_scope(scope, values=[preprocessed_inputs]):\n return self._extract_proposal_features(preprocessed_inputs, scope)\n\n @abstractmethod\n def _extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features, to be overridden.\"\"\"\n pass\n\n def extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features.\n\n Args:\n proposal_feature_maps: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, crop_height, crop_width, depth]\n representing the feature map cropped to each proposal.\n scope: A scope name.\n\n Returns:\n proposal_classifier_features: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, height, width, depth]\n representing box classifier features for each proposal.\n \"\"\"\n with tf.variable_scope(\n scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):\n return self._extract_box_classifier_features(proposal_feature_maps, scope)\n\n @abstractmethod\n def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features, to be overridden.\"\"\"\n pass\n\n def restore_from_classification_checkpoint_fn(\n self,\n first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n Args:\n first_stage_feature_extractor_scope: A scope name for the first stage\n feature extractor.\n second_stage_feature_extractor_scope: A scope name for the second stage\n feature extractor.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n variables_to_restore = {}\n for variable in tf.global_variables():\n for scope_name in [first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope]:\n if variable.op.name.startswith(scope_name):\n var_name = variable.op.name.replace(scope_name + '/', '')\n variables_to_restore[var_name] = variable\n return variables_to_restore\n\n\nclass FasterRCNNMetaArchOverrideRPN(model.DetectionModel):\n \"\"\"Faster R-CNN Meta-architecture definition.\"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n image_resizer_fn,\n feature_extractor,\n number_of_stages,\n first_stage_anchor_generator,\n first_stage_target_assigner,\n first_stage_atrous_rate,\n first_stage_box_predictor_arg_scope_fn,\n first_stage_box_predictor_kernel_size,\n 
first_stage_box_predictor_depth,\n first_stage_minibatch_size,\n first_stage_sampler,\n first_stage_nms_score_threshold,\n first_stage_nms_iou_threshold,\n first_stage_max_proposals,\n first_stage_proposals_path,\n first_stage_localization_loss_weight,\n first_stage_objectness_loss_weight,\n initial_crop_size,\n maxpool_kernel_size,\n maxpool_stride,\n second_stage_target_assigner,\n second_stage_mask_rcnn_box_predictor,\n second_stage_batch_size,\n second_stage_sampler,\n second_stage_non_max_suppression_fn,\n second_stage_score_conversion_fn,\n second_stage_localization_loss_weight,\n second_stage_classification_loss_weight,\n second_stage_classification_loss,\n second_stage_mask_prediction_loss_weight=1.0,\n hard_example_miner=None,\n parallel_iterations=16,\n add_summaries=True,\n use_matmul_crop_and_resize=False,\n clip_anchors_to_image=False):\n \"\"\"FasterRCNNMetaArch Constructor.\n\n Args:\n is_training: A boolean indicating whether the training version of the\n computation graph should be constructed.\n num_classes: Number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n image_resizer_fn: A callable for image resizing. This callable\n takes a rank-3 image tensor of shape [height, width, channels]\n (corresponding to a single image), an optional rank-3 instance mask\n tensor of shape [num_masks, height, width] and returns a resized rank-3\n image tensor, a resized mask tensor if one was provided in the input. In\n addition this callable must also return a 1-D tensor of the form\n [height, width, channels] containing the size of the true image, as the\n image resizer can perform zero padding. See protos/image_resizer.proto.\n feature_extractor: A FasterRCNNFeatureExtractor object.\n number_of_stages: An integer values taking values in {1, 2, 3}. If\n 1, the function will construct only the Region Proposal Network (RPN)\n part of the model. If 2, the function will perform box refinement and\n other auxiliary predictions all in the second stage. If 3, it will\n extract features from refined boxes and perform the auxiliary\n predictions on the non-maximum suppressed refined boxes.\n If is_training is true and the value of number_of_stages is 3, it is\n reduced to 2 since all the model heads are trained in parallel in second\n stage during training.\n first_stage_anchor_generator: An anchor_generator.AnchorGenerator object\n (note that currently we only support\n grid_anchor_generator.GridAnchorGenerator objects)\n first_stage_target_assigner: Target assigner to use for first stage of\n Faster R-CNN (RPN).\n first_stage_atrous_rate: A single integer indicating the atrous rate for\n the single convolution op which is applied to the `rpn_features_to_crop`\n tensor to obtain a tensor to be used for box prediction. Some feature\n extractors optionally allow for producing feature maps computed at\n denser resolutions. 
The atrous rate is used to compensate for the\n denser feature maps by using an effectively larger receptive field.\n (This should typically be set to 1).\n first_stage_box_predictor_arg_scope_fn: A function to construct tf-slim\n arg_scope for conv2d, separable_conv2d and fully_connected ops for the\n RPN box predictor.\n first_stage_box_predictor_kernel_size: Kernel size to use for the\n convolution op just prior to RPN box predictions.\n first_stage_box_predictor_depth: Output depth for the convolution op\n just prior to RPN box predictions.\n first_stage_minibatch_size: The \"batch size\" to use for computing the\n objectness and location loss of the region proposal network. This\n \"batch size\" refers to the number of anchors selected as contributing\n to the loss function for any given image within the image batch and is\n only called \"batch_size\" due to terminology from the Faster R-CNN paper.\n first_stage_sampler: Sampler to use for first stage loss (RPN loss).\n first_stage_nms_score_threshold: Score threshold for non max suppression\n for the Region Proposal Network (RPN). This value is expected to be in\n [0, 1] as it is applied directly after a softmax transformation. The\n recommended value for Faster R-CNN is 0.\n first_stage_nms_iou_threshold: The Intersection Over Union (IOU) threshold\n for performing Non-Max Suppression (NMS) on the boxes predicted by the\n Region Proposal Network (RPN).\n first_stage_max_proposals: Maximum number of boxes to retain after\n performing Non-Max Suppression (NMS) on the boxes predicted by the\n Region Proposal Network (RPN).\n first_stage_localization_loss_weight: A float\n first_stage_objectness_loss_weight: A float\n initial_crop_size: A single integer indicating the output size\n (width and height are set to be the same) of the initial bilinear\n interpolation based cropping during ROI pooling.\n maxpool_kernel_size: A single integer indicating the kernel size of the\n max pool op on the cropped feature map during ROI pooling.\n maxpool_stride: A single integer indicating the stride of the max pool\n op on the cropped feature map during ROI pooling.\n second_stage_target_assigner: Target assigner to use for second stage of\n Faster R-CNN. If the model is configured with multiple prediction heads,\n this target assigner is used to generate targets for all heads (with the\n correct `unmatched_class_label`).\n second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for\n the second stage.\n second_stage_batch_size: The batch size used for computing the\n classification and refined location loss of the box classifier. This\n \"batch size\" refers to the number of proposals selected as contributing\n to the loss function for any given image within the image batch and is\n only called \"batch_size\" due to terminology from the Faster R-CNN paper.\n second_stage_sampler: Sampler to use for second stage loss (box\n classifier loss).\n second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression\n callable that takes `boxes`, `scores`, optional `clip_window` and\n optional (kwarg) `mask` inputs (with all other inputs already set)\n and returns a dictionary containing tensors with keys:\n `detection_boxes`, `detection_scores`, `detection_classes`,\n `num_detections`, and (optionally) `detection_masks`. 
See\n `post_processing.batch_multiclass_non_max_suppression` for the type and\n shape of these tensors.\n second_stage_score_conversion_fn: Callable elementwise nonlinearity\n (that takes tensors as inputs and returns tensors). This is usually\n used to convert logits to probabilities.\n second_stage_localization_loss_weight: A float indicating the scale factor\n for second stage localization loss.\n second_stage_classification_loss_weight: A float indicating the scale\n factor for second stage classification loss.\n second_stage_classification_loss: Classification loss used by the second\n stage classifier. Either losses.WeightedSigmoidClassificationLoss or\n losses.WeightedSoftmaxClassificationLoss.\n second_stage_mask_prediction_loss_weight: A float indicating the scale\n factor for second stage mask prediction loss. This is applicable only if\n second stage box predictor is configured to predict masks.\n hard_example_miner: A losses.HardExampleMiner object (can be None).\n parallel_iterations: (Optional) The number of iterations allowed to run\n in parallel for calls to tf.map_fn.\n add_summaries: boolean (default: True) controlling whether summary ops\n should be added to tensorflow graph.\n use_matmul_crop_and_resize: Force the use of matrix multiplication based\n crop and resize instead of standard tf.image.crop_and_resize while\n computing second stage input feature maps.\n clip_anchors_to_image: Normally, anchors generated for a given image size\n are pruned during training if they lie outside the image window. This\n option clips the anchors to be within the image instead of pruning.\n\n Raises:\n ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at\n training time.\n ValueError: If first_stage_anchor_generator is not of type\n grid_anchor_generator.GridAnchorGenerator.\n \"\"\"\n # TODO(rathodv): add_summaries is currently unused. 
Respect that directive\n # in the future.\n print(\"Running FasterRCNN with overriden RPN\")\n super(FasterRCNNMetaArchOverrideRPN, self).__init__(num_classes=num_classes)\n\n # There is no RPN in this implementation!\n if (number_of_stages==1):\n raise ValueError('Number of stages = 1 is not allowed for overriden RPN proposals')\n\n if is_training and second_stage_batch_size > first_stage_max_proposals:\n raise ValueError('second_stage_batch_size should be no greater than '\n 'first_stage_max_proposals.')\n if not isinstance(first_stage_anchor_generator,\n grid_anchor_generator.GridAnchorGenerator):\n raise ValueError('first_stage_anchor_generator must be of type '\n 'grid_anchor_generator.GridAnchorGenerator.')\n\n # Michele: Proposals that override the RPN\n first_stage_proposals_path = os.path.join(first_stage_proposals_path, '')\n xml_root = data_util.read_xml_batch(first_stage_proposals_path)[0]['annot']\n _, self.proposals = data_util.xml_to_numpy(None, xml_root)\n\n print(\"Shape of overriding proposals\",self.proposals.shape)\n\n self._is_training = is_training\n self._image_resizer_fn = image_resizer_fn\n self._feature_extractor = feature_extractor\n self._number_of_stages = number_of_stages\n\n self._proposal_target_assigner = first_stage_target_assigner\n self._detector_target_assigner = second_stage_target_assigner\n # Both proposal and detector target assigners use the same box coder\n self._box_coder = self._proposal_target_assigner.box_coder\n\n # (First stage) Region proposal network parameters\n self._first_stage_anchor_generator = first_stage_anchor_generator\n self._first_stage_atrous_rate = first_stage_atrous_rate\n self._first_stage_box_predictor_arg_scope_fn = (\n first_stage_box_predictor_arg_scope_fn)\n self._first_stage_box_predictor_kernel_size = (\n first_stage_box_predictor_kernel_size)\n self._first_stage_box_predictor_depth = first_stage_box_predictor_depth\n self._first_stage_minibatch_size = first_stage_minibatch_size\n self._first_stage_sampler = first_stage_sampler\n self._first_stage_box_predictor = (\n box_predictor_builder.build_convolutional_box_predictor(\n is_training=self._is_training,\n num_classes=1,\n conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn,\n use_dropout=False,\n dropout_keep_prob=1.0,\n box_code_size=self._box_coder.code_size,\n kernel_size=1,\n num_layers_before_predictor=0,\n min_depth=0,\n max_depth=0))\n\n self._first_stage_nms_score_threshold = first_stage_nms_score_threshold\n self._first_stage_nms_iou_threshold = first_stage_nms_iou_threshold\n self._first_stage_max_proposals = first_stage_max_proposals\n\n self._first_stage_localization_loss = (\n losses.WeightedSmoothL1LocalizationLoss())\n self._first_stage_objectness_loss = (\n losses.WeightedSoftmaxClassificationLoss())\n self._first_stage_loc_loss_weight = first_stage_localization_loss_weight\n self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight\n\n # Per-region cropping parameters\n self._initial_crop_size = initial_crop_size\n self._maxpool_kernel_size = maxpool_kernel_size\n self._maxpool_stride = maxpool_stride\n\n self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor\n\n self._second_stage_batch_size = second_stage_batch_size\n self._second_stage_sampler = second_stage_sampler\n\n self._second_stage_nms_fn = second_stage_non_max_suppression_fn\n self._second_stage_score_conversion_fn = second_stage_score_conversion_fn\n\n self._second_stage_localization_loss = (\n losses.WeightedSmoothL1LocalizationLoss())\n 
self._second_stage_classification_loss = second_stage_classification_loss\n self._second_stage_mask_loss = (\n losses.WeightedSigmoidClassificationLoss())\n self._second_stage_loc_loss_weight = second_stage_localization_loss_weight\n self._second_stage_cls_loss_weight = second_stage_classification_loss_weight\n self._second_stage_mask_loss_weight = (\n second_stage_mask_prediction_loss_weight)\n self._use_matmul_crop_and_resize = use_matmul_crop_and_resize\n self._hard_example_miner = hard_example_miner\n self._parallel_iterations = parallel_iterations\n\n self.clip_anchors_to_image = clip_anchors_to_image\n\n if self._number_of_stages <= 0 or self._number_of_stages > 3:\n raise ValueError('Number of stages should be a value in {1, 2, 3}.')\n\n @property\n def first_stage_feature_extractor_scope(self):\n return 'FirstStageFeatureExtractor'\n\n @property\n def second_stage_feature_extractor_scope(self):\n return 'SecondStageFeatureExtractor'\n\n @property\n def first_stage_box_predictor_scope(self):\n return 'FirstStageBoxPredictor'\n\n @property\n def second_stage_box_predictor_scope(self):\n return 'SecondStageBoxPredictor'\n\n @property\n def max_num_proposals(self):\n \"\"\"Max number of proposals (to pad to) for each image in the input batch.\n\n At training time, this is set to be the `second_stage_batch_size` if hard\n example miner is not configured, else it is set to\n `first_stage_max_proposals`. At inference time, this is always set to\n `first_stage_max_proposals`.\n\n Returns:\n A positive integer.\n \"\"\"\n if self._is_training and not self._hard_example_miner:\n return self._second_stage_batch_size\n #return self._first_stage_max_proposals\n return self.proposals.shape[1]\n\n @property\n def anchors(self):\n if not self._anchors:\n raise RuntimeError('anchors have not been constructed yet!')\n if not isinstance(self._anchors, box_list.BoxList):\n raise RuntimeError('anchors should be a BoxList object, but is not.')\n return self._anchors\n\n def preprocess(self, inputs):\n \"\"\"Feature-extractor specific preprocessing.\n\n See base class.\n\n For Faster R-CNN, we perform image resizing in the base class --- each\n class subclassing FasterRCNNMetaArch is responsible for any additional\n preprocessing (e.g., scaling pixel values to be in [-1, 1]).\n\n Args:\n inputs: a [batch, height_in, width_in, channels] float tensor representing\n a batch of images with values between 0 and 255.0.\n\n Returns:\n preprocessed_inputs: a [batch, height_out, width_out, channels] float\n tensor representing a batch of images.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n Raises:\n ValueError: if inputs tensor does not have type tf.float32\n \"\"\"\n if inputs.dtype is not tf.float32:\n raise ValueError('`preprocess` expects a tf.float32 tensor')\n with tf.name_scope('Preprocessor'):\n outputs = shape_utils.static_or_dynamic_map_fn(\n self._image_resizer_fn,\n elems=inputs,\n dtype=[tf.float32, tf.int32],\n parallel_iterations=self._parallel_iterations)\n resized_inputs = outputs[0]\n true_image_shapes = outputs[1]\n return (self._feature_extractor.preprocess(resized_inputs),\n true_image_shapes)\n\n def _compute_clip_window(self, image_shapes):\n \"\"\"Computes clip window for non max suppression based on image shapes.\n\n This function assumes that the clip window's left top corner is at (0, 0).\n\n Args:\n 
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing\n shapes of images in the batch. Each row represents [height, width,\n channels] of an image.\n\n Returns:\n A 2-D float32 tensor of shape [batch_size, 4] containing the clip window\n for each image in the form [ymin, xmin, ymax, xmax].\n \"\"\"\n clip_heights = image_shapes[:, 0]\n clip_widths = image_shapes[:, 1]\n clip_window = tf.to_float(tf.stack([tf.zeros_like(clip_heights),\n tf.zeros_like(clip_heights),\n clip_heights, clip_widths], axis=1))\n return clip_window\n\n def predict(self, preprocessed_inputs, true_image_shapes):\n \"\"\"Predicts unpostprocessed tensors from input tensor.\n\n This function takes an input batch of images and runs it through the\n forward pass of the network to yield \"raw\" un-postprocessed predictions.\n If `number_of_stages` is 1, this function only returns first stage\n RPN predictions (un-postprocessed). Otherwise it returns both\n first stage RPN predictions as well as second stage box classifier\n predictions.\n\n Other remarks:\n + Anchor pruning vs. clipping: following the recommendation of the Faster\n R-CNN paper, we prune anchors that venture outside the image window at\n training time and clip anchors to the image window at inference time.\n + Proposal padding: as described at the top of the file, proposals are\n padded to self._max_num_proposals and flattened so that proposals from all\n images within the input batch are arranged along the same batch dimension.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] to be used for predicting proposal\n boxes and corresponding objectness scores.\n 2) rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n 3) image_shape: a 1-D tensor of shape [4] representing the input\n image shape.\n 4) rpn_box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN (in absolute coordinates). Note that\n `num_anchors` can differ depending on whether the model is created in\n training or inference mode.\n\n (and if number_of_stages > 1):\n 7) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals. 
If using\n a shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 8) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 9) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 10) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 11) mask_predictions: (optional) a 4-D tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask predictions.\n\n Raises:\n ValueError: If `predict` is called before `preprocess`.\n \"\"\"\n '''(rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,\n image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)'''\n print(\"Predict running\")\n image_shape = tf.shape(preprocessed_inputs)\n rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(\n preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)\n #(rpn_box_encodings, rpn_objectness_predictions_with_background\n #) = self._predict_rpn_proposals(rpn_box_predictor_features)\n\n # The Faster R-CNN paper recommends pruning anchors that venture outside\n # the image window at training time and clipping at inference time.\n '''clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))\n if self._is_training:\n if self.clip_anchors_to_image:\n anchors_boxlist = box_list_ops.clip_to_window(\n anchors_boxlist, clip_window, filter_nonoverlapping=False)\n else:\n (rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors_boxlist) = self._remove_invalid_anchors_and_predictions(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors_boxlist, clip_window)\n else:\n anchors_boxlist = box_list_ops.clip_to_window(\n anchors_boxlist, clip_window)\n\n self._anchors = anchors_boxlist'''\n prediction_dict = {\n #'rpn_box_predictor_features': rpn_box_predictor_features,\n 'rpn_features_to_crop': rpn_features_to_crop,\n 'image_shape': image_shape,\n #'rpn_box_encodings': rpn_box_encodings,\n #'rpn_objectness_predictions_with_background':\n #rpn_objectness_predictions_with_background,\n #'anchors': self._anchors.get()\n }\n\n if self._number_of_stages >= 2:\n '''prediction_dict.update(self._predict_second_stage(\n rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n rpn_features_to_crop,\n self._anchors.get(), image_shape, true_image_shapes))'''\n prediction_dict.update(self._predict_second_stage(\n rpn_features_to_crop, image_shape, true_image_shapes))\n\n if self._number_of_stages == 3:\n prediction_dict = self._predict_third_stage(\n prediction_dict, true_image_shapes)\n\n return prediction_dict\n\n def _image_batch_shape_2d(self, image_batch_shape_1d):\n \"\"\"Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.\n\n Example:\n If 1-D image batch shape tensor is [2, 300, 300, 3]. 
The corresponding 2-D\n image batch tensor would be [[300, 300, 3], [300, 300, 3]]\n\n Args:\n image_batch_shape_1d: 1-D tensor of the form [batch_size, height,\n width, channels].\n\n Returns:\n image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] were each row is\n of the form [height, width, channels].\n \"\"\"\n return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),\n [image_batch_shape_1d[0], 1])\n\n '''def _predict_second_stage(self, rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n rpn_features_to_crop,\n anchors,\n image_shape,\n true_image_shapes):\n \"\"\"Predicts the output tensors from second stage of Faster R-CNN.\n\n Args:\n rpn_box_encodings: 4-D float tensor of shape\n [batch_size, num_valid_anchors, self._box_coder.code_size] containing\n predicted boxes.\n rpn_objectness_predictions_with_background: 2-D float tensor of shape\n [batch_size, num_valid_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n anchors: 2-D float tensor of shape\n [num_anchors, self._box_coder.code_size].\n image_shape: A 1D int32 tensors of size [4] containing the image shape.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals. If using a\n shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 5) proposal_boxes_normalized: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing decoded proposal\n bounding boxes in normalized coordinates. 
Can be used to override the\n boxes proposed by the RPN, thus enabling one to extract features and\n get box classification and prediction for externally selected areas\n of the image.\n 6) box_classifier_features: a 4-D float32 tensor representing the\n features for each proposal.\n \"\"\"\n image_shape_2d = self._image_batch_shape_2d(image_shape)\n proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors, image_shape_2d, true_image_shapes)\n # Override RPN proposals\n # proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=(\"original size= \" + str(proposal_boxes_normalized.shape[1])))\n # proposal_boxes_normalized = tf.constant(self.proposals, dtype='float32')\n\n flattened_proposal_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, proposal_boxes_normalized))\n\n box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_proposal_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n if self._mask_rcnn_box_predictor.is_keras_model:\n box_predictions = self._mask_rcnn_box_predictor(\n [box_classifier_features],\n prediction_stage=2)\n else:\n box_predictions = self._mask_rcnn_box_predictor.predict(\n [box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=2)\n\n refined_box_encodings = tf.squeeze(\n box_predictions[box_predictor.BOX_ENCODINGS],\n axis=1, name='all_refined_box_encodings')\n class_predictions_with_background = tf.squeeze(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1, name='all_class_predictions_with_background')\n\n absolute_proposal_boxes = ops.normalized_to_image_coordinates(\n proposal_boxes_normalized, image_shape, self._parallel_iterations)\n\n prediction_dict = {\n 'refined_box_encodings': refined_box_encodings,\n 'class_predictions_with_background':\n class_predictions_with_background,\n 'num_proposals': num_proposals,\n 'proposal_boxes': absolute_proposal_boxes,\n 'box_classifier_features': box_classifier_features,\n 'proposal_boxes_normalized': proposal_boxes_normalized,\n }\n\n return prediction_dict'''\n def _predict_second_stage(self, rpn_features_to_crop,\n image_shape,\n true_image_shapes):\n \"\"\"Predicts the output tensors from second stage of Faster R-CNN.\n\n Args:\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n image_shape: A 1D int32 tensors of size [4] containing the image shape.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals. 
If using a\n shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 5) proposal_boxes_normalized: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing decoded proposal\n bounding boxes in normalized coordinates. Can be used to override the\n boxes proposed by the RPN, thus enabling one to extract features and\n get box classification and prediction for externally selected areas\n of the image.\n 6) box_classifier_features: a 4-D float32 tensor representing the\n features for each proposal.\n \"\"\"\n image_shape_2d = self._image_batch_shape_2d(image_shape) # same as true shape\n '''proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors, image_shape_2d, true_image_shapes)'''\n # Override RPN proposals\n # proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=(\"original size= \" + str(proposal_boxes_normalized.shape[1])))\n # normalize proposal boxes\n\n def normalize_boxes(args):\n proposal_boxes_per_image = args[0]\n image_shape = args[1]\n normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(proposal_boxes_per_image), image_shape[0],\n image_shape[1], check_range=False).get()\n return normalized_boxes_per_image\n def to_absolute_boxes(args):\n proposal_boxes_per_image = args[0]\n image_shape = args[1]\n normalized_boxes_per_image = box_list_ops.to_absolute_coordinates(\n box_list.BoxList(proposal_boxes_per_image), image_shape[0],\n image_shape[1], check_range=False).get()\n return normalized_boxes_per_image \n \n proposal_boxes = tf.constant(self.proposals, dtype='float32')\n proposal_boxes = shape_utils.static_or_dynamic_map_fn(\n to_absolute_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)\n\n\n num_proposals = tf.constant([proposal_boxes.shape[1]], dtype='int32')\n # single_image_boxlist = box_list.BoxList(proposals_absolute)\n # proposal_boxes = self._sample_box_classifier_minibatch_single_image(single_image_boxlist, num_proposals, groundtruth_boxlists[0], \n # groundtruth_classes_with_background_list[0], groundtruth_weights_list[0]).get()\n # Minibatch sampling during training\n if self._is_training:\n proposal_boxes = tf.stop_gradient(proposal_boxes)\n if not self._hard_example_miner:\n\n placeholder_scores = tf.zeros((1, proposal_boxes.shape[1], 2))\n #proposal_boxes = tf.Print(proposal_boxes, [proposal_boxes], message=\"1: \")\n\n (groundtruth_boxlists, groundtruth_classes_with_background_list, _,\n groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n\n (proposal_boxes, _, num_proposals) = 
self._sample_box_classifier_batch(proposal_boxes, placeholder_scores, num_proposals, \n groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, true_image_shapes[0])\n #proposal_boxes = tf.Print(proposal_boxes, [proposal_boxes], message=\"2: \")\n\n #proposal_boxes = tf.Print(proposal_boxes, [], message=(\"Shape of pboxes \" + str(proposal_boxes.shape[1])))\n #num_proposals = tf.Print(num_proposals, [num_proposals])\n \n proposal_boxes_normalized = shape_utils.static_or_dynamic_map_fn(\n normalize_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)\n #proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [proposal_boxes_normalized], message=\"3: \")\n\n #proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [tf.shape(proposal_boxes_normalized)], message=(\"Shape of pboxes \"))\n\n\n #proposal_boxes_normalized = tf.constant(self.proposals[:, 0:64, :], dtype='float32')\n #proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=(\"Shape of minibatch \" + str(proposal_boxes_normalized.shape[1])))\n\n flattened_proposal_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, proposal_boxes_normalized))\n #flattened_proposal_feature_maps = tf.stop_gradient(flattened_proposal_feature_maps)\n #flattened_proposal_feature_maps = tf.Print(flattened_proposal_feature_maps, [], message=(\"Cropped props : \" + str(flattened_proposal_feature_maps.shape)))\n\n box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_proposal_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n if self._mask_rcnn_box_predictor.is_keras_model:\n box_predictions = self._mask_rcnn_box_predictor(\n [box_classifier_features],\n prediction_stage=2)\n else:\n box_predictions = self._mask_rcnn_box_predictor.predict(\n [box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=2)\n\n refined_box_encodings = tf.squeeze(\n box_predictions[box_predictor.BOX_ENCODINGS],\n axis=1, name='all_refined_box_encodings')\n class_predictions_with_background = tf.squeeze(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1, name='all_class_predictions_with_background')\n\n absolute_proposal_boxes = ops.normalized_to_image_coordinates(\n proposal_boxes_normalized, image_shape, self._parallel_iterations)\n\n prediction_dict = {\n 'refined_box_encodings': refined_box_encodings,\n 'class_predictions_with_background':\n class_predictions_with_background,\n 'num_proposals': num_proposals,\n 'proposal_boxes': absolute_proposal_boxes,\n 'box_classifier_features': box_classifier_features,\n 'proposal_boxes_normalized': proposal_boxes_normalized,\n }\n\n return prediction_dict\n\n def _predict_third_stage(self, prediction_dict, image_shapes):\n \"\"\"Predicts non-box, non-class outputs using refined detections.\n\n For training, masks as predicted directly on the box_classifier_features,\n which are region-features from the initial anchor boxes.\n For inference, this happens after calling the post-processing stage, such\n that masks are only calculated for the top scored boxes.\n\n Args:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n 
total_num_proposals=batch_size*self._max_num_proposals. If using a\n shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 5) box_classifier_features: a 4-D float32 tensor representing the\n features for each proposal.\n image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing\n shapes of images in the batch.\n\n Returns:\n prediction_dict: a dictionary that in addition to the input predictions\n does hold the following predictions as well:\n 1) mask_predictions: a 4-D tensor with shape\n [batch_size, max_detection, mask_height, mask_width] containing\n instance mask predictions.\n \"\"\"\n if self._is_training:\n curr_box_classifier_features = prediction_dict['box_classifier_features']\n detection_classes = prediction_dict['class_predictions_with_background']\n if self._mask_rcnn_box_predictor.is_keras_model:\n mask_predictions = self._mask_rcnn_box_predictor(\n [curr_box_classifier_features],\n prediction_stage=3)\n else:\n mask_predictions = self._mask_rcnn_box_predictor.predict(\n [curr_box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=3)\n prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[\n box_predictor.MASK_PREDICTIONS], axis=1)\n else:\n detections_dict = self._postprocess_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n image_shapes)\n prediction_dict.update(detections_dict)\n detection_boxes = detections_dict[\n fields.DetectionResultFields.detection_boxes]\n detection_classes = detections_dict[\n fields.DetectionResultFields.detection_classes]\n rpn_features_to_crop = prediction_dict['rpn_features_to_crop']\n batch_size = tf.shape(detection_boxes)[0]\n max_detection = tf.shape(detection_boxes)[1]\n flattened_detected_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, detection_boxes))\n curr_box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_detected_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n if self._mask_rcnn_box_predictor.is_keras_model:\n mask_predictions = self._mask_rcnn_box_predictor(\n [curr_box_classifier_features],\n prediction_stage=3)\n else:\n mask_predictions = self._mask_rcnn_box_predictor.predict(\n [curr_box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=3)\n\n detection_masks = tf.squeeze(mask_predictions[\n box_predictor.MASK_PREDICTIONS], axis=1)\n\n _, num_classes, mask_height, 
mask_width = (\n detection_masks.get_shape().as_list())\n _, max_detection = detection_classes.get_shape().as_list()\n if num_classes > 1:\n detection_masks = self._gather_instance_masks(\n detection_masks, detection_classes)\n\n prediction_dict[fields.DetectionResultFields.detection_masks] = (\n tf.reshape(detection_masks,\n [batch_size, max_detection, mask_height, mask_width]))\n\n return prediction_dict\n\n def _gather_instance_masks(self, instance_masks, classes):\n \"\"\"Gathers the masks that correspond to classes.\n\n Args:\n instance_masks: A 4-D float32 tensor with shape\n [K, num_classes, mask_height, mask_width].\n classes: A 2-D int32 tensor with shape [batch_size, max_detection].\n\n Returns:\n masks: a 3-D float32 tensor with shape [K, mask_height, mask_width].\n \"\"\"\n _, num_classes, height, width = instance_masks.get_shape().as_list()\n k = tf.shape(instance_masks)[0]\n instance_masks = tf.reshape(instance_masks, [-1, height, width])\n classes = tf.to_int32(tf.reshape(classes, [-1]))\n gather_idx = tf.range(k) * num_classes + classes\n return tf.gather(instance_masks, gather_idx)\n\n def _extract_rpn_feature_maps(self, preprocessed_inputs):\n \"\"\"Extracts RPN features.\n\n This function extracts two feature maps: a feature map to be directly\n fed to a box predictor (to predict location and objectness scores for\n proposals) and a feature map from which to crop regions which will then\n be sent to the second stage box classifier.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] image tensor.\n\n Returns:\n rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch, height, width, depth] to be used for predicting proposal boxes\n and corresponding objectness scores.\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch, height, width, depth] representing image features to crop using\n the proposals boxes.\n anchors: A BoxList representing anchors (for the RPN) in\n absolute coordinates.\n image_shape: A 1-D tensor representing the input image shape.\n \"\"\"\n image_shape = tf.shape(preprocessed_inputs)\n rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(\n preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)\n\n feature_map_shape = tf.shape(rpn_features_to_crop)\n anchors = box_list_ops.concatenate(\n self._first_stage_anchor_generator.generate([(feature_map_shape[1],\n feature_map_shape[2])]))\n\n with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()):\n kernel_size = self._first_stage_box_predictor_kernel_size\n rpn_box_predictor_features = slim.conv2d(\n rpn_features_to_crop,\n self._first_stage_box_predictor_depth,\n kernel_size=[kernel_size, kernel_size],\n rate=self._first_stage_atrous_rate,\n activation_fn=tf.nn.relu6)\n return (rpn_box_predictor_features, rpn_features_to_crop,\n anchors, image_shape)\n\n def _predict_rpn_proposals(self, rpn_box_predictor_features):\n \"\"\"Adds box predictors to RPN feature map to predict proposals.\n\n Note resulting tensors will not have been postprocessed.\n\n Args:\n rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch, height, width, depth] to be used for predicting proposal boxes\n and corresponding objectness scores.\n\n Returns:\n box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the 
anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n\n Raises:\n RuntimeError: if the anchor generator generates anchors corresponding to\n multiple feature maps. We currently assume that a single feature map\n is generated for the RPN.\n \"\"\"\n num_anchors_per_location = (\n self._first_stage_anchor_generator.num_anchors_per_location())\n if len(num_anchors_per_location) != 1:\n raise RuntimeError('anchor_generator is expected to generate anchors '\n 'corresponding to a single feature map.')\n if self._first_stage_box_predictor.is_keras_model:\n box_predictions = self._first_stage_box_predictor(\n [rpn_box_predictor_features])\n else:\n box_predictions = self._first_stage_box_predictor.predict(\n [rpn_box_predictor_features],\n num_anchors_per_location,\n scope=self.first_stage_box_predictor_scope)\n\n box_encodings = tf.concat(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n objectness_predictions_with_background = tf.concat(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1)\n return (tf.squeeze(box_encodings, axis=2),\n objectness_predictions_with_background)\n\n def _remove_invalid_anchors_and_predictions(\n self,\n box_encodings,\n objectness_predictions_with_background,\n anchors_boxlist,\n clip_window):\n \"\"\"Removes anchors that (partially) fall outside an image.\n\n Also removes associated box encodings and objectness predictions.\n\n Args:\n box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)\n in absolute coordinates.\n clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]\n extent of the window to clip/prune to.\n\n Returns:\n box_encodings: 4-D float tensor of shape\n [batch_size, num_valid_anchors, self._box_coder.code_size] containing\n predicted boxes, where num_valid_anchors <= num_anchors\n objectness_predictions_with_background: 2-D float tensor of shape\n [batch_size, num_valid_anchors, 2] containing class\n predictions (logits) for each of the anchors, where\n num_valid_anchors <= num_anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in\n absolute coordinates.\n \"\"\"\n pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(\n anchors_boxlist, clip_window)\n def _batch_gather_kept_indices(predictions_tensor):\n return shape_utils.static_or_dynamic_map_fn(\n partial(tf.gather, indices=keep_indices),\n elems=predictions_tensor,\n dtype=tf.float32,\n parallel_iterations=self._parallel_iterations,\n back_prop=True)\n return (_batch_gather_kept_indices(box_encodings),\n _batch_gather_kept_indices(objectness_predictions_with_background),\n pruned_anchors_boxlist)\n\n def _flatten_first_two_dimensions(self, inputs):\n \"\"\"Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.\n\n Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape\n [A * B, ..., depth].\n\n Args:\n inputs: A float tensor with shape [A, B, ..., depth]. 
Note that the first\n two and last dimensions must be statically defined.\n Returns:\n A float tensor with shape [A * B, ..., depth] (where the first and last\n dimension are statically defined.\n \"\"\"\n combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)\n flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +\n combined_shape[2:])\n return tf.reshape(inputs, flattened_shape)\n\n def postprocess(self, prediction_dict, true_image_shapes):\n \"\"\"Convert prediction tensors to final detections.\n\n This function converts raw predictions tensors to final detection results.\n See base class for output format conventions. Note also that by default,\n scores are to be interpreted as logits, but if a score_converter is used,\n then scores are remapped (and may thus have a different interpretation).\n\n If number_of_stages=1, the returned results represent proposals from the\n first stage RPN and are padded to have self.max_num_proposals for each\n image; otherwise, the results can be interpreted as multiclass detections\n from the full two-stage model and are padded to self._max_detections.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If number_of_stages=1, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n and `anchors` fields. Otherwise we expect prediction_dict to\n additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`,\n `proposal_boxes` and, optionally, `mask_predictions` fields.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_boxes: [batch, max_detection, 4]\n detection_scores: [batch, max_detections]\n detection_classes: [batch, max_detections]\n (this entry is only created if rpn_mode=False)\n num_detections: [batch]\n\n Raises:\n ValueError: If `predict` is called before `preprocess`.\n \"\"\"\n\n with tf.name_scope('FirstStagePostprocessor'):\n if self._number_of_stages == 1: \n # Michele's addition\n\n proposal_boxes, proposal_scores, num_proposals = self._postprocess_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'],\n true_image_shapes,\n true_image_shapes)\n return {\n fields.DetectionResultFields.detection_boxes: proposal_boxes,\n fields.DetectionResultFields.detection_scores: proposal_scores,\n fields.DetectionResultFields.num_detections:\n tf.to_float(num_proposals),\n }\n\n # TODO(jrru): Remove mask_predictions from _post_process_box_classifier.\n with tf.name_scope('SecondStagePostprocessor'):\n if (self._number_of_stages == 2 or\n (self._number_of_stages == 3 and self._is_training)):\n mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)\n detections_dict = self._postprocess_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n true_image_shapes,\n mask_predictions=mask_predictions)\n return detections_dict\n\n if self._number_of_stages == 3:\n # Post processing is already performed in 3rd stage. 
We need to transfer\n # postprocessed tensors from `prediction_dict` to `detections_dict`.\n detections_dict = {}\n for key in prediction_dict:\n if key == fields.DetectionResultFields.detection_masks:\n detections_dict[key] = tf.sigmoid(prediction_dict[key])\n elif 'detection' in key:\n detections_dict[key] = prediction_dict[key]\n return detections_dict\n\n def _postprocess_rpn(self,\n rpn_box_encodings_batch,\n rpn_objectness_predictions_with_background_batch,\n anchors,\n image_shapes,\n true_image_shapes):\n \"\"\"Converts first stage prediction tensors from the RPN to proposals.\n\n This function decodes the raw RPN predictions, runs non-max suppression\n on the result.\n\n Note that the behavior of this function is slightly modified during\n training --- specifically, we stop the gradient from passing through the\n proposal boxes and we only return a balanced sampled subset of proposals\n with size `second_stage_batch_size`.\n\n Args:\n rpn_box_encodings_batch: A 3-D float32 tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted proposal box encodings.\n rpn_objectness_predictions_with_background_batch: A 3-D float tensor of\n shape [batch_size, num_anchors, 2] containing objectness predictions\n (logits) for each of the anchors with 0 corresponding to background\n and 1 corresponding to object.\n anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN. Note that `num_anchors` can differ depending\n on whether the model is created in training or inference mode.\n image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of\n images in the batch.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n proposal_boxes: A float tensor with shape\n [batch_size, max_num_proposals, 4] representing the (potentially zero\n padded) proposal boxes for all images in the batch. These boxes are\n represented as normalized coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, max_num_proposals] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n \"\"\"\n rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)\n rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(\n rpn_box_encodings_batch)\n tiled_anchor_boxes = tf.tile(\n tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])\n proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,\n tiled_anchor_boxes)\n proposal_boxes = tf.squeeze(proposal_boxes, axis=2)\n rpn_objectness_softmax_without_background = tf.nn.softmax(\n rpn_objectness_predictions_with_background_batch)[:, :, 1]\n clip_window = self._compute_clip_window(image_shapes)\n (proposal_boxes, proposal_scores, _, _, _,\n num_proposals) = post_processing.batch_multiclass_non_max_suppression(\n tf.expand_dims(proposal_boxes, axis=2),\n tf.expand_dims(rpn_objectness_softmax_without_background,\n axis=2),\n self._first_stage_nms_score_threshold,\n self._first_stage_nms_iou_threshold,\n self._first_stage_max_proposals,\n self._first_stage_max_proposals,\n clip_window=clip_window)\n if self._is_training:\n proposal_boxes = tf.stop_gradient(proposal_boxes)\n if not self._hard_example_miner:\n (groundtruth_boxlists, groundtruth_classes_with_background_list, _,\n groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n (proposal_boxes, proposal_scores,\n num_proposals) = self._sample_box_classifier_batch(\n proposal_boxes, proposal_scores, num_proposals,\n groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_weights_list)\n # normalize proposal boxes\n def normalize_boxes(args):\n proposal_boxes_per_image = args[0]\n image_shape = args[1]\n normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(proposal_boxes_per_image), image_shape[0],\n image_shape[1], check_range=False).get()\n return normalized_boxes_per_image\n normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(\n normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)\n return normalized_proposal_boxes, proposal_scores, num_proposals\n\n def _sample_box_classifier_batch(\n self,\n proposal_boxes,\n proposal_scores,\n num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n debug=None):\n \"\"\"Samples a minibatch for second stage.\n\n Args:\n proposal_boxes: A float tensor with shape\n [batch_size, num_proposals, 4] representing the (potentially zero\n padded) proposal boxes for all images in the batch. These boxes are\n represented in absolute coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, num_proposals] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates\n of the groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes]\n indicating the weight associated with the groundtruth boxes.\n\n Returns:\n proposal_boxes: A float tensor with shape\n [batch_size, second_stage_batch_size, 4] representing the (potentially\n zero padded) proposal boxes for all images in the batch. These boxes\n are represented in absolute coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, second_stage_batch_size] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n \"\"\"\n single_image_proposal_box_sample = []\n single_image_proposal_score_sample = []\n single_image_num_proposals_sample = []\n for (single_image_proposal_boxes,\n single_image_proposal_scores,\n single_image_num_proposals,\n single_image_groundtruth_boxlist,\n single_image_groundtruth_classes_with_background,\n single_image_groundtruth_weights) in zip(\n tf.unstack(proposal_boxes),\n tf.unstack(proposal_scores),\n tf.unstack(num_proposals),\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list):\n single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)\n single_image_boxlist.add_field(fields.BoxListFields.scores,\n single_image_proposal_scores)\n sampled_boxlist = self._sample_box_classifier_minibatch_single_image(\n single_image_boxlist,\n single_image_num_proposals,\n single_image_groundtruth_boxlist,\n single_image_groundtruth_classes_with_background,\n single_image_groundtruth_weights,\n debug)\n # sampled_boxlist.set(tf.Print(sampled_boxlist.get(), [sampled_boxlist.num_boxes()], message=\"sample size \"))\n\n sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(\n sampled_boxlist,\n num_boxes=self._second_stage_batch_size)\n single_image_num_proposals_sample.append(tf.minimum(\n sampled_boxlist.num_boxes(),\n self._second_stage_batch_size))\n bb = sampled_padded_boxlist.get()\n #bb = tf.Print(bb, [single_image_groundtruth_boxlist.num_boxes()], message=(\"After padding and num of GT\" + str(bb.shape)))\n single_image_proposal_box_sample.append(bb)\n single_image_proposal_score_sample.append(\n sampled_padded_boxlist.get_field(fields.BoxListFields.scores))\n return (tf.stack(single_image_proposal_box_sample),\n tf.stack(single_image_proposal_score_sample),\n tf.stack(single_image_num_proposals_sample))\n\n def _format_groundtruth_data(self, true_image_shapes, stage='detection'):\n \"\"\"Helper function for preparing groundtruth data for target assignment.\n\n In order to be consistent with the model.DetectionModel interface,\n groundtruth boxes are specified in normalized coordinates and classes are\n specified as label indices with no assumed background category. 
To prepare\n for target assignment, we:\n 1) convert boxes to absolute coordinates,\n 2) add a background class at class index 0\n 3) groundtruth instance masks, if available, are resized to match\n image_shape.\n\n Args:\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates\n of the groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of\n shape [num_boxes, image_height, image_width] containing instance masks.\n This is set to None if no masks exist in the provided groundtruth.\n \"\"\"\n groundtruth_boxlists = [\n box_list_ops.to_absolute_coordinates(\n box_list.BoxList(boxes), true_image_shapes[i, 0],\n true_image_shapes[i, 1])\n for i, boxes in enumerate(\n self.groundtruth_lists(fields.BoxListFields.boxes))\n ]\n groundtruth_classes_with_background_list = [\n tf.to_float(\n tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'))\n for one_hot_encoding in self.groundtruth_lists(\n fields.BoxListFields.classes)]\n\n groundtruth_masks_list = self._groundtruth_lists.get(\n fields.BoxListFields.masks)\n if groundtruth_masks_list is not None:\n resized_masks_list = []\n for mask in groundtruth_masks_list:\n _, resized_mask, _ = self._image_resizer_fn(\n # Reuse the given `image_resizer_fn` to resize groundtruth masks.\n # `mask` tensor for an image is of the shape [num_masks,\n # image_height, image_width]. Below we create a dummy image of the\n # the shape [image_height, image_width, 1] to use with\n # `image_resizer_fn`.\n image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])),\n masks=mask)\n resized_masks_list.append(resized_mask)\n\n groundtruth_masks_list = resized_masks_list\n if self.groundtruth_has_field(fields.BoxListFields.weights):\n groundtruth_weights_list = self.groundtruth_lists(\n fields.BoxListFields.weights)\n else:\n # Set weights for all batch elements equally to 1.0\n groundtruth_weights_list = []\n for groundtruth_classes in groundtruth_classes_with_background_list:\n num_gt = tf.shape(groundtruth_classes)[0]\n groundtruth_weights = tf.ones(num_gt)\n groundtruth_weights_list.append(groundtruth_weights)\n\n return (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list)\n\n def _sample_box_classifier_minibatch_single_image(\n self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist,\n groundtruth_classes_with_background, groundtruth_weights, debug=None):\n \"\"\"Samples a mini-batch of proposals to be sent to the box classifier.\n\n Helper function for self._postprocess_rpn.\n\n Args:\n proposal_boxlist: A BoxList containing K proposal boxes in absolute\n coordinates.\n num_valid_proposals: Number of valid proposals in the proposal boxlist.\n groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in\n absolute coordinates.\n groundtruth_classes_with_background: A tensor with shape\n `[N, self.num_classes + 1]` representing groundtruth classes. 
The\n classes are assumed to be k-hot encoded, and include background as the\n zero-th class.\n groundtruth_weights: Weights attached to the groundtruth_boxes.\n debug: contains (optional) true_image_shape\n\n Returns:\n a BoxList contained sampled proposals.\n \"\"\"\n (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(\n proposal_boxlist,\n groundtruth_boxlist,\n groundtruth_classes_with_background,\n unmatched_class_label=tf.constant(\n [1] + self._num_classes * [0], dtype=tf.float32),\n groundtruth_weights=groundtruth_weights)\n # Selects all boxes as candidates if none of them is selected according\n # to cls_weights. This could happen as boxes within certain IOU ranges\n # are ignored. If triggered, the selected boxes will still be ignored\n # during loss computation.\n positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)\n # Debug target mapping\n #positive_indicator = tf.Print(positive_indicator, [positive_indicator, box_list_ops.to_normalized_coordinates(groundtruth_boxlist, debug[0], debug[1]).get()], summarize=999999)\n\n\n valid_indicator = tf.logical_and(\n tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,\n cls_weights > 0\n )\n sampled_indices = self._second_stage_sampler.subsample(\n valid_indicator,\n self._second_stage_batch_size,\n positive_indicator)\n return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)\n\n def _compute_second_stage_input_feature_maps(self, features_to_crop,\n proposal_boxes_normalized):\n \"\"\"Crops to a set of proposals from the feature map for a batch of images.\n\n Helper function for self._postprocess_rpn. This function calls\n `tf.image.crop_and_resize` to create the feature map to be passed to the\n second stage box classifier for each proposal.\n\n Args:\n features_to_crop: A float32 tensor with shape\n [batch_size, height, width, depth]\n proposal_boxes_normalized: A float32 tensor with shape [batch_size,\n num_proposals, box_code_size] containing proposal boxes in\n normalized coordinates.\n\n Returns:\n A float32 tensor with shape [K, new_height, new_width, depth].\n \"\"\"\n def get_box_inds(proposals):\n proposals_shape = proposals.get_shape().as_list()\n if any(dim is None for dim in proposals_shape):\n proposals_shape = tf.shape(proposals)\n ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)\n multiplier = tf.expand_dims(\n tf.range(start=0, limit=proposals_shape[0]), 1)\n return tf.reshape(ones_mat * multiplier, [-1])\n\n if self._use_matmul_crop_and_resize:\n def _single_image_crop_and_resize(inputs):\n single_image_features_to_crop, proposal_boxes_normalized = inputs\n return ops.matmul_crop_and_resize(\n tf.expand_dims(single_image_features_to_crop, 0),\n proposal_boxes_normalized,\n [self._initial_crop_size, self._initial_crop_size])\n\n cropped_regions = self._flatten_first_two_dimensions(\n shape_utils.static_or_dynamic_map_fn(\n _single_image_crop_and_resize,\n elems=[features_to_crop, proposal_boxes_normalized],\n dtype=tf.float32,\n parallel_iterations=self._parallel_iterations))\n else:\n cropped_regions = tf.image.crop_and_resize(\n features_to_crop,\n self._flatten_first_two_dimensions(proposal_boxes_normalized),\n get_box_inds(proposal_boxes_normalized),\n (self._initial_crop_size, self._initial_crop_size))\n return slim.max_pool2d(\n cropped_regions,\n [self._maxpool_kernel_size, self._maxpool_kernel_size], # Michele: Being specific to text, we want to preserve width more than height\n stride=[self._maxpool_stride, 1])\n\n def 
_postprocess_box_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n num_proposals,\n image_shapes,\n mask_predictions=None):\n \"\"\"Converts predictions from the second stage box classifier to detections.\n\n Args:\n refined_box_encodings: a 3-D float tensor with shape\n [total_num_padded_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings. If using a shared\n box across classes the shape will instead be\n [total_num_padded_proposals, 1, 4]\n class_predictions_with_background: a 3-D tensor float with shape\n [total_num_padded_proposals, num_classes + 1] containing class\n predictions (logits) for each of the proposals. Note that this tensor\n *includes* background class predictions (at class index 0).\n proposal_boxes: a 3-D float tensor with shape\n [batch_size, self.max_num_proposals, 4] representing decoded proposal\n bounding boxes in absolute coordinates.\n num_proposals: a 1-D int32 tensor of shape [batch] representing the number\n of proposals predicted for each image in the batch.\n image_shapes: a 2-D int32 tensor containing shapes of input image in the\n batch.\n mask_predictions: (optional) a 4-D float tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask prediction logits.\n\n Returns:\n A dictionary containing:\n `detection_boxes`: [batch, max_detection, 4]\n `detection_scores`: [batch, max_detections]\n `detection_classes`: [batch, max_detections]\n `num_detections`: [batch]\n `detection_masks`:\n (optional) [batch, max_detections, mask_height, mask_width]. Note\n that a pixel-wise sigmoid score converter is applied to the detection\n masks.\n \"\"\"\n refined_box_encodings_batch = tf.reshape(\n refined_box_encodings,\n [-1,\n self.max_num_proposals,\n refined_box_encodings.shape[1],\n self._box_coder.code_size])\n class_predictions_with_background_batch = tf.reshape(\n class_predictions_with_background,\n [-1, self.max_num_proposals, self.num_classes + 1]\n )\n refined_decoded_boxes_batch = self._batch_decode_boxes(\n refined_box_encodings_batch, proposal_boxes)\n class_predictions_with_background_batch = (\n self._second_stage_score_conversion_fn(\n class_predictions_with_background_batch))\n class_predictions_batch = tf.reshape(\n tf.slice(class_predictions_with_background_batch,\n [0, 0, 1], [-1, -1, -1]),\n [-1, self.max_num_proposals, self.num_classes])\n clip_window = self._compute_clip_window(image_shapes)\n mask_predictions_batch = None\n if mask_predictions is not None:\n mask_height = mask_predictions.shape[2].value\n mask_width = mask_predictions.shape[3].value\n mask_predictions = tf.sigmoid(mask_predictions)\n mask_predictions_batch = tf.reshape(\n mask_predictions, [-1, self.max_num_proposals,\n self.num_classes, mask_height, mask_width])\n (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, _,\n num_detections) = self._second_stage_nms_fn(\n refined_decoded_boxes_batch,\n class_predictions_batch,\n clip_window=clip_window,\n change_coordinate_frame=True,\n num_valid_boxes=num_proposals,\n masks=mask_predictions_batch)\n detections = {\n fields.DetectionResultFields.detection_boxes: nmsed_boxes,\n fields.DetectionResultFields.detection_scores: nmsed_scores,\n fields.DetectionResultFields.detection_classes: nmsed_classes,\n fields.DetectionResultFields.num_detections: tf.to_float(num_detections)\n }\n if nmsed_masks is not None:\n detections[fields.DetectionResultFields.detection_masks] = 
nmsed_masks\n return detections\n\n def _batch_decode_boxes(self, box_encodings, anchor_boxes):\n \"\"\"Decodes box encodings with respect to the anchor boxes.\n\n Args:\n box_encodings: a 4-D tensor with shape\n [batch_size, num_anchors, num_classes, self._box_coder.code_size]\n representing box encodings.\n anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]\n representing decoded bounding boxes. If using a shared box across\n classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n\n Returns:\n decoded_boxes: a\n [batch_size, num_anchors, num_classes, self._box_coder.code_size]\n float tensor representing bounding box predictions (for each image in\n batch, proposal and class). If using a shared box across classes the\n shape will instead be\n [batch_size, num_anchors, 1, self._box_coder.code_size].\n \"\"\"\n combined_shape = shape_utils.combined_static_and_dynamic_shape(\n box_encodings)\n num_classes = combined_shape[2]\n tiled_anchor_boxes = tf.tile(\n tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])\n tiled_anchors_boxlist = box_list.BoxList(\n tf.reshape(tiled_anchor_boxes, [-1, 4]))\n decoded_boxes = self._box_coder.decode(\n tf.reshape(box_encodings, [-1, self._box_coder.code_size]),\n tiled_anchors_boxlist)\n return tf.reshape(decoded_boxes.get(),\n tf.stack([combined_shape[0], combined_shape[1],\n num_classes, 4]))\n\n '''def loss(self, prediction_dict, true_image_shapes, scope=None):\n \"\"\"Compute scalar loss tensors given prediction tensors.\n\n If number_of_stages=1, only RPN related losses are computed (i.e.,\n `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all\n losses are computed.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If number_of_stages=1, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n `image_shape`, and `anchors` fields. 
Otherwise we expect\n prediction_dict to additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`, and\n `proposal_boxes` fields.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n scope: Optional scope name.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`, 'second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope(scope, 'Loss', prediction_dict.values()):\n (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n loss_dict = self._loss_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'], groundtruth_boxlists,\n groundtruth_classes_with_background_list, groundtruth_weights_list)\n if self._number_of_stages > 1:\n loss_dict.update(\n self._loss_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n prediction_dict['image_shape'],\n prediction_dict.get('mask_predictions'),\n groundtruth_masks_list,\n ))\n return loss_dict'''\n\n def loss(self, prediction_dict, true_image_shapes, scope=None):\n \"\"\"Compute scalar loss tensors given prediction tensors.\n\n If number_of_stages=1, only RPN related losses are computed (i.e.,\n `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all\n losses are computed.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If number_of_stages=1, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n `image_shape`, and `anchors` fields. 
Otherwise we expect\n prediction_dict to additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`, and\n `proposal_boxes` fields.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n scope: Optional scope name.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`, 'second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope(scope, 'Loss', prediction_dict.values()):\n (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n '''loss_dict = self._loss_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'], groundtruth_boxlists,\n groundtruth_classes_with_background_list, groundtruth_weights_list)'''\n #if self._number_of_stages > 1:\n # loss_dict.update(\n loss_dict = self._loss_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n prediction_dict['image_shape'],\n prediction_dict.get('mask_predictions'),\n groundtruth_masks_list,\n )#)\n return loss_dict\n\n def _loss_rpn(self, rpn_box_encodings,\n rpn_objectness_predictions_with_background, anchors,\n groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_weights_list):\n \"\"\"Computes scalar RPN loss tensors.\n\n Uses self._proposal_target_assigner to obtain regression and classification\n targets for the first stage RPN, samples a \"minibatch\" of anchors to\n participate in the loss computation, and returns the RPN losses.\n\n Args:\n rpn_box_encodings: A 4-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted proposal box encodings.\n rpn_objectness_predictions_with_background: A 2-D float tensor of shape\n [batch_size, num_anchors, 2] containing objectness predictions\n (logits) for each of the anchors with 0 corresponding to background\n and 1 corresponding to object.\n anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN. 
Note that `num_anchors` can differ depending\n on whether the model is created in training or inference mode.\n groundtruth_boxlists: A list of BoxLists containing coordinates of the\n groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape\n [num_boxes] containing weights for groundtruth boxes.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`) to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope('RPNLoss'):\n (batch_cls_targets, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _) = target_assigner.batch_assign_targets(\n target_assigner=self._proposal_target_assigner,\n anchors_batch=box_list.BoxList(anchors),\n gt_box_batch=groundtruth_boxlists,\n gt_class_targets_batch=(len(groundtruth_boxlists) * [None]),\n gt_weights_batch=groundtruth_weights_list)\n batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)\n\n def _minibatch_subsample_fn(inputs):\n cls_targets, cls_weights = inputs\n return self._first_stage_sampler.subsample(\n tf.cast(cls_weights, tf.bool),\n self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))\n batch_sampled_indices = tf.to_float(shape_utils.static_or_dynamic_map_fn(\n _minibatch_subsample_fn,\n [batch_cls_targets, batch_cls_weights],\n dtype=tf.bool,\n parallel_iterations=self._parallel_iterations,\n back_prop=True))\n\n # Normalize by number of examples in sampled minibatch\n normalizer = tf.reduce_sum(batch_sampled_indices, axis=1)\n batch_one_hot_targets = tf.one_hot(\n tf.to_int32(batch_cls_targets), depth=2)\n sampled_reg_indices = tf.multiply(batch_sampled_indices,\n batch_reg_weights)\n\n localization_losses = self._first_stage_localization_loss(\n rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices)\n objectness_losses = self._first_stage_objectness_loss(\n rpn_objectness_predictions_with_background,\n batch_one_hot_targets, weights=batch_sampled_indices)\n localization_loss = tf.reduce_mean(\n tf.reduce_sum(localization_losses, axis=1) / normalizer)\n objectness_loss = tf.reduce_mean(\n tf.reduce_sum(objectness_losses, axis=1) / normalizer)\n\n localization_loss = tf.multiply(self._first_stage_loc_loss_weight,\n localization_loss,\n name='localization_loss')\n objectness_loss = tf.multiply(self._first_stage_obj_loss_weight,\n objectness_loss, name='objectness_loss')\n loss_dict = {localization_loss.op.name: localization_loss,\n objectness_loss.op.name: objectness_loss}\n return loss_dict\n\n def _loss_box_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n image_shape,\n prediction_masks=None,\n groundtruth_masks_list=None):\n \"\"\"Computes scalar box classifier loss tensors.\n\n Uses self._detector_target_assigner to obtain regression and classification\n targets for the second stage box classifier, optionally performs\n hard mining, and returns losses. 
All losses are computed independently\n for each image and then averaged across the batch.\n Please note that for boxes and masks with multiple labels, the box\n regression and mask prediction losses are only computed for one label.\n\n This function assumes that the proposal boxes in the \"padded\" regions are\n actually zero (and thus should not be matched to).\n\n\n Args:\n refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, box_coder.code_size] representing\n predicted (final) refined box encodings. If using a shared box across\n classes this will instead have shape\n [total_num_proposals, 1, box_coder.code_size].\n class_predictions_with_background: a 2-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors. Note that this tensor\n *includes* background class predictions (at class index 0).\n proposal_boxes: [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n groundtruth_boxlists: a list of BoxLists containing coordinates of the\n groundtruth boxes.\n groundtruth_classes_with_background_list: a list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape\n [num_boxes] containing weights for groundtruth boxes.\n image_shape: a 1-D tensor of shape [4] representing the image shape.\n prediction_masks: an optional 4-D tensor with shape [total_num_proposals,\n num_classes, mask_height, mask_width] containing the instance masks for\n each box.\n groundtruth_masks_list: an optional list of 3-D tensors of shape\n [num_boxes, image_height, image_width] containing the instance masks for\n each of the boxes.\n\n Returns:\n a dictionary mapping loss keys ('second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n\n Raises:\n ValueError: if `predict_instance_masks` in\n second_stage_mask_rcnn_box_predictor is True and\n `groundtruth_masks_list` is not provided.\n \"\"\"\n with tf.name_scope('BoxClassifierLoss'):\n paddings_indicator = self._padded_batched_proposals_indicator(\n num_proposals, self.max_num_proposals)\n proposal_boxlists = [\n box_list.BoxList(proposal_boxes_single_image)\n for proposal_boxes_single_image in tf.unstack(proposal_boxes)]\n batch_size = len(proposal_boxlists)\n\n num_proposals_or_one = tf.to_float(tf.expand_dims(\n tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1))\n normalizer = tf.tile(num_proposals_or_one,\n [1, self.max_num_proposals]) * batch_size\n\n (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _) = target_assigner.batch_assign_targets(\n target_assigner=self._detector_target_assigner,\n anchors_batch=proposal_boxlists,\n gt_box_batch=groundtruth_boxlists,\n gt_class_targets_batch=groundtruth_classes_with_background_list,\n unmatched_class_label=tf.constant(\n [1] + self._num_classes * [0], dtype=tf.float32),\n gt_weights_batch=groundtruth_weights_list)\n\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background,\n [batch_size, self.max_num_proposals, -1])\n\n flat_cls_targets_with_background = tf.reshape(\n 
batch_cls_targets_with_background,\n [batch_size * self.max_num_proposals, -1])\n one_hot_flat_cls_targets_with_background = tf.argmax(\n flat_cls_targets_with_background, axis=1)\n one_hot_flat_cls_targets_with_background = tf.one_hot(\n one_hot_flat_cls_targets_with_background,\n flat_cls_targets_with_background.get_shape()[1])\n\n # If using a shared box across classes use directly\n if refined_box_encodings.shape[1] == 1:\n reshaped_refined_box_encodings = tf.reshape(\n refined_box_encodings,\n [batch_size, self.max_num_proposals, self._box_coder.code_size])\n # For anchors with multiple labels, picks refined_location_encodings\n # for just one class to avoid over-counting for regression loss and\n # (optionally) mask loss.\n else:\n # We only predict refined location encodings for the non background\n # classes, but we now pad it to make it compatible with the class\n # predictions\n refined_box_encodings_with_background = tf.pad(\n refined_box_encodings, [[0, 0], [1, 0], [0, 0]])\n refined_box_encodings_masked_by_class_targets = tf.boolean_mask(\n refined_box_encodings_with_background,\n tf.greater(one_hot_flat_cls_targets_with_background, 0))\n reshaped_refined_box_encodings = tf.reshape(\n refined_box_encodings_masked_by_class_targets,\n [batch_size, self.max_num_proposals, self._box_coder.code_size])\n\n second_stage_loc_losses = self._second_stage_localization_loss(\n reshaped_refined_box_encodings,\n batch_reg_targets, weights=batch_reg_weights) / normalizer\n second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(\n self._second_stage_classification_loss(\n class_predictions_with_background,\n batch_cls_targets_with_background,\n weights=batch_cls_weights),\n ndims=2) / normalizer\n\n second_stage_loc_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_loc_losses, paddings_indicator))\n second_stage_cls_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_cls_losses, paddings_indicator))\n\n if self._hard_example_miner:\n (second_stage_loc_loss, second_stage_cls_loss\n ) = self._unpad_proposals_and_apply_hard_mining(\n proposal_boxlists, second_stage_loc_losses,\n second_stage_cls_losses, num_proposals)\n localization_loss = tf.multiply(self._second_stage_loc_loss_weight,\n second_stage_loc_loss,\n name='localization_loss')\n\n classification_loss = tf.multiply(self._second_stage_cls_loss_weight,\n second_stage_cls_loss,\n name='classification_loss')\n\n loss_dict = {localization_loss.op.name: localization_loss,\n classification_loss.op.name: classification_loss}\n second_stage_mask_loss = None\n if prediction_masks is not None:\n if groundtruth_masks_list is None:\n raise ValueError('Groundtruth instance masks not provided. '\n 'Please configure input reader.')\n\n unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32)\n (batch_mask_targets, _, _, batch_mask_target_weights,\n _) = target_assigner.batch_assign_targets(\n target_assigner=self._detector_target_assigner,\n anchors_batch=proposal_boxlists,\n gt_box_batch=groundtruth_boxlists,\n gt_class_targets_batch=groundtruth_masks_list,\n unmatched_class_label=unmatched_mask_label,\n gt_weights_batch=groundtruth_weights_list)\n\n # Pad the prediction_masks with to add zeros for background class to be\n # consistent with class predictions.\n if prediction_masks.get_shape().as_list()[1] == 1:\n # Class agnostic masks or masks for one-class prediction. 
Logic for\n # both cases is the same since background predictions are ignored\n # through the batch_mask_target_weights.\n prediction_masks_masked_by_class_targets = prediction_masks\n else:\n prediction_masks_with_background = tf.pad(\n prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]])\n prediction_masks_masked_by_class_targets = tf.boolean_mask(\n prediction_masks_with_background,\n tf.greater(one_hot_flat_cls_targets_with_background, 0))\n\n mask_height = prediction_masks.shape[2].value\n mask_width = prediction_masks.shape[3].value\n reshaped_prediction_masks = tf.reshape(\n prediction_masks_masked_by_class_targets,\n [batch_size, -1, mask_height * mask_width])\n\n batch_mask_targets_shape = tf.shape(batch_mask_targets)\n flat_gt_masks = tf.reshape(batch_mask_targets,\n [-1, batch_mask_targets_shape[2],\n batch_mask_targets_shape[3]])\n\n # Use normalized proposals to crop mask targets from image masks.\n flat_normalized_proposals = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])),\n image_shape[1], image_shape[2]).get()\n\n flat_cropped_gt_mask = tf.image.crop_and_resize(\n tf.expand_dims(flat_gt_masks, -1),\n flat_normalized_proposals,\n tf.range(flat_normalized_proposals.shape[0].value),\n [mask_height, mask_width])\n\n batch_cropped_gt_mask = tf.reshape(\n flat_cropped_gt_mask,\n [batch_size, -1, mask_height * mask_width])\n\n second_stage_mask_losses = ops.reduce_sum_trailing_dimensions(\n self._second_stage_mask_loss(\n reshaped_prediction_masks,\n batch_cropped_gt_mask,\n weights=batch_mask_target_weights),\n ndims=2) / (\n mask_height * mask_width * tf.maximum(\n tf.reduce_sum(\n batch_mask_target_weights, axis=1, keep_dims=True\n ), tf.ones((batch_size, 1))))\n second_stage_mask_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_mask_losses, paddings_indicator))\n\n if second_stage_mask_loss is not None:\n mask_loss = tf.multiply(self._second_stage_mask_loss_weight,\n second_stage_mask_loss, name='mask_loss')\n loss_dict[mask_loss.op.name] = mask_loss\n return loss_dict\n\n def _padded_batched_proposals_indicator(self,\n num_proposals,\n max_num_proposals):\n \"\"\"Creates indicator matrix of non-pad elements of padded batch proposals.\n\n Args:\n num_proposals: Tensor of type tf.int32 with shape [batch_size].\n max_num_proposals: Maximum number of proposals per image (integer).\n\n Returns:\n A Tensor of type tf.bool with shape [batch_size, max_num_proposals].\n \"\"\"\n batch_size = tf.size(num_proposals)\n tiled_num_proposals = tf.tile(\n tf.expand_dims(num_proposals, 1), [1, max_num_proposals])\n tiled_proposal_index = tf.tile(\n tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])\n return tf.greater(tiled_num_proposals, tiled_proposal_index)\n\n def _unpad_proposals_and_apply_hard_mining(self,\n proposal_boxlists,\n second_stage_loc_losses,\n second_stage_cls_losses,\n num_proposals):\n \"\"\"Unpads proposals and applies hard mining.\n\n Args:\n proposal_boxlists: A list of `batch_size` BoxLists each representing\n `self.max_num_proposals` representing decoded proposal bounding boxes\n for each image.\n second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape\n `[batch_size, self.max_num_proposals]` representing per-anchor\n second stage localization loss values.\n second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape\n `[batch_size, self.max_num_proposals]` representing per-anchor\n second stage classification loss values.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n\n Returns:\n second_stage_loc_loss: A scalar float32 tensor representing the second\n stage localization loss.\n second_stage_cls_loss: A scalar float32 tensor representing the second\n stage classification loss.\n \"\"\"\n for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,\n single_image_num_proposals) in zip(\n proposal_boxlists,\n tf.unstack(second_stage_loc_losses),\n tf.unstack(second_stage_cls_losses),\n tf.unstack(num_proposals)):\n proposal_boxlist = box_list.BoxList(\n tf.slice(proposal_boxlist.get(),\n [0, 0], [single_image_num_proposals, -1]))\n single_image_loc_loss = tf.slice(single_image_loc_loss,\n [0], [single_image_num_proposals])\n single_image_cls_loss = tf.slice(single_image_cls_loss,\n [0], [single_image_num_proposals])\n return self._hard_example_miner(\n location_losses=tf.expand_dims(single_image_loc_loss, 0),\n cls_losses=tf.expand_dims(single_image_cls_loss, 0),\n decoded_boxlist_list=[proposal_boxlist])\n\n def restore_map(self,\n fine_tune_checkpoint_type='detection',\n load_all_detection_checkpoint_vars=False):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n See parent class for details.\n\n Args:\n fine_tune_checkpoint_type: whether to restore from a full detection\n checkpoint (with compatible variable names) or to restore from a\n classification checkpoint for initialization prior to training.\n Valid values: `detection`, `classification`. Default 'detection'.\n load_all_detection_checkpoint_vars: whether to load all variables (when\n `fine_tune_checkpoint_type` is `detection`). If False, only variables\n within the feature extractor scopes are included. Default False.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n Raises:\n ValueError: if fine_tune_checkpoint_type is neither `classification`\n nor `detection`.\n \"\"\"\n if fine_tune_checkpoint_type not in ['detection', 'classification']:\n raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(\n fine_tune_checkpoint_type))\n if fine_tune_checkpoint_type == 'classification':\n return self._feature_extractor.restore_from_classification_checkpoint_fn(\n self.first_stage_feature_extractor_scope,\n self.second_stage_feature_extractor_scope)\n\n variables_to_restore = tf.global_variables()\n variables_to_restore.append(slim.get_or_create_global_step())\n # Only load feature extractor variables to be consistent with loading from\n # a classification checkpoint.\n include_patterns = None\n if not load_all_detection_checkpoint_vars:\n include_patterns = [\n self.first_stage_feature_extractor_scope,\n self.second_stage_feature_extractor_scope\n ]\n feature_extractor_variables = tf.contrib.framework.filter_variables(\n variables_to_restore, include_patterns=include_patterns)\n return {var.op.name: var for var in feature_extractor_variables}\n"
] | [
[
"tensorflow.reshape",
"tensorflow.unstack",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.concat",
"tensorflow.slice",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.greater",
"tensorflow.multiply",
"tensorflow.contrib.framework.filter_variables",
"tensorflow.constant",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.to_float",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.global_variables",
"tensorflow.cast",
"tensorflow.boolean_mask",
"tensorflow.tile",
"tensorflow.size",
"tensorflow.pad",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.stop_gradient",
"tensorflow.to_int32",
"tensorflow.argmax",
"tensorflow.gather"
]
] |
Sakura176/PointRCNN | [
"a7fbb25e931609a39c32cb821a7c98a326e8b0c0"
] | [
"tools/train_eval.py"
] | [
"import os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom lib.utils.bbox_transform import decode_bbox_target\nfrom tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate\n\nfrom lib.config import cfg\nimport lib.utils.kitti_utils as kitti_utils\nimport lib.utils.iou3d.iou3d_utils as iou3d_utils\nfrom datetime import datetime\nfrom tensorboardX import SummaryWriter\nimport tqdm\n\nnp.random.seed(1024) # set the same seed\n\n\ndef save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):\n corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)\n img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)\n\n img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)\n img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)\n img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)\n img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)\n\n img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]\n img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]\n box_valid_mask = np.logical_and(\n img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)\n\n kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)\n with open(kitti_output_file, 'w') as f:\n for k in range(bbox3d.shape[0]):\n if box_valid_mask[k] == 0:\n continue\n x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]\n beta = np.arctan2(z, x)\n alpha = -np.sign(beta) * np.pi / 2 + beta + ry\n\n print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %\n (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],\n bbox3d[k, 3], bbox3d[k, 4], bbox3d[k,\n 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],\n bbox3d[k, 6], scores[k]), file=f)\n\n\ndef eval_one_epoch_joint(model, dataloader, epoch_id, result_dir):\n # print(\"-----------------joint____________________________*******\")\n np.random.seed(666)\n MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()\n mode = 'EVAL'\n\n final_output_dir = os.path.join(result_dir, 'final_result', 'data')\n os.makedirs(final_output_dir, exist_ok=True)\n\n if True:\n # print(\"------------save_result__________________*******\")\n roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')\n refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')\n rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')\n os.makedirs(rpn_output_dir, exist_ok=True)\n os.makedirs(roi_output_dir, exist_ok=True)\n os.makedirs(refine_output_dir, exist_ok=True)\n\n model.eval()\n\n thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]\n total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0\n total_roi_recalled_bbox_list = [0] * 5\n dataset = dataloader.dataset\n cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0\n\n progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')\n for data in dataloader:\n cnt += 1\n calib = data['calib']\n sample_id, pts_rect, pts_features, pts_input = \\\n data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']\n batch_size = len(sample_id)\n inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()\n input_data = {'pts_input': inputs, 'calib': calib}\n\n # model inference\n ret_dict = model(input_data)\n print(ret_dict.key())\n\n roi_scores_raw = ret_dict['roi_scores_raw'] # (B, M)\n roi_boxes3d = ret_dict['rois'] # (B, M, 7)\n seg_result = ret_dict['seg_result'].long() # (B, N)\n\n rcnn_cls = ret_dict['rcnn_cls'].view(\n batch_size, -1, 
ret_dict['rcnn_cls'].shape[1])\n rcnn_reg = ret_dict['rcnn_reg'].view(\n batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)\n\n # bounding box regression\n anchor_size = MEAN_SIZE\n if cfg.RCNN.SIZE_RES_ON_ROI:\n assert False\n\n pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),\n anchor_size=anchor_size,\n loc_scope=cfg.RCNN.LOC_SCOPE,\n loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,\n num_head_bin=cfg.RCNN.NUM_HEAD_BIN,\n get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,\n loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,\n get_ry_fine=True).view(batch_size, -1, 7)\n\n # scoring\n if rcnn_cls.shape[2] == 1:\n raw_scores = rcnn_cls # (B, M, 1)\n\n norm_scores = torch.sigmoid(raw_scores)\n pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()\n else:\n pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)\n cls_norm_scores = F.softmax(rcnn_cls, dim=1)\n raw_scores = rcnn_cls[:, pred_classes]\n norm_scores = cls_norm_scores[:, pred_classes]\n\n # evaluation\n recalled_num = gt_num = rpn_iou = 0\n if not False:\n if not cfg.RPN.FIXED:\n rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']\n rpn_cls_label = torch.from_numpy(\n rpn_cls_label).cuda(non_blocking=True).long()\n\n gt_boxes3d = data['gt_boxes3d']\n\n for k in range(batch_size):\n # calculate recall\n cur_gt_boxes3d = gt_boxes3d[k]\n tmp_idx = cur_gt_boxes3d.__len__() - 1\n\n while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:\n tmp_idx -= 1\n\n if tmp_idx >= 0:\n cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]\n\n cur_gt_boxes3d = torch.from_numpy(\n cur_gt_boxes3d).cuda(non_blocking=True).float()\n iou3d = iou3d_utils.boxes_iou3d_gpu(\n pred_boxes3d[k], cur_gt_boxes3d)\n gt_max_iou, _ = iou3d.max(dim=0)\n refined_iou, _ = iou3d.max(dim=1)\n\n for idx, thresh in enumerate(thresh_list):\n total_recalled_bbox_list[idx] += (\n gt_max_iou > thresh).sum().item()\n recalled_num += (gt_max_iou > 0.7).sum().item()\n gt_num += cur_gt_boxes3d.shape[0]\n total_gt_bbox += cur_gt_boxes3d.shape[0]\n\n # original recall\n iou3d_in = iou3d_utils.boxes_iou3d_gpu(\n roi_boxes3d[k], cur_gt_boxes3d)\n gt_max_iou_in, _ = iou3d_in.max(dim=0)\n\n for idx, thresh in enumerate(thresh_list):\n total_roi_recalled_bbox_list[idx] += (\n gt_max_iou_in > thresh).sum().item()\n\n if not cfg.RPN.FIXED:\n fg_mask = rpn_cls_label > 0\n correct = ((seg_result == rpn_cls_label)\n & fg_mask).sum().float()\n union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct\n rpn_iou = correct / torch.clamp(union, min=1.0)\n total_rpn_iou += rpn_iou.item()\n\n disp_dict = {\n 'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}\n progress_bar.set_postfix(disp_dict)\n progress_bar.update()\n\n if True:\n # save roi and refine results\n roi_boxes3d_np = roi_boxes3d.cpu().numpy()\n pred_boxes3d_np = pred_boxes3d.cpu().numpy()\n roi_scores_raw_np = roi_scores_raw.cpu().numpy()\n raw_scores_np = raw_scores.cpu().numpy()\n\n rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()\n rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()\n seg_result_np = seg_result.cpu().numpy()\n output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),\n seg_result_np.reshape(batch_size, -1, 1)), axis=2)\n\n for k in range(batch_size):\n cur_sample_id = sample_id[k]\n calib = dataset.get_calib(cur_sample_id)\n image_shape = dataset.get_image_shape(cur_sample_id)\n save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,\n 
roi_scores_raw_np[k], image_shape)\n save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,\n raw_scores_np[k], image_shape)\n\n output_file = os.path.join(\n rpn_output_dir, '%06d.npy' % cur_sample_id)\n np.save(output_file, output_data.astype(np.float32))\n\n # scores thresh\n inds = norm_scores > cfg.RCNN.SCORE_THRESH\n\n for k in range(batch_size):\n cur_inds = inds[k].view(-1)\n if cur_inds.sum() == 0:\n continue\n\n pred_boxes3d_selected = pred_boxes3d[k, cur_inds]\n raw_scores_selected = raw_scores[k, cur_inds]\n norm_scores_selected = norm_scores[k, cur_inds]\n\n # NMS thresh\n # rotated nms\n boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(\n pred_boxes3d_selected)\n keep_idx = iou3d_utils.nms_gpu(\n boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)\n pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]\n scores_selected = raw_scores_selected[keep_idx]\n pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu(\n ).numpy(), scores_selected.cpu().numpy()\n\n cur_sample_id = sample_id[k]\n calib = dataset.get_calib(cur_sample_id)\n final_total += pred_boxes3d_selected.shape[0]\n image_shape = dataset.get_image_shape(cur_sample_id)\n save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,\n final_output_dir, scores_selected, image_shape)\n\n progress_bar.close()\n # dump empty files\n split_file = os.path.join(dataset.imageset_dir,\n '..', '..', 'ImageSets', dataset.split + '.txt')\n split_file = os.path.abspath(split_file)\n image_idx_list = [x.strip() for x in open(split_file).readlines()]\n empty_cnt = 0\n for k in range(image_idx_list.__len__()):\n cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])\n if not os.path.exists(cur_file):\n with open(cur_file, 'w') as temp_f:\n pass\n empty_cnt += 1\n\n ret_dict = {'empty_cnt': empty_cnt}\n\n avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))\n avg_cls_acc = (total_cls_acc / max(cnt, 1.0))\n avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))\n avg_det_num = (final_total / max(len(dataset), 1.0))\n\n ret_dict['rpn_iou'] = avg_rpn_iou\n ret_dict['rcnn_cls_acc'] = avg_cls_acc\n ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined\n ret_dict['rcnn_avg_num'] = avg_det_num\n\n for idx, thresh in enumerate(thresh_list):\n cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(\n total_gt_bbox, 1.0)\n\n ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall\n\n for idx, thresh in enumerate(thresh_list):\n cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)\n\n ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall\n\n if cfg.TEST.SPLIT != 'test':\n name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}\n ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,\n current_class=name_to_class[cfg.CLASSES])\n\n ret_dict.update(ap_dict)\n\n return ap_result_str\n"
] | [
[
"numpy.arctan2",
"numpy.sign",
"torch.argmax",
"torch.nn.functional.softmax",
"numpy.logical_and",
"numpy.random.seed",
"numpy.clip",
"torch.from_numpy",
"torch.sigmoid",
"torch.clamp"
]
] |
zblanks/parallel_esn | [
"25a979d0863ce54a4a588f4216dc473d4e9c5e8a"
] | [
"parallel_esn/bo.py"
] | [
"from math import log10\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import Matern\nimport numpy as np\nfrom .utils import create_rng\n\n\nclass BO:\n \"\"\"\n Bayesian Optimization framework\n \"\"\"\n\n def __init__(self, k, hidden_dim=(100, 10000),\n spectral_radius=(.9, 1.3), p=(0, 1),\n alpha=(0, 1), beta=(1e-5, 1e3), random_state=None):\n \"\"\"\n\n Parameters\n ----------\n k : tuple\n Range of values for nearest neighbors in small-world network\n hidden_dim : tuple, optional\n Range values for the number of nodes in the reservoir\n spectral_radius : tuple, optional\n Range of values for the spectral radius for the reservoir\n p : tuple, optional\n Range of values to consider for the rewire probability\n alpha : tuple, optional\n Range of values for the leaking rate\n beta : tuple, optional\n Range of values for the L2 regression regularization\n random_state : int or np.random.RandomState, optional\n Random state initializer\n \"\"\"\n # Check that all the hyper-parameters are tuples with two entries\n # which define the lower and upper bounds for the search space\n hyper_params = [k, hidden_dim, spectral_radius, p, alpha, beta]\n for param in hyper_params:\n assert isinstance(param, tuple), \"{} must be a tuple\".format(param)\n assert len(param) == 2, \"{} must have two arguments; the upper\" \\\n \"and lower bound\".format(param)\n\n self.lwr_k = k[0]\n self.upr_k = k[1]\n self.lwr_hidden_dim = hidden_dim[0]\n self.upr_hidden_dim = hidden_dim[1]\n self.lwr_spectral_radius = spectral_radius[0]\n self.upr_spectral_radius = spectral_radius[1]\n self.lwr_p = p[0]\n self.upr_p = p[1]\n self.lwr_alpha = alpha[0]\n self.upr_alpha = alpha[1]\n self.lwr_beta = beta[0]\n self.upr_beta = beta[1]\n\n self.rng = create_rng(random_state)\n self.gpr = GaussianProcessRegressor(kernel=Matern(),\n random_state=self.rng)\n\n # We need a placeholder for different hyper-parameter values that\n # arrive and the corresponding error values\n self.H = []\n self.y = []\n\n def update_gpr(self, X, y):\n \"\"\"\n Updates the Gaussian process with new data and error value\n\n Updates the Gaussian process by adding, `H`, the list of\n hyper-parameter values that were used with true function and y\n is the resulting error from the model\n\n Parameters\n ----------\n X : list\n Hyper-parameter values that were tried\n y : float\n Error that resulted from using X on the true function\n\n Returns\n -------\n None\n\n \"\"\"\n self.H.append(X)\n self.y.append(y)\n\n self.gpr.fit(self.H, self.y)\n\n def _sample_uniformly(self, num_samples, lwr_bound, upr_bound):\n \"\"\"\n Samples uniformly from a non-uniform space\n\n Parameters\n ----------\n num_samples : int\n Number of samples to generate\n lwr_bound : float\n Hyper-parameter lower bound\n upr_bound : float\n Hyper-parameter upper bound\n\n Returns\n -------\n param_vals : np.ndarray\n Uniformly sampled hyper-parameter values\n\n \"\"\"\n # To sample in a uniform fashion we need the base ten representation\n # of the upper and lower bounds and then we treat this as a region\n # to sample\n new_lwr_bound = log10(lwr_bound)\n new_upr_bound = log10(upr_bound)\n samples = self.rng.uniform(low=new_lwr_bound, high=new_upr_bound,\n size=(num_samples, 1))\n param_vals = np.power(10, samples)\n return param_vals\n\n def _build_options(self, num_samples=1000):\n \"\"\"\n Builds matrix which defines possible options for this iteration\n\n Parameters\n ----------\n num_samples : int, optional\n Number of 
hyper-parameter samples to generate\n\n Returns\n -------\n H_space : np.ndarray\n Matrix of options for the ESN hyper-parameters\n\n \"\"\"\n k_vals = self.rng.randint(low=self.lwr_k, high=self.upr_k,\n size=(num_samples, 1), dtype=np.int32)\n\n hidden_dim_vals = self.rng.randint(low=self.lwr_hidden_dim,\n high=self.upr_hidden_dim,\n size=(num_samples, 1),\n dtype=np.int32)\n\n spectral_radius_vals = self.rng.uniform(low=self.lwr_spectral_radius,\n high=self.upr_spectral_radius,\n size=(num_samples, 1))\n\n p_vals = self.rng.uniform(low=self.lwr_p, high=self.upr_p,\n size=(num_samples, 1))\n\n alpha_vals = self.rng.uniform(low=self.lwr_alpha, high=self.upr_alpha,\n size=(num_samples, 1))\n\n beta_vals = self._sample_uniformly(num_samples, self.lwr_beta,\n self.upr_beta)\n\n H_space = np.concatenate([k_vals, hidden_dim_vals,\n spectral_radius_vals, p_vals, alpha_vals,\n beta_vals], axis=1)\n return H_space\n\n def find_best_choices(self, num_samples=1000, num_choices=1):\n \"\"\"\n Finds the best hyper-parameter combination\n\n Parameters\n ----------\n num_samples : int, optional\n Number of hyper-parameter samples to generate\n num_choices : int, optional\n Number of choices to select\n\n Returns\n -------\n param_vals : dict\n Best hyper-parameter values for the current Gaussian process\n\n \"\"\"\n H_space = self._build_options(num_samples)\n\n # For the first MPI iteration because there is no prior, randomly\n # sample num_choices points\n if num_choices > 1:\n idx = self.rng.choice(np.arange(num_samples), size=num_choices,\n replace=False)\n best_vals = H_space[idx, :]\n else:\n y_pred = self.gpr.sample_y(H_space, random_state=self.rng)\n choices = np.argmin(y_pred)\n best_vals = H_space[choices, :]\n\n hyper_parameters = ['k', 'hidden_dim', 'spectral_radius', 'p', 'alpha',\n 'beta']\n\n param_vals = {}\n for (i, val) in enumerate(hyper_parameters):\n if num_choices == 1:\n param_vals[val] = best_vals[i]\n\n if (val == 'k') or (val == 'hidden_dim'):\n param_vals[val] = int(param_vals[val])\n else:\n param_vals[val] = best_vals[:, i]\n\n if (val == 'k') or (val == 'hidden_dim'):\n param_vals[val] = param_vals[val].astype(int)\n\n return param_vals\n\n def return_best_parameters(self):\n min_error = min(self.y)\n index = self.y.index(min_error)\n print(\"Minimum Validation Error = \", min_error)\n print(\"Best parameters found = \", self.H[index])\n return min_error, self.H[index]\n"
] | [
[
"numpy.argmin",
"numpy.arange",
"numpy.power",
"sklearn.gaussian_process.kernels.Matern",
"numpy.concatenate"
]
] |
hjonnala/deeplab2 | [
"1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e"
] | [
"model/encoder/model_export_test.py"
] | [
"# coding=utf-8\n# Copyright 2022 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests of model exports for axial_resnet_instances.\"\"\"\n\nimport os\n\nfrom absl import flags\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom deeplab2.model.encoder import axial_resnet_instances\n\nFLAGS = flags.FLAGS\n\n\nclass ModelExportTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\n ('resnet50',),\n ('resnet50_beta',),\n ('max_deeplab_s_backbone',),\n ('max_deeplab_l_backbone',),\n ('axial_resnet_s',),\n ('axial_resnet_l',),\n ('axial_deeplab_s',),\n ('axial_deeplab_l',),\n ('swidernet',),\n ('axial_swidernet',),\n )\n def test_model_export(self, model_name):\n model = axial_resnet_instances.get_model(\n model_name,\n output_stride=16,\n backbone_layer_multiplier=1.0,\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001,\n # Test with small models only.\n num_blocks=[2, 2, 2, 2],\n # Disable drop path as it is not compatible with model exporting.\n block_group_config={'drop_path_keep_prob': 1.0})\n model(tf.keras.Input([257, 257, 3], batch_size=1), training=False)\n export_dir = os.path.join(\n FLAGS.test_tmpdir, 'test_model_export', model_name)\n model.save(export_dir)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.test.main",
"tensorflow.keras.Input"
]
] |
kagemeka/atcoder-submissions | [
"91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e",
"91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e"
] | [
"jp.atcoder/abc012/abc012_4/21865313.py",
"jp.atcoder/abc081/arc086_b/17664033.py"
] | [
"from __future__ import annotations\n\nfrom typing import Generator, NoReturn\n\n\nclass StdReader:\n def __init__(\n self,\n ) -> NoReturn:\n import sys\n\n self.buf = sys.stdin.buffer\n self.lines = self.async_readlines()\n self.chunks: Generator\n\n def async_readlines(\n self,\n ) -> Generator:\n while True:\n gen = self.line_chunks()\n yield gen\n\n def line_chunks(\n self,\n ) -> Generator:\n ln = self.buf.readline()\n for chunk in ln.split():\n yield chunk\n\n def __call__(\n self,\n ) -> bytes:\n try:\n chunk = next(self.chunks)\n except:\n self.chunks = next(\n self.lines,\n )\n chunk = self()\n return chunk\n\n def str(\n self,\n ) -> str:\n b = self()\n return b.decode()\n\n def int(\n self,\n ) -> int:\n return int(self.str())\n\n\nfrom abc import ABC, abstractmethod\n\n\nclass Solver(ABC):\n def __init__(self):\n self.reader = StdReader()\n\n def __call__(\n self,\n ):\n self.prepare()\n self.solve()\n\n @abstractmethod\n def prepare(self):\n ...\n\n @abstractmethod\n def solve(self):\n ...\n\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import floyd_warshall\n\n\nclass Problem(\n Solver,\n):\n def prepare(self):\n reader = self.reader\n n = reader.int()\n m = reader.int()\n a = [reader.int() for _ in range(3 * m)]\n a = np.array(\n a,\n ).reshape(m, 3)\n a, b, t = a.T\n self.n, self.m = n, m\n self.a = a - 1\n self.b = b - 1\n self.t = t\n\n def solve(self):\n self.compute_dist_mat()\n dist = self.dist\n d = dist.max(axis=1).min()\n print(int(d))\n\n def compute_dist_mat(\n self,\n ):\n n = self.n\n a = self.a\n b = self.b\n t = self.t\n g = csr_matrix(\n (t, (a, b)),\n shape=(n, n),\n )\n dist = floyd_warshall(\n csgraph=g,\n directed=False,\n )\n self.dist = dist\n\n\ndef main():\n t = 1\n # t = StdReader().int()\n for _ in range(t):\n Problem()()\n\n\nif __name__ == \"__main__\":\n main()\n",
"import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom functools import lru_cache, reduce\r\nfrom heapq import heapify, heappop, heappush\r\nfrom operator import or_, xor\r\n\r\nsys.setrecursionlimit(10**7)\r\ninf = float(\"inf\")\r\nMOD = 10**9 + 7\r\n# MOD = 998244353\r\n\r\n\r\nusing_numpy = 1\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import i8, njit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n connected_components,\r\n csgraph_to_dense,\r\n maximum_flow,\r\n minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Modular(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self):\r\n return f\"{self.value}\"\r\n\r\n def __add__(self, other):\r\n return self.__class__((self.value + other.value) % self.mod)\r\n\r\n def __sub__(self, x):\r\n return self.__class__((self.value - x.value) % self.mod)\r\n\r\n def __mul__(self, x):\r\n return self.__class__((self.value * x.value) % self.mod)\r\n\r\n def __pow__(self, x):\r\n return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x):\r\n return self.value < x.value\r\n\r\n def __le__(self, x):\r\n return self.value <= x.value\r\n\r\n def __eq__(self, x):\r\n return self.value == x.value\r\n\r\n def __ne__(self, x):\r\n return self.value != x.value\r\n\r\n def __gt__(self, x):\r\n return self.value > x.value\r\n\r\n def __ge__(self, x):\r\n return self.value >= x.value\r\n\r\n class SemiGroup:\r\n pass\r\n\r\n class Monoid:\r\n pass\r\n\r\n class Group:\r\n pass\r\n\r\n class SemiRing:\r\n pass\r\n\r\n class Ring:\r\n pass\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] += a[i][k] * b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9 + 7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n & 1:\r\n b = cls.dot(b, a)\r\n n >>= 1\r\n a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod\r\n b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & b.T[None, :, :], axis=-1\r\n )\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] ^= a[i][k] & b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, n):\r\n if n == 0:\r\n return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)\r\n res = cls.bitwise_mat_pow(a, n // 2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n & 1 else res\r\n\r\n @staticmethod\r\n def cumprod(a, mod):\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 
1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= mod\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= mod\r\n return np.ravel(a)[:l]\r\n\r\n @classmethod\r\n def generate_fac_ifac(cls, n, p=MOD):\r\n if using_numpy:\r\n fac = np.arange(n + 1)\r\n fac[0] = 1\r\n fac = cls.cumprod(fac, p)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = cls.cumprod(ifac, p)[n::-1]\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n return fac, ifac\r\n\r\n class Kitamasa:\r\n pass\r\n\r\n\r\nmint = Algebra.Modular\r\n\r\n\r\nclass NumberTheory:\r\n class PrimeNumbers: # pn\r\n def __init__(self, n=2 * 10**6):\r\n self.is_prime, self.prime_nums = self.find(n)\r\n\r\n def __call__(self, n):\r\n return self.is_prime[n]\r\n\r\n def __iter__(self):\r\n return iter(self.prime_nums)\r\n\r\n def __getitem__(self, key):\r\n return self.prime_nums[key]\r\n\r\n @staticmethod\r\n def find(n): # Sieve of eratosthenes\r\n if using_numpy:\r\n is_prime = np.ones(n + 1, dtype=np.bool)\r\n is_prime[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if is_prime[i]:\r\n is_prime[i * 2 :: i] = 0\r\n prime_nums = np.flatnonzero(is_prime)\r\n else:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not is_prime[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n is_prime[j] = 0\r\n prime_nums = [i for i in range(2, n + 1) if is_prime[i]]\r\n return is_prime, prime_nums\r\n\r\n @lru_cache(maxsize=None)\r\n def factorize(self, n):\r\n res = defaultdict(int)\r\n if n < 2:\r\n return res\r\n for p in self:\r\n if p * p > n:\r\n break\r\n while n % p == 0:\r\n res[p] += 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def factorize_factorial(self, n):\r\n res = defaultdict(int)\r\n for i in range(2, n + 1):\r\n for p, c in self.factorize(i).items():\r\n res[p] += c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass Combinatorics:\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None):\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)\r\n if mod:\r\n res %= mod\r\n return res\r\n\r\n class CombinationsMod:\r\n def __init__(self, n=2 * 10**6, mod=MOD):\r\n self.__mod = mod\r\n self.fac, self.ifac = Algebra.generate_fac_ifac(n, mod)\r\n\r\n def __call__(self, n, r):\r\n return self.__choose(n, r)\r\n\r\n def __choose(self, n, r):\r\n bl = (0 <= r) & (r <= n)\r\n p = self.__mod\r\n return bl * self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p\r\n\r\n def make_nchoose_table(self, n):\r\n p = 
self.__mod\r\n r = len(self.__fac) - 1\r\n if using_numpy:\r\n n_choose = np.arange(n + 1, n - r, -1)\r\n n_choose[0] = 1\r\n n_choose = Algebra.cumprod(n_choose, p) * self.ifac % p\r\n else:\r\n n_choose = [None] * (r + 1)\r\n n_choose[0] = 1\r\n for i in range(r):\r\n n_choose[i + 1] = n_choose[i] * (n - i) % p\r\n for i in range(1, r + 1):\r\n n_choose[i] = n_choose[i] * self.ifac[i] % p\r\n return n_choose\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a)\r\n n = len(a)\r\n if r is None:\r\n r = n\r\n res = []\r\n if r > n or i > r:\r\n return res\r\n if i == r:\r\n return [tuple(a[:r])]\r\n for j in range(i, n):\r\n a[i], a[j] = a[j], a[i]\r\n res += cls.permutations(a, r, i + 1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n:\r\n return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r - 1, -1, -1):\r\n if indices[i] != i + n - r:\r\n break\r\n else:\r\n return\r\n indices[i] += 1\r\n for j in range(i + 1, r):\r\n indices[j] = indices[j - 1] + 1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\nclass DP:\r\n @staticmethod\r\n def LIS(a):\r\n res = [inf] * len(a)\r\n for x in a:\r\n res[bi_l(res, x)] = x\r\n return res\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n class __Edge:\r\n def __init__(self, weight=1, capacity=1, **args):\r\n self.weight = weight\r\n self.capacity = capacity\r\n\r\n def __str__(self):\r\n return f\"weight: {self.weight}, cap: {self.capacity}\"\r\n\r\n class __Node:\r\n def __init__(self, **args):\r\n pass\r\n\r\n def __init__(self, n=0):\r\n self.__N = n\r\n self.nodes = [None] * n\r\n self.edges = [{} for _ in range(n)]\r\n\r\n def add_node_info(self, v, **args):\r\n self.nodes[v] = self.__Node(**args)\r\n\r\n def add_edge(self, u, v, update=False, **args):\r\n if not update and v in self.edges[u]:\r\n return\r\n self.edges[u][v] = self.__Edge(**args)\r\n\r\n def get_size(self):\r\n return self.__N\r\n\r\n def bfs(self, src=0):\r\n n = self.__N\r\n self.depth = self.lv = lv = [None] * n\r\n lv[src] = 0 # depth in tree, or level in general graph.\r\n self.dist = dist = [inf] * n\r\n dist[src] = 0 # dist for only tree.\r\n self.parent = par = [None] * n\r\n par[src] = src\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or lv[v] is not None:\r\n continue\r\n lv[v], dist[v], par[v] = lv[u] + 1, dist[u] + e.weight, u\r\n q.append(v)\r\n return dist\r\n\r\n def dinic(self, src, sink):\r\n def flow_to_sink(u, flow_in):\r\n if u == sink:\r\n return flow_in\r\n flow = 0\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or self.lv[v] <= self.lv[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, e.capacity))\r\n if not f:\r\n continue\r\n self.edges[u][v].capacity -= f\r\n if u in self.edges[v]:\r\n self.edges[v][u].capacity += f\r\n else:\r\n self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n self.bfs(src)\r\n if self.lv[sink] is None:\r\n return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n 
pass\r\n\r\n def floyd_warshall(self):\r\n n = self.__N\r\n d = [[inf] * n for _ in range(n)]\r\n for u in range(n):\r\n d[u][u] = 0\r\n for v, e in self.edges[u].items():\r\n d[u][v] = e.weight\r\n for w in range(n):\r\n for u in range(n):\r\n for v in range(n):\r\n d[u][v] = min(d[u][v], d[u][w] + d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = [inf] * self.__N\r\n dist[src] = 0\r\n visited = [False] * self.__N\r\n paths = [0] * self.__N\r\n paths[src] = 1\r\n q = [(0, src)]\r\n while q:\r\n d, u = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u] = True\r\n for v, e in self.edges[u].items():\r\n dv = d + e.weight\r\n if dv > dist[v]:\r\n continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if mod:\r\n paths[v] %= mod\r\n continue\r\n paths[v], dist[v] = paths[u], dv\r\n heappush(q, (dv, v))\r\n if paths_cnt:\r\n return dist, paths\r\n else:\r\n return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = [inf] * self.__N\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n _, c, u = heappop(q)\r\n if u == tgt:\r\n return c\r\n if cost[u] != inf:\r\n continue\r\n cost[u] = c\r\n for v, e in self.edges[u].items():\r\n if cost[v] != inf:\r\n continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e.weight\r\n heappush(q, (h + nc, nc, v))\r\n return inf\r\n\r\n def bellman_ford(self, src):\r\n n = self.__N\r\n d = [inf] * n\r\n d[src] = 0\r\n for _ in range(n - 1):\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n d[v] = min(d[v], d[u] + e.weight)\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n if d[u] + e.weight < d[v]:\r\n raise Exception(\"found negative cycle.\")\r\n return d\r\n\r\n def bfs01(self, src=0):\r\n d = [inf] * self.__N\r\n d[src] = 0\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n dv = d[u] + e.weight\r\n if d[v] <= dv:\r\n continue\r\n d[v] = dv\r\n if e.weight:\r\n q.append(v)\r\n else:\r\n q.appendleft(v)\r\n return d\r\n\r\n def find_ancestors(self): # tree doubling.\r\n self.__ancestors = ancestors = [self.parent]\r\n for _ in range(max(self.depth).bit_length()):\r\n ancestors.append([ancestors[-1][u] for u in ancestors[-1]])\r\n\r\n def find_dist(self, u, v):\r\n return (\r\n self.dist[u]\r\n + self.dist[v]\r\n - 2 * self.dist[self.__find_lca(u, v)]\r\n )\r\n\r\n def __find_lca(self, u, v):\r\n du, dv = self.depth[u], self.depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - du\r\n for i in range(d.bit_length()): # up-stream\r\n if d >> i & 1:\r\n v = self.__ancestors[i][v]\r\n if v == u:\r\n return v\r\n\r\n for i in range(\r\n du.bit_length() - 1, -1, -1\r\n ): # find direct child of LCA.\r\n nu, nv = self.__ancestors[i][u], self.__ancestors[i][v]\r\n if nu == nv:\r\n continue\r\n u, v = nu, nv\r\n\r\n return self.__ancestors[0][u]\r\n\r\n def init_dsu(self): # disjoint set union (union-find)\r\n n = self.__N\r\n self.parent = list(range(n))\r\n self.rank = [0] * n\r\n self.size = [1] * n\r\n\r\n def find(self, u):\r\n if self.parent[u] == u:\r\n return u\r\n self.parent[u] = self.find(self.parent[u])\r\n return self.parent[u]\r\n\r\n def unite(self, u, v):\r\n u, v = self.find(u), self.find(v)\r\n if u == v:\r\n return\r\n if self.rank[u] < self.rank[v]:\r\n u, v = v, u\r\n self.parent[v] = u\r\n self.size[u] += self.size[v]\r\n self.rank[u] = max(self.rank[u], self.rank[v] + 1)\r\n\r\n def same(self, u, v):\r\n return self.find(u) == self.find(v)\r\n\r\n def scc(self): # 
strongly connected components\r\n n = self.__N\r\n visited, q, root, r = [False] * n, [], [None] * n, 0\r\n gg = self.__class__(n)\r\n for u in range(n):\r\n for v in self.edges[u]:\r\n gg.add_edge(v, u)\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return\r\n visited[u] = True\r\n for v in self.edges[u]:\r\n dfs(v)\r\n q.append(u)\r\n\r\n def rev_dfs(u, r):\r\n if root[u] is not None:\r\n return\r\n root[u] = r\r\n for v in gg.edges[u]:\r\n rev_dfs(v, r)\r\n\r\n for u in range(n):\r\n dfs(u)\r\n for u in q[::-1]:\r\n rev_dfs(u, r)\r\n r += 1\r\n return root\r\n\r\n def kruskal(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n edges = sorted(\r\n [\r\n (u, v, e.weight)\r\n for u in range(n)\r\n for v, e in self.edges[u].items()\r\n ],\r\n key=lambda x: x[2],\r\n )\r\n g = self.__class__(n)\r\n d = 0\r\n for u, v, w in edges:\r\n if uf.same(u, v):\r\n continue\r\n uf.unite(u, v)\r\n g.add_edge(u, v, weight=w)\r\n d += w\r\n return g, d\r\n\r\n def prim(self, src=0, return_parent=False): # minimum spanning tree\r\n n = self.__N\r\n g = self.__class__(n)\r\n parent, visited, dist = [None] * n, [False] * n, 0\r\n q = [(0, (src, src))]\r\n while q:\r\n d, (w, u) = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u], parent[u] = True, w\r\n dist += d\r\n g.add_edge(w, u, weight=d)\r\n for v, e in self.edges[u].items():\r\n if not visited[v]:\r\n heappush(q, (e.weight, (u, v)))\r\n if return_parent:\r\n return g, dist, parent\r\n return g, dist\r\n\r\n def boruvka(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n g = self.__class__(n)\r\n d = 0\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return (inf, (None, None))\r\n visited[u] = True\r\n cand = []\r\n for v, e in self.edges[u].items():\r\n if uf.same(u, v):\r\n cand.append(dfs(v))\r\n continue\r\n cand.append((e.weight, (u, v)))\r\n return sorted(cand)[0]\r\n\r\n while len(set(uf.parent)) != 1:\r\n edges, visited = [], [False] * n\r\n for u in range(n):\r\n if visited[u]:\r\n continue\r\n edges.append(dfs(u))\r\n for w, (u, v) in edges:\r\n if uf.same(u, v):\r\n continue\r\n g.add_edge(u, v, weight=w)\r\n uf.unite(u, v)\r\n d += w\r\n for u in range(n):\r\n uf.find(u)\r\n\r\n return g, d\r\n\r\n def tsp(self): # traveling salesperson problem\r\n pass\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = (\r\n p1[0] - p0[0],\r\n p1[1] - p0[1],\r\n p2[0] - p0[0],\r\n p2[1] - p0[1],\r\n )\r\n return (\r\n (x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2\r\n )\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1 * t2 < 0) & (t3 * t4 < 0)\r\n\r\n\r\ndef cumxor(a):\r\n return reduce(xor, a, 0)\r\n\r\n\r\ndef cumor(a):\r\n return reduce(or_, a, 0)\r\n\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60 * q + r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100 * q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in 
range(n):\r\n s, e = map(\r\n to_minuites,\r\n map(int, sys.stdin.readline().rstrip().split(\"-\")),\r\n )\r\n s = s // 5 * 5\r\n e = (e + 4) // 5 * 5\r\n term[s] += 1\r\n term[e + 1] -= 1\r\n for i in range(2000):\r\n term[i + 1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i - 1))\r\n raining = False\r\n for s, e in res:\r\n print(f\"{to_hmform(s):04}-{to_hmform(e):04}\")\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n GeometryTopology.triangle_area(\r\n *map(int, sys.stdin.readline().split())\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all(\r\n (x, y) in edges\r\n for x, y in itertools.combinations(s, 2)\r\n )\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n relations[x] |= 1 << (y - 1)\r\n relations[y] |= 1 << (x - 1)\r\n res = 0\r\n for i in range(1 << n):\r\n s, cnt = (1 << n) - 1, 0\r\n for j in range(n):\r\n if i >> j & 1:\r\n t &= relations[j] | 1 << j\r\n cnt += 1\r\n if s & i == i:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n 
print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64)\r\n a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n - 1, mod)\r\n print(a[2][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n 
w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())\r\n print(\r\n Algebra.bitwise_dot(\r\n Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)\r\n )[0][0]\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, q, m = map(int, 
sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n + 1)\r\n # for i in range(n+1): g.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n g.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n g.add_edge(a, b, capacity=1)\r\n g.add_edge(b, a, capacity=1)\r\n print(g.dinic(0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += (\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, t in zip(*[iter(abt)] * 3):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=t)\r\n g.add_edge(b, a, weight=t)\r\n\r\n print(min(max(d) for d in g.floyd_warshall()))\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n res = np.array(res)\r\n\r\n def binary_method(a, p):\r\n b = np.arange(n)\r\n while p:\r\n if p & 1:\r\n b = a[b]\r\n p >>= 1\r\n a = a[a]\r\n 
return b\r\n\r\n print(*(binary_method(res, d) + 1), sep=\"\\n\")\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x >> i & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = np.zeros(10**6 + 2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n g.bfs(0)\r\n g.find_ancestors()\r\n\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(\r\n np.ceil(\r\n a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)\r\n ).astype(np.int8)\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print(\"Found\" if np.count_nonzero(x == 0) > 0 else \"Nothing\")\r\n\r\n @staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k + 1, w + 1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n np.maximum(dp[1:, a:], dp[:-1, :-a] + b, out=dp[1:, a:])\r\n print(dp[k][w])\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if m % d == 0 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a + b == c, a - b == c\r\n if f1 & f2:\r\n print(\"?\")\r\n elif f1 & (~f2):\r\n print(\"+\")\r\n elif (~f1) & f2:\r\n print(\"-\")\r\n else:\r\n print(\"!\")\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n f = [0] * n\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n f[a] |= 1 << b\r\n f[b] |= 1 << a\r\n res = [\r\n bit_count(\r\n cumor(f[j] for j in range(n) if f[i] >> j & 1)\r\n & ~(f[i] | 1 << i)\r\n )\r\n for i in range(n)\r\n ]\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(n, 2)\r\n .T\r\n )\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(\r\n np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2\r\n + 1\r\n )\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = (\r\n np.array(sys.stdin.read().split(), dtype=np.int16)\r\n .reshape(3, 2)\r\n .T\r\n )\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set(\"ch, o, k, u\".split(\", \"))\r\n\r\n def is_choku(s):\r\n if s == \"\":\r\n return True\r\n if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):\r\n return 
True\r\n if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):\r\n return True\r\n return False\r\n\r\n print(\"YES\" if is_choku(sys.stdin.readline().rstrip()) else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m + 1,), dtype=np.int32)\r\n np.add.at(score, l - 1, s)\r\n np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n + 1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i + 1] = tmp[f[i]]\r\n tmp[f[i]] = i + 1\r\n\r\n dp = [0] * (n + 1)\r\n dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in range(1, n + 1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] = s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i + 1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)] * 2):\r\n l -= 1\r\n r -= 1\r\n s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s == \"o\"] = np.inf\r\n for i in range(1, r + 1):\r\n np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])\r\n for i in range(r, 0, -1):\r\n np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])\r\n for j in range(1, c + 1):\r\n np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])\r\n for j in range(c, 0, -1):\r\n np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n a = (s == \"o\").astype(np.int16)\r\n a = distance_transform_cdt(a, metric=\"taxicab\")\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32)\r\n h[x - 1, y - 1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + \"$\"\r\n cnt = 0\r\n prev = \"$\"\r\n t = \"\"\r\n for c in s:\r\n if c == prev:\r\n cnt += 1\r\n continue\r\n t += prev + str(cnt)\r\n prev = c\r\n cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x & 1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f\"? 
{u} {v}\".format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)\r\n print(f\"! {d}\")\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\"\r\n if int(sys.stdin.readline().rstrip()) == 1\r\n else \"chokudai\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a + b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"S\":\r\n sy, sx = i, j\r\n if s[i][j] == \"G\":\r\n gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = \".\"\r\n source, target = sy * w + sx, gy * w + gx\r\n\r\n def heuristic_function(u, v=target):\r\n uy, ux = divmod(u, w)\r\n vy, vx = divmod(v, w)\r\n return abs(vy - uy) + abs(ux - vx)\r\n\r\n def min_time(x):\r\n g = GeometryTopology.Graph(h * w)\r\n # g = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n u = i * w + j\r\n if i > 0:\r\n g.add_edge(\r\n u,\r\n (i - 1) * w + j,\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n g.add_edge(\r\n u,\r\n (i + 1) * w + j,\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n g.add_edge(\r\n u,\r\n i * w + j - 1,\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n g.add_edge(\r\n u,\r\n i * w + j + 1,\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return g.dijkstra(source)[target]\r\n return g.astar(source, target, heuristic_function)\r\n # return nx.dijkstra_path_length(g, source, target)\r\n # return nx.astar_path_length(g, source, target, heuristic_function)\r\n\r\n def binary_search():\r\n lo, hi = 1, t + 1\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div):\r\n s[i] = (1 + n // d) * (n // d) // 2 * d % MOD\r\n for i in range(l - 1, -1, -1):\r\n for j in range(i + 1, l):\r\n if div[j] % div[i]:\r\n continue\r\n s[i] = (s[i] - s[j]) % MOD\r\n\r\n print(\r\n sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD\r\n ) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [1 << i for i in range(5) if n >> i & 1]\r\n print(len(s), *s, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print(\"YES\" if len(set(p) | set([a, b])) == k + 2 else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1\r\n b -= 1\r\n g = csgraph_to_dense(\r\n csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)\r\n )\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n 
g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n cn = Combinatorics.CombinationsMod()\r\n print(cn(n + k - 1, k))\r\n\r\n class ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s <= a) & (a <= t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n print(sum(c.values()) - len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1\r\n v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))\r\n g += g.T\r\n g[g == 0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0\r\n g[:, 0] = 0\r\n dist = shortest_path(g, method=\"FW\", directed=False)\r\n u, v = np.array([*itertools.combinations(range(1, n), 2)]).T\r\n res = (dist0[u] + dist[u, v] + dist0[v]).min()\r\n print(-1 if res == np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2, n, 2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)\r\n print(d[1] / d[0])\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = \"b\"\r\n for i in range(n // 2):\r\n if i % 3 == 0:\r\n t = \"a\" + t + \"c\"\r\n elif i % 3 == 1:\r\n t = \"c\" + t + \"a\"\r\n else:\r\n t = \"b\" + t + \"b\"\r\n print(n // 2 if t == s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n & 1 ^ 1:\r\n print(-1)\r\n return\r\n a = list(\"abc\")\r\n i = (1 - n // 2) % 3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1)\r\n return\r\n i = (i + 1) % 3\r\n print(n // 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n, 2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k + 1)\r\n cbb = np.bincount(cb, minlength=k + 1)\r\n tot = (rbb[: k + 1] * cbb[k::-1]).sum()\r\n real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)\r\n print(tot - real[k - 1] + real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = np.array(hs).reshape(n, 2).T\r\n\r\n t = np.arange(n)\r\n\r\n def is_ok(x):\r\n return np.all(np.sort((x - h) // s) >= t)\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if is_ok(x):\r\n hi = x\r\n else:\r\n lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a * s + b * t - c * (s + t) * (s + t >= k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:] - a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[: 2 * d].reshape(d, 2)\r\n s, t = lrst[2 * d :].reshape(k, 2).T\r\n day = np.zeros((k,), dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l <= s) & (s <= r) & (s != t)\r\n reach = move & (l <= t) & (t <= r)\r\n 
s[move & (s < t)] = r\r\n s[move & (s > t)] = l\r\n s[reach] = t[reach]\r\n day[reach] = i + 1\r\n print(*day, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = pow(a * b % p - b * c % p + c * a % p, p - 2, p)\r\n w = (b * c - a * b) % p * denom % p\r\n h = (b * c - a * c) % p * denom % p\r\n print(h, w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n - 1, 5)\r\n print(s[i] + s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d, a), b)\r\n res = res[\"East\"] - res[\"West\"]\r\n if res == 0:\r\n ans = 0\r\n elif res > 0:\r\n ans = f\"East {res}\"\r\n else:\r\n ans = f\"West {-res}\"\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i * 3 + j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i * 3 + j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6):\r\n res += (s[i] == s[i + 3]) * b[i]\r\n for i in range(8):\r\n res += (s[i] == s[i + 1]) * c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand) & 1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg ^ 1) + 1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f()\r\n b = tot - a\r\n print(a)\r\n print(b)\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a // 2 * (a - a // 2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0] + r) ** 2\r\n s.sort()\r\n res = s[n::-2].sum() - s[n - 1 :: -2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n):\r\n g.add_edge(b[i - 1] - 1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]:\r\n return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n\r\n def f(t):\r\n return a * t + b * np.sin(c * t * np.pi) - 100\r\n\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0] == l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r:\r\n print(-1)\r\n return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population / towns != m:\r\n cnt += 1\r\n continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length() & 1 ^ 1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2 * x + 1 if t & 1 ^ flg else 2 * x\r\n print(\"Aoki\" if t & 1 else \"Takahashi\")\r\n\r\n class ABC028:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n \"Bad\"\r\n if n < 60\r\n else \"Good\"\r\n if n < 
90\r\n else \"Great\"\r\n if n < 100\r\n else \"Perfect\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n cnt = Counter(s)\r\n print(*[cnt.get(c, 0) for c in \"ABCDEF\"])\r\n\r\n @staticmethod\r\n def c():\r\n a, b, c, d, e = map(int, sys.stdin.readline().split())\r\n print(max(b + c + e, a + d + e))\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n c = 3 * 2 * (n - k) * (k - 1) + 3 * (n - 1) + 1\r\n print(c / n**3)\r\n\r\n class ABC029:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"s\")\r\n\r\n @staticmethod\r\n def b():\r\n print(sum(\"r\" in s for s in sys.stdin.read().split()))\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n *[\r\n \"\".join(s)\r\n for s in itertools.product(\r\n \"abc\", repeat=int(sys.stdin.readline().rstrip())\r\n )\r\n ],\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n sum(\r\n n // 10 ** (i + 1) * 10**i\r\n + min(max((n % 10 ** (i + 1) - 10**i + 1), 0), 10**i)\r\n for i in range(9)\r\n )\r\n )\r\n\r\n class ABC030:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n e, f = b * c, d * a\r\n print(\"TAKAHASHI\" if e > f else \"AOKI\" if f > e else \"DRAW\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n n = (n % 12 + m / 60) * 30\r\n m *= 6\r\n d = abs(n - m)\r\n print(min(d, 360 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n x, y = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n\r\n t = 0\r\n p = 1\r\n cnt = 0\r\n while True:\r\n if p:\r\n i = bi_l(a, t)\r\n if i == n:\r\n break\r\n t = a[i] + x\r\n else:\r\n i = bi_l(b, t)\r\n if i == m:\r\n break\r\n t = b[i] + y\r\n cnt += 1\r\n p ^= 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, a = map(int, sys.stdin.readline().split())\r\n a -= 1\r\n k = sys.stdin.readline().rstrip()\r\n b = [int(x) - 1 for x in sys.stdin.readline().split()]\r\n\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if str(i) == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n l, d = i - c[a], c[a]\r\n break\r\n c[a] = i\r\n a = b[a]\r\n\r\n r = [None] * len(k)\r\n r[0] = 1\r\n for i in range(len(k) - 1):\r\n r[i + 1] = r[i] * 10 % l\r\n k = [int(c) for c in k][::-1]\r\n d = (sum(r[i] * k[i] for i in range(len(k))) - d) % l\r\n for _ in range(d):\r\n a = b[a]\r\n print(a + 1)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, a, k, *b = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b = [x - 1 for x in b]\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if i == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n for _ in range((k - c[a]) % (i - c[a])):\r\n a = b[a]\r\n print(a + 1)\r\n return\r\n c[a] = i\r\n a = b[a]\r\n\r\n class ABC031:\r\n @staticmethod\r\n def a():\r\n a, d = map(int, sys.stdin.readline().split())\r\n if a > d:\r\n a, d = d, a\r\n print((a + 1) * d)\r\n\r\n @staticmethod\r\n def b():\r\n l, h, n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n res = np.maximum(l - a, 0)\r\n res[a > h] = -1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a[::2], out=a[::2])\r\n np.cumsum(a[1::2], out=a[1::2])\r\n a = list(a) + [0] * 2\r\n\r\n def score(i, j):\r\n if i > j:\r\n i, j = j, i\r\n if (j - i) & 
1:\r\n x, y = a[j - 1] - a[i - 2], a[j] - a[i - 1]\r\n else:\r\n x, y = a[j] - a[i - 2], a[j - 1] - a[i - 1]\r\n return x, y\r\n\r\n res = -inf\r\n for i in range(n):\r\n s = -inf\r\n for j in range(n):\r\n if i == j:\r\n continue\r\n x, y = score(i, j)\r\n if y > s:\r\n s, t = y, x\r\n res = max(res, t)\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n (*vw,) = zip(*[iter(sys.stdin.read().split())] * 2)\r\n for l in itertools.product((1, 2, 3), repeat=k):\r\n s = dict()\r\n for v, w in vw:\r\n i = 0\r\n for d in v:\r\n d = int(d) - 1\r\n j = i + l[d]\r\n if j > len(w):\r\n break\r\n t = w[i:j]\r\n if d in s and s[d] != t:\r\n break\r\n s[d] = t\r\n i = j\r\n else:\r\n if i == len(w):\r\n continue\r\n break\r\n else:\r\n for i in range(k):\r\n print(s[i])\r\n return\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n if k == 0:\r\n print(0)\r\n return\r\n res, tmp, l = 0, 1, 0\r\n for r in range(n):\r\n tmp *= s[r]\r\n while tmp > k:\r\n tmp //= s[l]\r\n l += 1\r\n res = max(res, r - l + 1)\r\n\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"SAME\"\r\n if len(set(sys.stdin.readline().rstrip())) == 1\r\n else \"DIFFERENT\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n res[s] = int(p)\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(not \"0\" in f for f in s.split(\"+\")))\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n choose = Combinatorics.CombinationsMod()\r\n print(choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = x = z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n print(d + z if t == \"1\" else max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, 
res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w, *a = map(int, sys.stdin.read().split())\r\n p = [None] * (h * w)\r\n\r\n def paths(k):\r\n if p[k]:\r\n return p[k]\r\n p[k] = 1\r\n i, j = divmod(k, w)\r\n if j > 0 and a[k] > a[k - 1]:\r\n p[k] += paths(k - 1)\r\n if j < w - 1 and a[k] > a[k + 1]:\r\n p[k] += paths(k + 1)\r\n if i > 0 and a[k] > a[k - w]:\r\n p[k] += paths(k - w)\r\n if i < h - 1 and a[k] > a[k + w]:\r\n p[k] += paths(k + w)\r\n p[k] %= MOD\r\n return p[k]\r\n\r\n print(sum(paths(i) for i in range(h * w)) % MOD)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n a = [\r\n x[1]\r\n for x in sorted(\r\n zip(*[iter(wh)] * 2), key=lambda x: (x[0], -x[1])\r\n )\r\n ]\r\n print(bi_l(DP.LIS(a), inf))\r\n\r\n class ABC039:\r\n 
@staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = \"\".join(sys.stdin.read().split())\r\n white = set()\r\n for i in range(h * w):\r\n if s[i] == \"#\":\r\n continue\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n white |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_before = set(range(h * w)) - white\r\n black_after = set()\r\n for i in black_before:\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n black_after |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_after &= set(range(h * w))\r\n for i in range(h * w):\r\n if s[i] == \"#\" and not i in black_after:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n print(\r\n \"\".join(\r\n [\r\n \"#\" if i * w + j in black_before else \".\"\r\n for j in range(w)\r\n ]\r\n )\r\n )\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.Graph(n)\r\n uf.init_dsu()\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, _, *xy = map(int, sys.stdin.read().split())\r\n g = [0] * n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n g[x - 1] |= 1 << (y - 1)\r\n res = [0] * (1 << n)\r\n res[0] = 1\r\n for i in range(1 << n):\r\n for j in range(n):\r\n if i >> j & 1 ^ 1:\r\n continue\r\n if not (g[j] & i):\r\n res[i] += res[i & ~(1 << j)]\r\n print(res[-1])\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = 
[int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d = sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [\r\n int(\"\".join(p)) for p in itertools.product(ok, repeat=l)\r\n ] + [int(min(x for x in ok if x > \"0\") + min(ok) * l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinations = Combinatorics.CombinationsMod(\r\n n=2 * 10**5, mod=MOD\r\n )\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinations(i + b - 1, i)\r\n * combinations(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n print((combinations(h + w - 2, h - 1) - ng) % MOD)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC044:\r\n @staticmethod\r\n def a():\r\n n, k, x, y = map(int, sys.stdin.read().split())\r\n print(min(n, k) * x + max(0, n - k) * y)\r\n\r\n @staticmethod\r\n def b():\r\n res = set(\r\n c & 1 for c in Counter(sys.stdin.readline().rstrip()).values()\r\n )\r\n print(\"Yes\" if len(res) == 1 and res.pop() == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n dp = np.zeros((n + 1, 2501), dtype=np.int64)\r\n dp[0, 0] = 1\r\n for v in x:\r\n dp[1:, v:] += dp[:-1, :-v]\r\n i = np.arange(1, n + 1)\r\n print(dp[i, i * a].sum())\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n for i in range(n):\r\n x[i] -= a\r\n\r\n s = defaultdict(int)\r\n s[0] = 1\r\n for i in range(n):\r\n ns = s.copy()\r\n for k, v in s.items():\r\n ns[k + x[i]] += v\r\n s = ns\r\n print(s[0] - 1)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC045:\r\n @staticmethod\r\n def a():\r\n a, b, h = map(int, sys.stdin.read().split())\r\n print((a + b) * h // 2)\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = sys.stdin.read().split()\r\n d = {\"a\": a[::-1], \"b\": b[::-1], \"c\": c[::-1]}\r\n nx = \"a\"\r\n while 1:\r\n if not d[nx]:\r\n print(nx.upper())\r\n return\r\n d[nx], nx = d[nx][:-1], d[nx][-1]\r\n\r\n @staticmethod\r\n def c():\r\n def c(l):\r\n return pow(2, max(0, l - 1))\r\n\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n print(\r\n sum(\r\n int(s[i : j + 1]) * c(i) * c(n - 1 - j)\r\n for i in range(n)\r\n for j in range(i, n)\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *ab = map(int, sys.stdin.read().split())\r\n c = defaultdict(int)\r\n for y, x in zip(*[iter(ab)] * 2):\r\n y -= 1\r\n x -= 1\r\n for dy, dx in itertools.product(range(-1, 2), repeat=2):\r\n i, j = y + dy, x + dx\r\n if not (0 < i < h - 1 and 0 < j < w - 1):\r\n continue\r\n c[(i, j)] 
+= 1\r\n c = Counter(c.values())\r\n c[0] = (h - 2) * (w - 2) - sum(c.values())\r\n for i in range(10):\r\n print(c[i])\r\n\r\n class ABC046:\r\n @staticmethod\r\n def a():\r\n print(len(set(sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n n, k = map(int, sys.stdin.readline().split())\r\n print(k * pow(k - 1, n - 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n a, b = 1, 1\r\n for x, y in zip(*[iter(xy)] * 2):\r\n n = max((a + x - 1) // x, (b + y - 1) // y)\r\n a, b = n * x, n * y\r\n print(a + b)\r\n\r\n @staticmethod\r\n def d():\r\n c = Counter(sys.stdin.readline().rstrip())\r\n print((c[\"g\"] - c[\"p\"]) // 2)\r\n\r\n class ABC047:\r\n @staticmethod\r\n def a():\r\n c = sorted(map(int, sys.stdin.readline().split()))\r\n print(\"Yes\" if c[0] + c[1] == c[2] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n w, h, n, *xyf = map(int, sys.stdin.read().split())\r\n l, r, d, u = 0, w, 0, h\r\n for x, y, f in zip(*[iter(xyf)] * 3):\r\n if f == 1:\r\n l = max(l, x)\r\n if f == 2:\r\n r = min(r, x)\r\n if f == 3:\r\n d = max(d, y)\r\n if f == 4:\r\n u = min(u, y)\r\n print(max(0, r - l) * max(0, u - d))\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(s[i] != s[i + 1] for i in range(len(s) - 1)))\r\n\r\n @staticmethod\r\n def d():\r\n mn, mx, c = inf, -1, 0\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n for p in a:\r\n if p - mn == mx:\r\n c += 1\r\n elif p - mn > mx:\r\n mx, c = p - mn, 1\r\n mn = min(mn, p)\r\n print(c)\r\n\r\n class ABC048:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b, x = map(int, sys.stdin.readline().split())\r\n print(\r\n b // x - (a - 1) // x\r\n ) # if a=0, (a-1)/x is rounded down to -1.\r\n\r\n @staticmethod\r\n def c():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n cnt = prev = 0\r\n for i in range(n):\r\n d = prev + a[i] - x\r\n prev = a[i]\r\n if d <= 0:\r\n continue\r\n cnt += d\r\n prev -= d\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"First\" if len(s) & 1 ^ (s[0] == s[-1]) else \"Second\")\r\n\r\n class ABC049:\r\n @staticmethod\r\n def a():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"vowel\"\r\n if sys.stdin.readline().rstrip() in vowels\r\n else \"consonant\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n h, w, *s = sys.stdin.read().split()\r\n for l in s:\r\n for _ in range(2):\r\n print(l)\r\n\r\n @staticmethod\r\n def c():\r\n t = set(\"dream, dreamer, erase, eraser\".split(\", \"))\r\n\r\n def obtainable(s):\r\n while True:\r\n for i in range(5, 8):\r\n if s[-i:] in t:\r\n s = s[:-i]\r\n if not s:\r\n return True\r\n break\r\n else:\r\n return False\r\n\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if obtainable(s) else \"NO\")\r\n\r\n @staticmethod\r\n def d():\r\n n, k, l = map(int, sys.stdin.readline().split())\r\n uf1 = GeometryTopology.Graph(n)\r\n uf1.init_dsu()\r\n uf2 = GeometryTopology.Graph(n)\r\n uf2.init_dsu()\r\n\r\n def add_edges(uf, m):\r\n for _ in range(m):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n uf.unite(x, y)\r\n\r\n add_edges(uf1, k)\r\n add_edges(uf2, l)\r\n\r\n g = defaultdict(list)\r\n for i in range(n):\r\n g[(uf1.find(i), uf2.find(i))].append(i)\r\n\r\n res = [None] * n\r\n for a in g:\r\n for i in g[a]:\r\n res[i] = len(g[a])\r\n\r\n print(*res, sep=\" \")\r\n\r\n class 
ABC050:\r\n @staticmethod\r\n def a():\r\n print(eval(sys.stdin.readline().rstrip()))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n t = np.array(sys.stdin.readline().split(), dtype=np.int64)\r\n m, *px = map(int, sys.stdin.read().split())\r\n p, x = np.array(px).reshape(m, 2).T\r\n p -= 1\r\n print(*(t.sum() + x - t[p]), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = Counter(a)\r\n if n & 1 and not (\r\n a[0] == 1 and all(a[i] == 2 for i in range(2, n, 2))\r\n ):\r\n print(0)\r\n return\r\n if ~n & 1 and any(a[i] != 2 for i in range(1, n, 2)):\r\n print(0)\r\n return\r\n print(pow(2, n // 2, MOD))\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC051:\r\n @staticmethod\r\n def a():\r\n print(\" \".join(sys.stdin.readline().rstrip().split(\",\")))\r\n\r\n @staticmethod\r\n def b():\r\n k, s = map(int, sys.stdin.readline().split())\r\n tot = 0\r\n for x in range(k + 1):\r\n if s - x < 0:\r\n break\r\n if s - x > 2 * k:\r\n continue\r\n tot += s - x + 1 if s - x <= k else 2 * k - (s - x) + 1\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n x1, y1, x2, y2 = map(int, sys.stdin.readline().split())\r\n dx, dy = x2 - x1, y2 - y1\r\n print(\r\n \"U\" * dy\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * (dx + 1)\r\n + \"U\"\r\n + \"L\"\r\n + \"U\" * (dy + 1)\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * dx\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n x = np.arange(n)\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n d = shortest_path(\r\n csr_matrix((c, (a, b)), shape=(n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n ).astype(np.int64)\r\n print(\r\n m\r\n - np.any(\r\n d[x, a[:, None]] + c[:, None] == d[x, b[:, None]], axis=1\r\n ).sum()\r\n )\r\n\r\n class ABC052:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n print(max(a * b, c * d))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n a = [0] * (n + 1)\r\n for i in range(n):\r\n a[i + 1] = a[i] + (1 if s[i] == \"I\" else -1)\r\n print(max(a))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n pn = NumberTheory.PrimeNumbers(n)\r\n s = 1\r\n for c in pn.factorize_factorial(n).values():\r\n s = s * (c + 1) % MOD\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *x = map(int, sys.stdin.read().split())\r\n x = np.array(x)\r\n print(np.minimum((x[1:] - x[:-1]) * a, b).sum())\r\n\r\n class ABC053:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\" if int(sys.stdin.readline().rstrip()) < 1200 else \"ARC\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(len(s) - s.find(\"A\") - s[::-1].find(\"Z\"))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(x, 11)\r\n print(2 * q + (r + 5) // 6)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(n - ((n - len(set(a)) + 1) // 2 * 2))\r\n\r\n class ABC054:\r\n @staticmethod\r\n def a():\r\n def f(x):\r\n return (x + 11) % 13\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(\"Alice\" if f(a) > f(b) else \"Bob\" if f(a) < f(b) else \"Draw\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [sys.stdin.readline().rstrip() for _ in range(n)]\r\n b = [sys.stdin.readline().rstrip() for _ in range(m)]\r\n\r\n for 
i in range(n - m + 1):\r\n for j in range(n - m + 1):\r\n for y in range(m):\r\n for x in range(m):\r\n if a[i + y][j + x] == b[y][x]:\r\n continue\r\n break\r\n else:\r\n continue\r\n break\r\n else:\r\n print(\"Yes\")\r\n return\r\n print(\"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n\r\n cnt = 0\r\n stack = [(0, 1)]\r\n while stack:\r\n u, s = stack.pop()\r\n if s == (1 << n) - 1:\r\n cnt += 1\r\n continue\r\n for v in g.edges[u]:\r\n if s >> v & 1:\r\n continue\r\n stack.append((v, s | 1 << v))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, ma, mb, *abc = map(int, sys.stdin.read().split())\r\n dp = np.full((401, 401), np.inf)\r\n dp[0, 0] = 0\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n np.minimum(dp[a:, b:], dp[:-a, :-b] + c, out=dp[a:, b:])\r\n i = np.arange(1, 400 // max(ma, mb) + 1)\r\n res = dp[i * ma, i * mb].min()\r\n print(int(res) if res != np.inf else -1)\r\n\r\n class ABC055:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(800 * n - 200 * (n // 15))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n fac, _ = Algebra.generate_fac_ifac(n, MOD)\r\n print(fac[-1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n print(m // 2 if m <= 2 * n else n + (m - 2 * n) // 4)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n s = [1 if c == \"o\" else 0 for c in s]\r\n\r\n def possible(t):\r\n for i in range(1, n - 1):\r\n t[i + 1] = t[i - 1] ^ t[i] ^ s[i]\r\n return (\r\n (t[0] ^ s[0] ^ t[1] ^ t[-1])\r\n | (t[-1] ^ s[-1] ^ t[-2] ^ t[0])\r\n ) ^ 1\r\n\r\n for fst in [(1, 0), (0, 1), (1, 1), (0, 0)]:\r\n t = [None] * n\r\n t[0], t[1] = fst[0], fst[1]\r\n if possible(t):\r\n print(\"\".join(\"S\" if x == 1 else \"W\" for x in t))\r\n return\r\n print(-1)\r\n\r\n class ABC056:\r\n @staticmethod\r\n def a():\r\n def to_i(c):\r\n return 1 if c == \"H\" else 0\r\n\r\n a, b = map(to_i, sys.stdin.readline().split())\r\n print(\"D\" if a ^ b else \"H\")\r\n\r\n @staticmethod\r\n def b():\r\n w, a, b = map(int, sys.stdin.readline().split())\r\n if a > b:\r\n a, b = b, a\r\n print(max(b - (a + w), 0))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n print(int(math.ceil(math.sqrt(2 * x + 1 / 4) - 0.5)))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = sorted(min(x, k) for x in a)\r\n\r\n def necessary(i):\r\n dp = np.zeros(k, dtype=np.bool)\r\n dp[0] = True\r\n for j in range(n):\r\n if j == i:\r\n continue\r\n dp[a[j] :] += dp[: -a[j]]\r\n return np.any(dp[k - a[i] :])\r\n\r\n def binary_search():\r\n lo, hi = -1, n\r\n while hi - lo > 1:\r\n i = (lo + hi) // 2\r\n if necessary(i):\r\n hi = i\r\n else:\r\n lo = i\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC057:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((a + b) % 24)\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I).reshape(-1, 2)\r\n ab, cd = I[:n], I[n:]\r\n print(\r\n *(\r\n np.argmin(\r\n np.absolute(ab[:, None] - cd).sum(axis=-1), axis=-1\r\n )\r\n + 1\r\n ),\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n divs = NumberTheory.find_divisors(n)\r\n 
print(len(str(divs[bi_l(divs, math.sqrt(n))])))\r\n\r\n @staticmethod\r\n def d():\r\n c = Combinatorics.choose\r\n n, a, b, *v = map(int, sys.stdin.read().split())\r\n v.sort()\r\n print(sum(v[-a:]) / a)\r\n l, r = bi_l(v, v[-a]), bi_r(v, v[-a])\r\n print(\r\n sum(\r\n c(r - l, i)\r\n for i in range(r - n + a, r - max(l, n - b) + 1)\r\n )\r\n if r == n\r\n else c(r - l, r - n + a)\r\n )\r\n\r\n class ABC058:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if c - b == b - a else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n a = \"\"\r\n for i in range(len(t)):\r\n a += s[i] + t[i]\r\n if len(s) > len(t):\r\n a += s[-1]\r\n print(a)\r\n\r\n @staticmethod\r\n def c():\r\n n, *s = sys.stdin.read().split()\r\n res = {c: 100 for c in string.ascii_lowercase}\r\n for counter in map(Counter, s):\r\n for (\r\n c,\r\n x,\r\n ) in res.items():\r\n res[c] = min(x, counter[c])\r\n t = \"\"\r\n for c, x in sorted(res.items()):\r\n t += c * x\r\n print(t)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy[:n]), np.array(xy[n:])\r\n print(\r\n (x * (np.arange(n) + 1) - np.cumsum(x)).sum()\r\n % MOD\r\n * ((y * (np.arange(m) + 1) - np.cumsum(y)).sum() % MOD)\r\n % MOD\r\n )\r\n\r\n class ABC059:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.read().split()\r\n la, lb = len(a), len(b)\r\n print(\r\n \"GREATER\"\r\n if la > lb\r\n else \"LESS\"\r\n if la < lb\r\n else \"GREATER\"\r\n if a > b\r\n else \"LESS\"\r\n if a < b\r\n else \"EQUAL\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n elif i & 1 ^ 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n c1 = c\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n elif i & 1 ^ 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n c2 = c\r\n print(min(c1, c2))\r\n\r\n @staticmethod\r\n def d():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Brown\" if abs(x - y) <= 1 else \"Alice\")\r\n\r\n class ABC060:\r\n @staticmethod\r\n def a():\r\n a, b, c = sys.stdin.readline().split()\r\n print(\"YES\" if a[-1] == b[0] and b[-1] == c[0] else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if c % NumberTheory.gcd(a, b) else \"YES\")\r\n\r\n @staticmethod\r\n def c():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n print(sum(min(a[i + 1] - a[i], t) for i in range(n - 1)) + t)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC061:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if a <= c <= b else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n ab = np.array(ab) - 1\r\n g = np.zeros(n, dtype=np.int32)\r\n np.add.at(g, ab, 1)\r\n print(*g, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *ab = map(int, sys.stdin.read().split())\r\n ab = np.transpose(np.array(ab).reshape(n, 2))\r\n a, b = ab[:, np.argsort(ab[0])]\r\n print(a[np.cumsum(b) >= k][0])\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 
1\r\n c *= -1\r\n g = csr_matrix(\r\n ([1] * (m + 1), (np.append(a, n - 1), np.append(b, 0))), (n, n)\r\n )\r\n _, labels = connected_components(g, connection=\"strong\")\r\n bl = (labels[a] == labels[0]) & (labels[b] == labels[0])\r\n g = csr_matrix((c[bl], (a[bl], b[bl])), (n, n))\r\n try:\r\n print(\r\n -shortest_path(g, method=\"BF\", directed=True, indices=0)[\r\n -1\r\n ].astype(int)\r\n )\r\n except:\r\n print(\"inf\")\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n d = np.full(n, np.inf)\r\n d[0] = 0\r\n for _ in range(n - 1):\r\n np.minimum.at(d, b, d[a] + c)\r\n neg_cycle = np.zeros(n, dtype=np.bool)\r\n for _ in range(n):\r\n np.logical_or.at(neg_cycle, b, d[a] + c < d[b])\r\n np.minimum.at(d, b, d[a] + c)\r\n print(inf if neg_cycle[-1] else -d[-1].astype(int))\r\n\r\n class ABC062:\r\n @staticmethod\r\n def a():\r\n g = [0, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0]\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if g[x - 1] == g[y - 1] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = np.array(\r\n [list(s) for s in sys.stdin.read().split()], dtype=\"U1\"\r\n )\r\n a = np.pad(a, pad_width=1, constant_values=\"#\")\r\n for s in a:\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.readline().split())\r\n if h * w % 3 == 0:\r\n print(0)\r\n return\r\n\r\n def minimize(h, w):\r\n return min(\r\n h,\r\n *(\r\n s[-1] - s[0]\r\n for x in range(w // 3, w // 3 + 2)\r\n for s in (\r\n sorted(\r\n [\r\n h * x,\r\n h // 2 * (w - x),\r\n (h + 1) // 2 * (w - x),\r\n ]\r\n ),\r\n )\r\n ),\r\n )\r\n\r\n print(min(minimize(h, w), minimize(w, h)))\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n\r\n def optimize(a):\r\n a = list(a)\r\n l, r = a[:n], a[n:]\r\n heapify(l)\r\n s = [None] * (n + 1)\r\n s[0] = sum(l)\r\n for i in range(n):\r\n x = heappop(l)\r\n heappush(l, max(x, r[i]))\r\n s[i + 1] = s[i] + max(0, r[i] - x)\r\n return np.array(s)\r\n\r\n print(\r\n (\r\n optimize(a[: 2 * n]) + optimize(-a[-1 : n - 1 : -1])[::-1]\r\n ).max()\r\n )\r\n\r\n class ABC063:\r\n @staticmethod\r\n def a():\r\n a = sum(map(int, sys.stdin.readline().split()))\r\n print(\"error\" if a >= 10 else a)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"yes\" if len(set(s)) == len(s) else \"no\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n s = a.sum()\r\n if s % 10:\r\n print(s)\r\n elif not np.count_nonzero(a % 10):\r\n print(0)\r\n else:\r\n print(s - a[a % 10 != 0].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *h = map(int, sys.stdin.read().split())\r\n h = np.array(h)\r\n d = a - b\r\n\r\n def possible(c):\r\n hh = h.copy()\r\n np.maximum(hh - b * c, 0, out=hh)\r\n return ((hh + d - 1) // d).sum() <= c\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**9\r\n while hi - lo > 1:\r\n c = (lo + hi) // 2\r\n if possible(c):\r\n hi = c\r\n else:\r\n lo = c\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC064:\r\n @staticmethod\r\n def a():\r\n r, g, b = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if (10 * g + b) % 4 else \"YES\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort()\r\n print(a[-1] - a[0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, 
sys.stdin.read().split())\r\n a = np.bincount(np.minimum(np.array(a) // 400, 8), minlength=9)\r\n mx = np.count_nonzero(a[:-1]) + a[-1]\r\n mn = max(mx - a[-1], 1)\r\n print(mn, mx)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n l = r = 0\r\n for c in s:\r\n if c == \"(\":\r\n r += 1\r\n else:\r\n if r == 0:\r\n l += 1\r\n else:\r\n r -= 1\r\n print(\"(\" * l + s + \")\" * r)\r\n\r\n class ABC065:\r\n @staticmethod\r\n def a():\r\n x, a, b = map(int, sys.stdin.readline().split())\r\n y = -a + b\r\n print(\"delicious\" if y <= 0 else \"safe\" if y <= x else \"dangerous\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = [int(x) - 1 for x in sys.stdin.read().split()]\r\n i = 0\r\n for c in range(n):\r\n i = a[i]\r\n if i == 1:\r\n print(c + 1)\r\n return\r\n print(-1)\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n d = abs(n - m)\r\n if d >= 2:\r\n print(0)\r\n return\r\n fac, _ = Algebra.generate_fac_ifac(10**5)\r\n print(fac[n] * fac[m] * (1 if d else 2) % MOD)\r\n\r\n @staticmethod\r\n def d():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(n, 2).T\r\n i = np.argsort(x)\r\n ax, bx, cx = (\r\n i[:-1],\r\n i[1:],\r\n x[\r\n i[1:],\r\n ]\r\n - x[i[:-1]],\r\n )\r\n i = np.argsort(y)\r\n ay, by, cy = (\r\n i[:-1],\r\n i[1:],\r\n y[\r\n i[1:],\r\n ]\r\n - y[i[:-1]],\r\n )\r\n e = np.vstack(\r\n [np.hstack([ax, ay]), np.hstack([bx, by]), np.hstack([cx, cy])]\r\n )\r\n e = e[:, np.argsort(e[-1])]\r\n _, i = np.unique(e[:-1], return_index=True, axis=1)\r\n a, b, c = e[:, i]\r\n print(\r\n minimum_spanning_tree(csr_matrix((c, (a, b)), (n, n)))\r\n .astype(np.int64)\r\n .sum()\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = xy[::2], xy[1::2]\r\n g = GeometryTopology.Graph(n)\r\n\r\n def make(a):\r\n b = sorted(enumerate(a), key=lambda x: x[1])\r\n for i in range(n - 1):\r\n u, v, w = b[i][0], b[i + 1][0], b[i + 1][1] - b[i][1]\r\n for u, v in [(v, u), (u, v)]:\r\n if not v in g.edges[u]:\r\n g.add_edge(u, v, weight=w)\r\n else:\r\n g.edges[u][v].weight = min(g.edges[u][v].weight, w)\r\n\r\n make(x)\r\n make(y)\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n class ABC066:\r\n @staticmethod\r\n def a():\r\n print(sum(sorted(map(int, sys.stdin.readline().split()))[:-1]))\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n\r\n def f(s):\r\n n = len(s) // 2\r\n return s[:n] == s[n:]\r\n\r\n for i in range(len(s) - 2, 0, -2):\r\n if f(s[:i]):\r\n print(i)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = deque()\r\n for i in range(n):\r\n if i & 1:\r\n b.appendleft(a[i])\r\n else:\r\n b.append(a[i])\r\n if n & 1:\r\n b.reverse()\r\n print(*b)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tmp = [None] * (n + 1)\r\n for i in range(n + 1):\r\n if tmp[a[i]] is not None:\r\n d = tmp[a[i]] + n - i\r\n break\r\n tmp[a[i]] = i\r\n k = np.arange(1, n + 2)\r\n c = Combinatorics.CombinationsMod(n + 1, MOD)\r\n print(*((c(n + 1, k) - c(d, k - 1)) % MOD), sep=\"\\n\")\r\n\r\n class ABC067:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, 
b)\r\n g.add_edge(b, a)\r\n d1, d2 = g.bfs(0), g.bfs(n - 1)\r\n print(\r\n \"Fennec\"\r\n if sum(d1[i] <= d2[i] for i in range(n)) > n // 2\r\n else \"Snuke\"\r\n )\r\n\r\n class ABC068:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n n = 50\r\n print(n)\r\n q, r = divmod(k, n)\r\n a = np.arange(n - 1, -1, -1) + q\r\n a[:r] += 1\r\n print(*a)\r\n\r\n class ABC069:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *a = map(int, sys.stdin.read().split())\r\n c = [i + 1 for i in range(n) for j in range(a[i])]\r\n for i in range(h):\r\n row = c[i * w : (i + 1) * w]\r\n if i & 1:\r\n row = row[::-1]\r\n print(*row)\r\n\r\n class ABC070:\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=c)\r\n g.add_edge(b, a, weight=c)\r\n q, k = map(int, sys.stdin.readline().split())\r\n d = g.bfs(k - 1)\r\n for _ in range(q):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n print(d[x] + d[y])\r\n\r\n class ABC071:\r\n @staticmethod\r\n def d():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n s = list(zip(*s))\r\n dp = [0] * n\r\n dp[0] = 3 if s[0][0] == s[0][1] else 6\r\n for i in range(1, n):\r\n dp[i] = dp[i - 1]\r\n if s[i][0] == s[i - 1][0]:\r\n continue\r\n dp[i] *= (\r\n 2\r\n if s[i - 1][0] == s[i - 1][1]\r\n else 3\r\n if s[i][0] != s[i][1]\r\n else 1\r\n )\r\n dp[i] %= MOD\r\n print(dp[-1])\r\n\r\n class ABC072:\r\n @staticmethod\r\n def d():\r\n n, *p = map(int, sys.stdin.read().split())\r\n p += [-1]\r\n cnt, i = 0, 0\r\n while i < n:\r\n if p[i] == i + 1:\r\n cnt += p[i] == i + 1\r\n if p[i + 1] == i + 2:\r\n i += 1\r\n i += 1\r\n print(cnt)\r\n\r\n class ABC073:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, m, r, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n a, b, c = I[r:].reshape(m, 3).T\r\n d = shortest_path(\r\n csr_matrix((c, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n ).astype(np.int32)\r\n r = np.array([*itertools.permutations(I[:r] - 1)])\r\n print((d[r[:, :-1], r[:, 1:]].sum(axis=1)).min())\r\n\r\n class ABC074:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a, dtype=np.int32).reshape(n, n)\r\n b = shortest_path(a, method=\"FW\").astype(np.int32)\r\n if (b < a).any():\r\n print(-1)\r\n return\r\n np.fill_diagonal(b, 10**9)\r\n a[np.any(b[:, None] + b <= a[:, :, None], axis=2)] = 0\r\n print(a.sum() // 2)\r\n\r\n class ABC075:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *xy = map(int, sys.stdin.read().split())\r\n xy = np.array(xy).reshape(n, 2)\r\n x_y = xy.copy()[np.argsort(xy[:, 0])]\r\n y_x = xy.copy()[np.argsort(xy[:, 1])]\r\n comb = np.array([*itertools.combinations(range(n), 2)])\r\n i1, i2 = comb.T\r\n j1, j2 = 
comb[None, :].T\r\n s = (y_x[:, 1][i2] - y_x[:, 1][i1]) * (\r\n x_y[:, 0][j2] - x_y[:, 0][j1]\r\n )\r\n c = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for i in range(n):\r\n c[i + 1, 1:] += c[i, 1:] + (y_x[i, 0] <= x_y[:, 0])\r\n a = c[i2 + 1, j2 + 1] - c[i2 + 1, j1] - c[i1, j2 + 1] + c[i1, j1]\r\n print(s[a >= k].min())\r\n\r\n class ABC076:\r\n @staticmethod\r\n def d():\r\n n, *tv = map(int, sys.stdin.read().split())\r\n t, v = np.array(tv).reshape(2, n)\r\n t = np.pad(t, pad_width=[2, 1], constant_values=0)\r\n np.cumsum(t, out=t)\r\n l, r = t[:-1], t[1:]\r\n v = np.pad(v, pad_width=[1, 1], constant_values=0)\r\n x = np.arange(0, r[-1] + 0.1, 0.5, dtype=np.float32)[:, None]\r\n # y = np.stack([v-(x-l), np.zeros(r[-1]*2+1, dtype=np.float32)[:,None]+v, v+(x-r)]).max(axis=0).min(axis=1)\r\n mx = v - (x - l)\r\n np.maximum(mx, v, out=mx)\r\n np.maximum(mx, v + (x - r), out=mx)\r\n y = mx.min(axis=1)\r\n print(((y[:-1] + y[1:]) / 4).sum())\r\n\r\n class ABC077:\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(k)\r\n for i in range(k):\r\n g.add_edge(i, i * 10 % k, weight=0)\r\n g.add_edge(i, (i + 1) % k, update=False, weight=1)\r\n print(1 + g.bfs01(1)[0])\r\n\r\n class ABC078:\r\n @staticmethod\r\n def d():\r\n n, z, w, *a = map(int, sys.stdin.read().split())\r\n print(\r\n abs(a[0] - w)\r\n if n == 1\r\n else max(abs(a[-1] - w), abs(a[-1] - a[-2]))\r\n )\r\n\r\n class ABC079:\r\n @staticmethod\r\n def d():\r\n h, w, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n c = I[:100].reshape(10, 10)\r\n a = I[100:].reshape(h, w)\r\n c = shortest_path(c.T, method=\"D\", indices=1).astype(np.int32)\r\n print(c[a[a != -1]].sum())\r\n\r\n class ABC080:\r\n @staticmethod\r\n def d():\r\n n, c, *stc = map(int, sys.stdin.read().split())\r\n using = np.zeros((c, 10**5 + 2), dtype=np.int8)\r\n s, t, c = np.array(stc).reshape(n, 3).T\r\n np.add.at(using, (c - 1, s), 1)\r\n np.subtract.at(using, (c - 1, t + 1), 1)\r\n np.cumsum(using, axis=1, out=using)\r\n print(np.count_nonzero(using, axis=0).max())\r\n\r\n class ABC081:\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n i = np.argmax(np.absolute(a))\r\n # a +=\r\n print(2 * n)\r\n for j in range(n):\r\n print(i + 1, j + 1)\r\n if a[i] >= 0:\r\n for j in range(n - 1):\r\n print(j + 1, j + 2)\r\n else:\r\n for j in range(n - 1, 0, -1):\r\n print(j + 1, j)\r\n\r\n class ABC082:\r\n pass\r\n\r\n class ABC083:\r\n pass\r\n\r\n class ABC084:\r\n pass\r\n\r\n class ABC085:\r\n pass\r\n\r\n class ABC086:\r\n pass\r\n\r\n class ABC087:\r\n pass\r\n\r\n class ABC088:\r\n pass\r\n\r\n class ABC089:\r\n pass\r\n\r\n class ABC090:\r\n pass\r\n\r\n class ABC091:\r\n pass\r\n\r\n class ABC092:\r\n pass\r\n\r\n class ABC093:\r\n pass\r\n\r\n class ABC094:\r\n pass\r\n\r\n class ABC095:\r\n pass\r\n\r\n class ABC096:\r\n pass\r\n\r\n class ABC097:\r\n pass\r\n\r\n class ABC098:\r\n pass\r\n\r\n class ABC099:\r\n pass\r\n\r\n class ABC100:\r\n pass\r\n\r\n class ABC101:\r\n pass\r\n\r\n class ABC102:\r\n pass\r\n\r\n class ABC103:\r\n pass\r\n\r\n class ABC104:\r\n pass\r\n\r\n class ABC105:\r\n pass\r\n\r\n class ABC106:\r\n pass\r\n\r\n class ABC107:\r\n pass\r\n\r\n class ABC108:\r\n pass\r\n\r\n class ABC109:\r\n pass\r\n\r\n class ABC110:\r\n pass\r\n\r\n class ABC111:\r\n pass\r\n\r\n class ABC112:\r\n pass\r\n\r\n class ABC113:\r\n pass\r\n\r\n class ABC114:\r\n pass\r\n\r\n class ABC115:\r\n pass\r\n\r\n class ABC116:\r\n pass\r\n\r\n class 
ABC117:\r\n pass\r\n\r\n class ABC118:\r\n pass\r\n\r\n class ABC119:\r\n pass\r\n\r\n class ABC120:\r\n pass\r\n\r\n class ABC121:\r\n pass\r\n\r\n class ABC122:\r\n pass\r\n\r\n class ABC123:\r\n pass\r\n\r\n class ABC124:\r\n pass\r\n\r\n class ABC125:\r\n pass\r\n\r\n class ABC126:\r\n pass\r\n\r\n class ABC127:\r\n pass\r\n\r\n class ABC128:\r\n pass\r\n\r\n class ABC129:\r\n pass\r\n\r\n class ABC130:\r\n pass\r\n\r\n class ABC131:\r\n pass\r\n\r\n class ABC132:\r\n pass\r\n\r\n class ABC133:\r\n pass\r\n\r\n class ABC134:\r\n pass\r\n\r\n class ABC135:\r\n pass\r\n\r\n class ABC136:\r\n pass\r\n\r\n class ABC137:\r\n pass\r\n\r\n class ABC138:\r\n pass\r\n\r\n class ABC139:\r\n pass\r\n\r\n class ABC140:\r\n pass\r\n\r\n class ABC141:\r\n pass\r\n\r\n class ABC142:\r\n pass\r\n\r\n class ABC143:\r\n pass\r\n\r\n class ABC144:\r\n pass\r\n\r\n class ABC145:\r\n pass\r\n\r\n class ABC146:\r\n pass\r\n\r\n class ABC147:\r\n pass\r\n\r\n class ABC148:\r\n pass\r\n\r\n class ABC149:\r\n pass\r\n\r\n class ABC150:\r\n pass\r\n\r\n class ABC151:\r\n pass\r\n\r\n class ABC152:\r\n pass\r\n\r\n class ABC153:\r\n pass\r\n\r\n class ABC154:\r\n pass\r\n\r\n class ABC155:\r\n pass\r\n\r\n class ABC156:\r\n pass\r\n\r\n class ABC157:\r\n pass\r\n\r\n class ABC158:\r\n pass\r\n\r\n class ABC159:\r\n pass\r\n\r\n class ABC160:\r\n pass\r\n\r\n class ABC161:\r\n pass\r\n\r\n class ABC162:\r\n pass\r\n\r\n class ABC163:\r\n pass\r\n\r\n class ABC164:\r\n pass\r\n\r\n class ABC165:\r\n pass\r\n\r\n class ABC166:\r\n pass\r\n\r\n class ABC167:\r\n pass\r\n\r\n class ABC168:\r\n pass\r\n\r\n class ABC169:\r\n pass\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in 
range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = itertools.accumulate(a)\r\n (*sb,) = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) 
- 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ABC178:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n s = int(sys.stdin.readline().rstrip())\r\n if s == 0:\r\n print(1)\r\n return\r\n elif s == 1:\r\n print(0)\r\n return\r\n c = np.eye(3, k=-1, dtype=np.int64)\r\n c[0, 0] = c[0, 2] = 1\r\n a = np.array([0, 0, 1])\r\n print(Algebra.dot(Algebra.matrix_pow(c, s - 2), a)[0])\r\n\r\n class ABC179:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(s + \"s\" if s[-1] != \"s\" else s + \"es\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *d = map(int, sys.stdin.read().split())\r\n d = np.array(d).reshape(n, 2).T\r\n d = np.equal(d[0], d[1]).astype(int)\r\n dd = d.copy()\r\n dd[1:] += d[:-1]\r\n dd[:-1] += d[1:]\r\n print(\"Yes\" if (dd >= 3).any() else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = (n // np.arange(1, n + 1)).sum() - len(\r\n NumberTheory.find_divisors(n)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(k, -1).T\r\n\r\n @njit((i8, i8[:], i8[:]), cache=True)\r\n def solve(n, l, r):\r\n res = np.zeros(n * 2, dtype=np.int64)\r\n res[0], res[1] = 1, -1\r\n for i in range(n - 1):\r\n res[i + 1] = (res[i + 1] + res[i]) % mod\r\n res[i + l] = (res[i + l] + res[i]) % mod\r\n res[i + r + 1] = (res[i + r + 1] - res[i]) % mod\r\n print(res[n - 1])\r\n\r\n solve(n, l, r)\r\n\r\n @staticmethod\r\n def e():\r\n n, x, m = map(int, sys.stdin.readline().split())\r\n res = [-1 for _ in range(m)]\r\n s = 0\r\n loop = np.zeros(m, dtype=np.int64)\r\n for i in range(m + 1):\r\n if i == n:\r\n print(s)\r\n return\r\n if res[x] != -1:\r\n l, loop = i - res[x], loop[res[x] : i]\r\n q, r = divmod(n - i, l)\r\n print(s + q * loop.sum() + loop[:r].sum())\r\n return\r\n res[x], loop[i] = i, x\r\n s += x\r\n x = x**2 % m\r\n\r\n class ABC180:\r\n @staticmethod\r\n def a():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n print(n - a + b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *x = map(int, sys.stdin.read().split())\r\n x = np.absolute(np.array(x))\r\n print(x.sum())\r\n print(np.sqrt((x**2).sum()))\r\n print(x.max())\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n div = NumberTheory.find_divisors(n)\r\n print(*div, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n x, y, a, b = map(int, sys.stdin.readline().split())\r\n cnt = 0\r\n while x * a <= x + b:\r\n x *= a\r\n if x >= y:\r\n print(cnt)\r\n return\r\n cnt += 1\r\n cnt += (y - x - 1) // b\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, *xyz = map(int, sys.stdin.read().split())\r\n\r\n xyz = list(zip(*[iter(xyz)] * 3))\r\n dist = [[0] * n for _ in range(n)]\r\n for i in 
range(n):\r\n a, b, c = xyz[i]\r\n for j in range(n):\r\n p, q, r = xyz[j]\r\n dist[i][j] = abs(p - a) + abs(q - b) + max(0, r - c)\r\n\r\n dp = [[inf] * n for _ in range(1 << n)]\r\n dp[0][0] = 0\r\n for s in range(1 << n):\r\n for i in range(n):\r\n t = s | (1 << i)\r\n for j in range(n):\r\n dp[t][i] = min(dp[t][i], dp[s][j] + dist[j][i])\r\n print(dp[-1][0])\r\n\r\n @staticmethod\r\n def f(): # rewrite with jit compiling later.\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = np.zeros(n + 1, dtype=np.int64)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = np.zeros(n + 1, dtype=np.int64)\r\n cycle[1:] = path[:-1]\r\n dp = np.zeros((n + 1, m + 1), dtype=np.int64)\r\n\r\n def f(l):\r\n dp[:, :] = 0\r\n dp[0, 0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n k = np.arange(1, min(l, n - i, m - j + 1) + 1)\r\n dp[i + k, j + k - 1] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k - 1] %= MOD\r\n k = np.arange(2, min(l, n - i, m - j) + 1)\r\n dp[i + k, j + k] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k] %= MOD\r\n return dp[n, m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n @staticmethod\r\n def f_2(): # PyPy\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = [0] * (n + 1)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = [0] + path[:-1]\r\n\r\n def f(l):\r\n dp = [[0] * (m + 1) for _ in range(n + 1)]\r\n dp[0][0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n for k in range(1, min(l, n - i, m - j + 1) + 1):\r\n dp[i + k][j + k - 1] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k - 1] %= MOD\r\n for k in range(1, min(l, n - i, m - j) + 1):\r\n dp[i + k][j + k] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k] %= MOD\r\n\r\n return dp[n][m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n class ARC106:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = 1\r\n while pow(3, a) <= n:\r\n m = n - pow(3, a)\r\n b = 1\r\n while pow(5, b) <= m:\r\n if pow(5, b) == m:\r\n print(a, b)\r\n return\r\n b += 1\r\n a += 1\r\n print(-1)\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n uf = GeometryTopology.Graph(n)\r\n uf.init_dsu()\r\n for _ in range(m):\r\n c, d = map(int, sys.stdin.readline().split())\r\n c -= 1\r\n d -= 1\r\n uf.unite(c, d)\r\n\r\n visited = [False] * n\r\n ga = [[] for _ in range(n)]\r\n gb = [[] for _ in range(n)]\r\n for i in range(n):\r\n r = uf.find(i)\r\n ga[r].append(a[i])\r\n gb[r].append(b[i])\r\n print(\r\n \"Yes\"\r\n if all(sum(ga[i]) == sum(gb[i]) for i in range(n))\r\n else \"No\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n if m < 0:\r\n print(-1)\r\n return\r\n if n == 1:\r\n if m != 0:\r\n print(-1)\r\n return\r\n print(1, 2)\r\n return\r\n\r\n if m >= n - 1:\r\n print(-1)\r\n return\r\n l, r = 1, 10**9\r\n print(l, r)\r\n for _ in range(n - 2 - m):\r\n l += 1\r\n r -= 1\r\n print(l, r)\r\n r = l\r\n for _ in range(m + 1):\r\n l, r = r + 1, r + 2\r\n print(l, r)\r\n\r\n 
@staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n b = np.zeros((k + 1, n), dtype=np.int64)\r\n b[0] = 1\r\n for i in range(k):\r\n b[i + 1] = b[i] * a % mod\r\n s = b.sum(axis=1) % mod\r\n inv_2 = pow(2, mod - 2, mod)\r\n c = Combinatorics.CombinationsMod(mod=mod)\r\n for x in range(1, k + 1):\r\n l = np.arange(x + 1)\r\n print(\r\n (\r\n (c(x, l) * s[l] % mod * s[l][::-1] % mod).sum() % mod\r\n - pow(2, x, mod) * s[x]\r\n )\r\n % mod\r\n * inv_2\r\n % mod\r\n )\r\n\r\n @staticmethod\r\n def e():\r\n pass\r\n\r\n @staticmethod\r\n def f():\r\n pass\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class TDPC:\r\n @staticmethod\r\n def t():\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n class CR676div2:\r\n @staticmethod\r\n def a():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(a ^ b)\r\n\r\n @staticmethod\r\n def b():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [list(sys.stdin.readline().rstrip()) for _ in range(n)]\r\n s[0][0] = s[-1][-1] = \"0\"\r\n for i in range(n):\r\n for j in range(n):\r\n s[i][j] = int(s[i][j])\r\n\r\n def can_goal(g, c=0):\r\n visited = [0] * n\r\n stack = [(0, 0)]\r\n visited[0] |= 1 << 0\r\n while stack:\r\n y, x = stack.pop()\r\n for dy, dx in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n i, j = y + dy, x + dx\r\n if i < 0 or i >= n or j < 0 or j >= n:\r\n continue\r\n if i == j == n - 1:\r\n return True\r\n if visited[i] >> j & 1:\r\n continue\r\n visited[i] |= 1 << j\r\n if g[i][j] != c:\r\n continue\r\n stack.append((i, j))\r\n return False\r\n\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(0)\r\n continue\r\n\r\n flg = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if i == j == 0 or i == j == n - 1:\r\n continue\r\n s[i][j] ^= 1\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(1)\r\n print(i + 1, j + 1)\r\n flg = 1\r\n break\r\n s[i][j] ^= 1\r\n if flg:\r\n break\r\n if flg:\r\n continue\r\n\r\n print(2)\r\n if s[0][1] == s[1][0]:\r\n print(n, n - 1)\r\n print(n - 1, n)\r\n continue\r\n\r\n if s[0][1] == s[-1][-2]:\r\n print(1, 2)\r\n print(n - 1, n)\r\n else:\r\n print(1, 2)\r\n print(n, n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 
2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = pn.factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in pn.factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n nt = NumberTheory.PrimeNumbers()\r\n print(sorted(nt)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = \"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n pn = NumberTheory.PrimeNumbers(2 * 10**6 + 1)\r\n print(sum(pn))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 
62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self):\r\n print(1)\r\n\r\n\r\nclass AOJ:\r\n @staticmethod\r\n def ALDS1_12_A():\r\n n, *a = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for i in range(n - 1):\r\n for j in range(i + 1, n):\r\n if a[i * n + j] == -1:\r\n continue\r\n g.add_edge(i, j, weight=a[i * n + j])\r\n g.add_edge(j, i, weight=a[i * n + j])\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n @staticmethod\r\n def GRL_3_C(): # strongly connected components\r\n n, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(m):\r\n g.add_edge(*map(int, sys.stdin.readline().split()))\r\n r = g.scc()\r\n q, *uv = map(int, sys.stdin.read().split())\r\n for u, v in zip(*[iter(uv)] * 2):\r\n print(int(r[u] == r[v]))\r\n\r\n\r\nclass YosupoJudge:\r\n @staticmethod\r\n def Directed_MST():\r\n n, m, s, *abc = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n g.add_edge(a, b, weight=c)\r\n _, d, p = g.prim(src=s, return_parent=True)\r\n print(d)\r\n print(*p)\r\n\r\n @staticmethod\r\n def Manhattan_MST():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # AtCoder.ABC179.f()\r\n # AtCoder.ABC060.d()\r\n AtCoder.ABC081.d()\r\n # AtCoder.ARC106.d()\r\n # YosupoJudge.Directed_MST()\r\n pass\r\n"
] | [
[
"numpy.array",
"scipy.sparse.csr_matrix",
"scipy.sparse.csgraph.floyd_warshall"
],
[
"numpy.logical_or",
"numpy.ones",
"numpy.sum",
"numpy.any",
"numpy.argsort",
"numpy.logical_or.at",
"numpy.fill_diagonal",
"numpy.amax",
"numpy.append",
"scipy.optimize",
"numpy.absolute",
"numpy.subtract.at",
"numpy.identity",
"numpy.unique",
"numpy.flatnonzero",
"numpy.minimum",
"numpy.sqrt",
"numpy.eye",
"numpy.minimum.at",
"numpy.nonzero",
"numpy.bincount",
"scipy.ndimage.distance_transform_cdt",
"numpy.zeros",
"scipy.optimize.bisect",
"numpy.equal",
"numpy.add.at",
"numpy.count_nonzero",
"numpy.arange",
"scipy.sparse.csgraph.connected_components",
"numpy.hstack",
"numpy.maximum",
"numpy.sort",
"numpy.pad",
"numpy.resize",
"numpy.zeros_like",
"numpy.cumsum",
"numpy.bitwise_xor.reduce",
"scipy.sparse.csr_matrix",
"scipy.optimize.brenth",
"scipy.sparse.csgraph.shortest_path",
"numpy.ravel",
"numpy.amin",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.full"
]
] |