Columns: repo_name (string, lengths 6-77), path (string, lengths 8-215), license (string, 15 classes), cells (sequence), types (sequence)
pk-ai/training
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
mit
[ "Deep Learning\nAssignment 1\nThe objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.\nThis notebook uses the notMNIST dataset to be used with python experiments. This dataset is designed to look like the classic MNIST dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.", "# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nfrom IPython.display import display, Image\nfrom scipy import ndimage\nfrom sklearn.linear_model import LogisticRegression\nfrom six.moves.urllib.request import urlretrieve\nfrom six.moves import cPickle as pickle\n\n# Config the matplotlib backend as plotting inline in IPython\n%matplotlib inline", "First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labeled examples. Given these sizes, it should be possible to train models quickly on any machine.", "url = 'https://commondatastorage.googleapis.com/books1000/'\nlast_percent_reported = None\ndata_root = '.' # Change me to store data elsewhere\n\ndef download_progress_hook(count, blockSize, totalSize):\n \"\"\"A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 5% change in download progress.\n \"\"\"\n global last_percent_reported\n percent = int(count * blockSize * 100 / totalSize)\n\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n last_percent_reported = percent\n \ndef maybe_download(filename, expected_bytes, force=False):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename) \n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename\n\ntrain_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\ntest_filename = maybe_download('notMNIST_small.tar.gz', 8458043)", "Extract the dataset from the compressed .tar.gz file.\nThis should give you a set of directories, labeled A through J.", "num_classes = 10\nnp.random.seed(133)\n\ndef maybe_extract(filename, force=False):\n root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz\n if os.path.isdir(root) and not force:\n # You may override by setting force=True.\n print('%s already present - Skipping extraction of %s.' % (root, filename))\n else:\n print('Extracting data for %s. This may take a while. Please wait.' 
% root)\n tar = tarfile.open(filename)\n sys.stdout.flush()\n tar.extractall(data_root)\n tar.close()\n data_folders = [\n os.path.join(root, d) for d in sorted(os.listdir(root))\n if os.path.isdir(os.path.join(root, d))]\n if len(data_folders) != num_classes:\n raise Exception(\n 'Expected %d folders, one per class. Found %d instead.' % (\n num_classes, len(data_folders)))\n print(data_folders)\n return data_folders\n \ntrain_folders = maybe_extract(train_filename)\ntest_folders = maybe_extract(test_filename)", "Problem 1\nLet's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.", "# Solution for Problem 1\nimport random\nprint('Displaying images of train folders')\n# Looping through train folders and displaying a random image of each folder\nfor path in train_folders:\n image_file = os.path.join(path, random.choice(os.listdir(path)))\n display(Image(filename=image_file))\n\nprint('Displaying images of test folders')\n# Looping through train folders and displaying a random image of each folder\nfor path in test_folders:\n image_file = os.path.join(path, random.choice(os.listdir(path)))\n display(Image(filename=image_file))", "Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.\nWe'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. \nA few images might not be readable, we'll just skip them.", "image_size = 28 # Pixel width and height.\npixel_depth = 255.0 # Number of levels per pixel.\n\ndef load_letter(folder, min_num_images):\n \"\"\"Load the data for a single letter label.\"\"\"\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=np.float32)\n print(folder)\n num_images = 0\n for image in image_files:\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n \n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' %\n (num_images, min_num_images))\n \n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n \ndef maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n for folder in data_folders:\n set_filename = folder + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n # You may override by setting force=True.\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' 
% set_filename)\n dataset = load_letter(folder, min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n \n return dataset_names\n\ntrain_datasets = maybe_pickle(train_folders, 45000)\ntest_datasets = maybe_pickle(test_folders, 1800)", "Problem 2\nLet's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.", "# Solution for Problem 2\ndef show_first_image(datasets):\n for pickl in datasets:\n print('Showing a first image from pickle ', pickl)\n try:\n with open(pickl, 'rb') as f:\n letter_set = pickle.load(f)\n plt.imshow(letter_set[0])\n except Exception as e:\n print('Unable to show image from pickle ', pickl, ':', e)\n raise\nprint('From Training dataset')\nshow_first_image(train_datasets)\nprint('From Test Dataset')\nshow_first_image(test_datasets)", "Problem 3\nAnother check: we expect the data to be balanced across classes. Verify that.", "def show_dataset_shape(datasets):\n for pickl in datasets:\n try:\n with open(pickl, 'rb') as f:\n letter_set = pickle.load(f)\n print('Shape of pickle ', pickl, 'is', np.shape(letter_set))\n except Exception as e:\n print('Unable to show image from pickle ', pickl, ':', e)\n raise\n\nprint('Shape for Training set')\nshow_dataset_shape(train_datasets)\nprint('Shape for Test set')\nshow_dataset_shape(test_datasets)", "Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.\nAlso create a validation dataset for hyperparameter tuning.", "def make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\ndef merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n \n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class+tsize_per_class\n for label, pickle_file in enumerate(pickle_files): \n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n # let's shuffle the letters to have random validation and training set\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n \n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n \n return valid_dataset, valid_labels, train_dataset, train_labels\n \n\"\"\"\ntrain_size = 200000\nvalid_size = 10000\ntest_size = 10000\n\"\"\" \ntrain_size = 20000\nvalid_size = 1000\ntest_size = 1000\n\nvalid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(\n 
train_datasets, train_size, valid_size)\n_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)\n\nprint('Training:', train_dataset.shape, train_labels.shape)\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\nprint('Testing:', test_dataset.shape, test_labels.shape)", "Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.", "def randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation,:,:]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\ntrain_dataset, train_labels = randomize(train_dataset, train_labels)\ntest_dataset, test_labels = randomize(test_dataset, test_labels)\nvalid_dataset, valid_labels = randomize(valid_dataset, valid_labels)", "Problem 4\nConvince yourself that the data is still good after shuffling!", "print('Printing Train, validation and test labels after shuffling')\ndef print_first_10_labels(labels):\n printing_labels = []\n for i in range(10):\n printing_labels.append(labels[[i]])\n print(printing_labels)\nprint_first_10_labels(train_labels)\nprint_first_10_labels(test_labels)\nprint_first_10_labels(valid_labels)", "Finally, let's save the data for later reuse:", "pickle_file = os.path.join(data_root, 'notMNIST.pickle')\n\ntry:\n f = open(pickle_file, 'wb')\n save = {\n 'train_dataset': train_dataset,\n 'train_labels': train_labels,\n 'valid_dataset': valid_dataset,\n 'valid_labels': valid_labels,\n 'test_dataset': test_dataset,\n 'test_labels': test_labels,\n }\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\nstatinfo = os.stat(pickle_file)\nprint('Compressed pickle size:', statinfo.st_size)", "Problem 5\nBy construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it.\nMeasure how much overlap there is between training, validation and test samples.\nOptional questions:\n- What about near duplicates between datasets? (images that are almost identical)\n- Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.\n\n\nProblem 6\nLet's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.\nTrain a simple model on this data using 50, 100, 1000 and 5000 training samples. 
Hint: you can use the LogisticRegression model from sklearn.linear_model.\nOptional question: train an off-the-shelf model on all the data!", "logreg_model_clf = LogisticRegression()\nnsamples, nx, ny = train_dataset.shape\nd2_train_dataset = train_dataset.reshape((nsamples,nx*ny))\nlogreg_model_clf.fit(d2_train_dataset, train_labels)\nfrom sklearn.metrics import accuracy_score\nnsamples, nx, ny = valid_dataset.shape\nd2_valid_dataset = valid_dataset.reshape((nsamples,nx*ny))\nprint(\"validation accuracy:\", accuracy_score(valid_labels, logreg_model_clf.predict(d2_valid_dataset)))\nnsamples, nx, ny = test_dataset.shape\nd2_test_dataset = test_dataset.reshape((nsamples,nx*ny))\nprint(\"test accuracy:\", accuracy_score(test_labels, logreg_model_clf.predict(d2_test_dataset)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mattgiguere/doglodge
code/.ipynb_checkpoints/bf_qt_scraping-checkpoint.ipynb
mit
[ "bf_qt_scraping\nThis notebook describes how hotel data can be scraped using PyQT.\nThe items we want to extract are:\n- the hotels for a given city\n- links to each hotel page\n- text hotel summary\n- text hotel description\nOnce the links for each hotel are determined, I then want to extract the following items pertaining to each review:\n- title\n- author\n- text\n- rating", "import sys \nfrom PyQt4.QtGui import * \nfrom PyQt4.QtCore import * \nfrom PyQt4.QtWebKit import * \nfrom lxml import html \n\nclass Render(QWebPage): \n def __init__(self, url): \n self.app = QApplication(sys.argv) \n QWebPage.__init__(self) \n self.loadFinished.connect(self._loadFinished) \n self.mainFrame().load(QUrl(url)) \n self.app.exec_() \n\n def _loadFinished(self, result): \n self.frame = self.mainFrame() \n self.app.quit() \n \n def update_url(self, url):\n self.mainFrame().load(QUrl(url)) \n self.app.exec_() \n \n\nurl = 'http://www.bringfido.com/lodging/city/new_haven_ct_us' \n#This does the magic.Loads everything\nr = Render(url) \n#result is a QString.\nresult = r.frame.toHtml()\n\n# result\n\n#QString should be converted to string before processed by lxml\nformatted_result = str(result.toAscii())\n\n#Next build lxml tree from formatted_result\ntree = html.fromstring(formatted_result)\n\ntree.text_content\n\n#Now using correct Xpath we are fetching URL of archives\narchive_links = tree.xpath('//*[@id=\"results_list\"]/div')\nprint archive_links\n\nurl = 'http://pycoders.com/archive/' \nr = Render(url) \nresult = r.frame.toHtml()\n\n#QString should be converted to string before processed by lxml\nformatted_result = str(result.toAscii())\n\ntree = html.fromstring(formatted_result)\n\n#Now using correct Xpath we are fetching URL of archives\narchive_links = tree.xpath('//*[@class=\"campaign\"]/a/@href')\n\n# for lnk in archive_links:\n# print(lnk)", "Now the Hotels", "url = 'http://www.bringfido.com/lodging/city/new_haven_ct_us' \nr = Render(url) \nresult = r.frame.toHtml()\n\n#QString should be converted to string before processed by lxml\nformatted_result = str(result.toAscii())\n\ntree = html.fromstring(formatted_result)\n\n#Now using correct Xpath we are fetching URL of archives\narchive_links = tree.xpath('//*[@id=\"results_list\"]/div')\n\nprint(archive_links)\nprint('')\n\nfor lnk in archive_links:\n print(lnk.xpath('div[2]/h1/a/text()')[0])\n print(lnk.text_content())\n print('*'*25)\n", "Now Get the Links", "links = []\nfor lnk in archive_links:\n print(lnk.xpath('div/h1/a/@href')[0])\n links.append(lnk.xpath('div/h1/a/@href')[0])\n print('*'*25)\n\nlnk.xpath('//*/div/h1/a/@href')[0]\n\nlinks", "Loading Reviews\nNext, we want to step through each page, and scrape the reviews for each hotel.", "url_base = 'http://www.bringfido.com' \nr.update_url(url_base+links[0]) \nresult = r.frame.toHtml()\n\n#QString should be converted to string before processed by lxml\nformatted_result = str(result.toAscii())\n\ntree = html.fromstring(formatted_result)\n\nhotel_description = tree.xpath('//*[@class=\"body\"]/text()')\n\ndetails = tree.xpath('//*[@class=\"address\"]/text()')\n\naddress = details[0]\ncsczip = details[1]\nphone = details[2]\n\n#Now using correct Xpath we are fetching URL of archives\nreviews = tree.xpath('//*[@class=\"review_container\"]')\n\ntexts = []\ntitles = []\nauthors = []\nratings = []\n\nprint(reviews)\nprint('')\nfor rev in reviews:\n titles.append(rev.xpath('div/div[1]/text()')[0])\n authors.append(rev.xpath('div/div[2]/text()')[0])\n 
texts.append(rev.xpath('div/div[3]/text()')[0])\n ratings.append(rev.xpath('div[2]/img/@src')[0].split('/')[-1][0:1])\n print(rev.xpath('div[2]/img/@src')[0].split('/')[-1][0:1])\n\n\ntitles\n\nauthors\n\ntexts\n\nratings" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tritemio/multispot_paper
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
mit
[ "Executed: Mon Mar 27 11:39:24 2017\nDuration: 7 seconds.\nusALEX-5samples - Template\n\nThis notebook is executed through 8-spots paper analysis.\nFor a direct execution, uncomment the cell below.", "ph_sel_name = \"None\"\n\ndata_id = \"12d\"\n\n# data_id = \"7d\"", "Load software and filenames definitions", "from fretbursts import *\n\ninit_notebook()\nfrom IPython.display import display", "Data folder:", "data_dir = './data/singlespot/'\n\nimport os\ndata_dir = os.path.abspath(data_dir) + '/'\nassert os.path.exists(data_dir), \"Path '%s' does not exist.\" % data_dir", "List of data files:", "from glob import glob\nfile_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)\n## Selection for POLIMI 2012-11-26 datatset\nlabels = ['17d', '27d', '7d', '12d', '22d']\nfiles_dict = {lab: fname for lab, fname in zip(labels, file_list)}\nfiles_dict\n\ndata_id", "Data load\nInitial loading of the data:", "d = loader.photon_hdf5(filename=files_dict[data_id])", "Load the leakage coefficient from disk:", "leakage_coeff_fname = 'results/usALEX - leakage coefficient DexDem.csv'\nleakage = np.loadtxt(leakage_coeff_fname)\n\nprint('Leakage coefficient:', leakage)", "Load the direct excitation coefficient ($d_{exAA}$) from disk:", "dir_ex_coeff_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv'\ndir_ex_aa = np.loadtxt(dir_ex_coeff_fname)\n\nprint('Direct excitation coefficient (dir_ex_aa):', dir_ex_aa)", "Load the gamma-factor ($\\gamma$) from disk:", "gamma_fname = 'results/usALEX - gamma factor - all-ph.csv'\ngamma = np.loadtxt(gamma_fname)\n\nprint('Gamma-factor:', gamma)", "Update d with the correction coefficients:", "d.leakage = leakage\nd.dir_ex = dir_ex_aa\nd.gamma = gamma", "Laser alternation selection\nAt this point we have only the timestamps and the detector numbers:", "d.ph_times_t[0][:3], d.ph_times_t[0][-3:]#, d.det_t\n\nprint('First and last timestamps: {:10,} {:10,}'.format(d.ph_times_t[0][0], d.ph_times_t[0][-1]))\nprint('Total number of timestamps: {:10,}'.format(d.ph_times_t[0].size))", "We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations:", "d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)", "We should check if everithing is OK with an alternation histogram:", "plot_alternation_hist(d)", "If the plot looks good we can apply the parameters with:", "loader.alex_apply_period(d)\n\nprint('D+A photons in D-excitation period: {:10,}'.format(d.D_ex[0].sum()))\nprint('D+A photons in A-excitation period: {:10,}'.format(d.A_ex[0].sum()))", "Measurements infos\nAll the measurement data is in the d variable. 
We can print it:", "d", "Or check the measurements duration:", "d.time_max", "Compute background\nCompute the background using automatic threshold:", "d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)\n\ndplot(d, timetrace_bg)\n\nd.rate_m, d.rate_dd, d.rate_ad, d.rate_aa", "Burst search and selection", "d.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel('all'))\n\nprint(d.ph_sel)\ndplot(d, hist_fret);\n\n# if data_id in ['7d', '27d']:\n# ds = d.select_bursts(select_bursts.size, th1=20)\n# else:\n# ds = d.select_bursts(select_bursts.size, th1=30)\n\nds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)\n\nn_bursts_all = ds.num_bursts[0]\n\ndef select_and_plot_ES(fret_sel, do_sel):\n ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)\n ds_do = ds.select_bursts(select_bursts.ES, **do_sel)\n bpl.plot_ES_selection(ax, **fret_sel)\n bpl.plot_ES_selection(ax, **do_sel) \n return ds_fret, ds_do\n\nax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)\n\nif data_id == '7d':\n fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)\n do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) \n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n \nelif data_id == '12d':\n fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '17d':\n fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '22d':\n fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nelif data_id == '27d':\n fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nn_bursts_do = ds_do.num_bursts[0]\nn_bursts_fret = ds_fret.num_bursts[0]\n\nn_bursts_do, n_bursts_fret\n\nd_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)\nprint('D-only fraction:', d_only_frac)\n\ndplot(ds_fret, hist2d_alex, scatter_alpha=0.1);\n\ndplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);", "Donor Leakage fit", "bandwidth = 0.03\n\nE_range_do = (-0.1, 0.15)\nE_ax = np.r_[-0.2:0.401:0.0002]\n\nE_pr_do_kde = bext.fit_bursts_kde_peak(ds_do, bandwidth=bandwidth, weights='size', \n x_range=E_range_do, x_ax=E_ax, save_fitter=True)\n\nmfit.plot_mfit(ds_do.E_fitter, plot_kde=True, bins=np.r_[E_ax.min(): E_ax.max(): bandwidth])\nplt.xlim(-0.3, 0.5)\nprint(\"%s: E_peak = %.2f%%\" % (ds.ph_sel, E_pr_do_kde*100))", "Burst sizes", "nt_th1 = 50\n\ndplot(ds_fret, hist_size, which='all', add_naa=False)\nxlim(-0, 250)\nplt.axvline(nt_th1)\n\nTh_nt = np.arange(35, 120)\nnt_th = np.zeros(Th_nt.size)\nfor i, th in enumerate(Th_nt):\n ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)\n nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th\n\nplt.figure()\nplot(Th_nt, nt_th)\nplt.axvline(nt_th1)\n\nnt_mean = nt_th[np.where(Th_nt == nt_th1)][0]\nnt_mean", "Fret fit\nMax position of the Kernel Density Estimation (KDE):", "E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')\nE_fitter = ds_fret.E_fitter\n\nE_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nE_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))\n\nE_fitter.fit_res[0].params.pretty_print()\n\nfig, ax = 
plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(E_fitter, ax=ax[0])\nmfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))\ndisplay(E_fitter.params*100)", "Weighted mean of $E$ of each burst:", "ds_fret.fit_E_m(weights='size')", "Gaussian fit (no weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)", "Gaussian fit (using burst size as weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')\n\nE_kde_w = E_fitter.kde_max_pos[0]\nE_gauss_w = E_fitter.params.loc[0, 'center']\nE_gauss_w_sig = E_fitter.params.loc[0, 'sigma']\nE_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))\nE_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr\nE_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr", "Stoichiometry fit\nMax position of the Kernel Density Estimation (KDE):", "S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)\nS_fitter = ds_fret.S_fitter\n\nS_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nS_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(S_fitter, ax=ax[0])\nmfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))\ndisplay(S_fitter.params*100)\n\nS_kde = S_fitter.kde_max_pos[0]\nS_gauss = S_fitter.params.loc[0, 'center']\nS_gauss_sig = S_fitter.params.loc[0, 'sigma']\nS_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))\nS_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr\nS_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr", "The Maximum likelihood fit for a Gaussian population is the mean:", "S = ds_fret.S[0]\nS_ml_fit = (S.mean(), S.std())\nS_ml_fit", "Computing the weighted mean and weighted standard deviation we get:", "weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)\nS_mean = np.dot(weights, S)/weights.sum()\nS_std_dev = np.sqrt(\n np.dot(weights, (S - S_mean)**2)/weights.sum())\nS_wmean_fit = [S_mean, S_std_dev]\nS_wmean_fit", "Save data to file", "sample = data_id", "The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.", "variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '\n 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '\n 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '\n 'E_pr_do_kde nt_mean\\n')", "This is just a trick to format the different variables:", "variables_csv = variables.replace(' ', ',')\nfmt_float = '{%s:.6f}'\nfmt_int = '{%s:d}'\nfmt_str = '{%s}'\nfmt_dict = {**{'sample': fmt_str}, \n **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}\nvar_dict = {name: eval(name) for name in variables.split()}\nvar_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\\n'\ndata_str = var_fmt.format(**var_dict)\n\nprint(variables_csv)\nprint(data_str)\n\n# NOTE: The file name should be the notebook name but with .csv extension\nwith open('results/usALEX-5samples-E-corrected-all-ph.csv', 'a') as f:\n f.seek(0, 2)\n if f.tell() == 0:\n f.write(variables_csv)\n f.write(data_str)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tritemio/multispot_paper
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
mit
[ "Executed: Mon Mar 27 11:38:07 2017\nDuration: 10 seconds.\nusALEX-5samples - Template\n\nThis notebook is executed through 8-spots paper analysis.\nFor a direct execution, uncomment the cell below.", "ph_sel_name = \"AexAem\"\n\ndata_id = \"17d\"\n\n# ph_sel_name = \"all-ph\"\n# data_id = \"7d\"", "Load software and filenames definitions", "from fretbursts import *\n\ninit_notebook()\nfrom IPython.display import display", "Data folder:", "data_dir = './data/singlespot/'", "Check that the folder exists:", "import os\ndata_dir = os.path.abspath(data_dir) + '/'\nassert os.path.exists(data_dir), \"Path '%s' does not exist.\" % data_dir", "List of data files in data_dir:", "from glob import glob\n\nfile_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)\nfile_list\n\n## Selection for POLIMI 2012-12-6 dataset\n# file_list.pop(2)\n# file_list = file_list[1:-2]\n# display(file_list)\n# labels = ['22d', '27d', '17d', '12d', '7d']\n\n## Selection for P.E. 2012-12-6 dataset\n# file_list.pop(1)\n# file_list = file_list[:-1]\n# display(file_list)\n# labels = ['22d', '27d', '17d', '12d', '7d']\n\n## Selection for POLIMI 2012-11-26 datatset\nlabels = ['17d', '27d', '7d', '12d', '22d']\n\nfiles_dict = {lab: fname for lab, fname in zip(labels, file_list)}\nfiles_dict\n\nph_sel_map = {'all-ph': Ph_sel('all'), 'AexAem': Ph_sel(Aex='Aem')}\nph_sel = ph_sel_map[ph_sel_name]\n\ndata_id, ph_sel_name", "Data load\nInitial loading of the data:", "d = loader.photon_hdf5(filename=files_dict[data_id])", "Laser alternation selection\nAt this point we have only the timestamps and the detector numbers:", "d.ph_times_t, d.det_t", "We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations:", "d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)", "We should check if everithing is OK with an alternation histogram:", "plot_alternation_hist(d)", "If the plot looks good we can apply the parameters with:", "loader.alex_apply_period(d)", "Measurements infos\nAll the measurement data is in the d variable. 
We can print it:", "d", "Or check the measurements duration:", "d.time_max", "Compute background\nCompute the background using automatic threshold:", "d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)\n\ndplot(d, timetrace_bg)\n\nd.rate_m, d.rate_dd, d.rate_ad, d.rate_aa", "Burst search and selection", "from mpl_toolkits.axes_grid1 import AxesGrid\nimport lmfit\nprint('lmfit version:', lmfit.__version__)\n\nassert d.dir_ex == 0\nassert d.leakage == 0\n\nd.burst_search(m=10, F=6, ph_sel=ph_sel)\n\nprint(d.ph_sel, d.num_bursts)\n\nds_sa = d.select_bursts(select_bursts.naa, th1=30)\nds_sa.num_bursts", "Preliminary selection and plots", "mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30\nds_saw = d.select_bursts_mask_apply([mask])\n\nds_sas0 = ds_sa.select_bursts(select_bursts.S, S2=0.10)\nds_sas = ds_sa.select_bursts(select_bursts.S, S2=0.15)\nds_sas2 = ds_sa.select_bursts(select_bursts.S, S2=0.20)\nds_sas3 = ds_sa.select_bursts(select_bursts.S, S2=0.25)\n\nds_st = d.select_bursts(select_bursts.size, add_naa=True, th1=30)\nds_sas.num_bursts\n\ndx = ds_sas0\nsize = dx.na[0] + dx.nd[0]\ns_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)\ns_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])\nplot(s_ax, s_hist, '-o', alpha=0.5)\n\ndx = ds_sas\nsize = dx.na[0] + dx.nd[0]\ns_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)\ns_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])\nplot(s_ax, s_hist, '-o', alpha=0.5)\n\ndx = ds_sas2\nsize = dx.na[0] + dx.nd[0]\ns_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)\ns_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])\nplot(s_ax, s_hist, '-o', alpha=0.5)\n\ndx = ds_sas3\nsize = dx.na[0] + dx.nd[0]\ns_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)\ns_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])\nplot(s_ax, s_hist, '-o', alpha=0.5)\n\nplt.title('(nd + na) for A-only population using different S cutoff');\n\ndx = ds_sa\n\nalex_jointplot(dx);\n\ndplot(ds_sa, hist_S)", "A-direct excitation fitting\nTo extract the A-direct excitation coefficient we need to fit the \nS values for the A-only population.\nThe S value for the A-only population is fitted with different methods:\n- Histogram git with 2 Gaussians or with 2 asymmetric Gaussians \n(an asymmetric Gaussian has right- and left-side of the peak\ndecreasing according to different sigmas).\n- KDE maximum\nIn the following we apply these methods using different selection\nor weighting schemes to reduce amount of FRET population and make\nfitting of the A-only population easier.\nEven selection\nHere A-only and FRET population are evenly selected.", "dx = ds_sa\n\nbin_width = 0.03\nbandwidth = 0.03\nbins = np.r_[-0.2 : 1 : bin_width]\nx_kde = np.arange(bins.min(), bins.max(), 0.0002)\n\n## Weights\nweights = None\n\n## Histogram fit\nfitter_g = mfit.MultiFitter(dx.S)\nfitter_g.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])\nfitter_g.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))\nS_hist_orig = fitter_g.hist_pdf\n\nS_2peaks = fitter_g.params.loc[0, 'p1_center']\ndir_ex_S2p = S_2peaks/(1 - S_2peaks)\nprint('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p)\n\n## KDE\nfitter_g.calc_kde(bandwidth=bandwidth)\nfitter_g.find_kde_max(x_kde, xmin=0, xmax=0.15)\n\nS_peak = fitter_g.kde_max_pos[0]\ndir_ex_S_kde = S_peak/(1 - S_peak)\nprint('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\n\nmfit.plot_mfit(fitter_g, 
ax=ax[0])\nax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100))\n\nmfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=True)\nax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak*100));\n\n## 2-Asym-Gaussian\nfitter_ag = mfit.MultiFitter(dx.S)\nfitter_ag.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])\nfitter_ag.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.1, p2_center=0.4))\n#print(fitter_ag.fit_obj[0].model.fit_report())\n\nS_2peaks_a = fitter_ag.params.loc[0, 'p1_center']\ndir_ex_S2pa = S_2peaks_a/(1 - S_2peaks_a)\nprint('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2pa)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\n\nmfit.plot_mfit(fitter_g, ax=ax[0])\nax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100))\n\nmfit.plot_mfit(fitter_ag, ax=ax[1])\nax[1].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_a*100));", "Zero threshold on nd\nSelect bursts with:\n$$n_d < 0$$.", "dx = ds_sa.select_bursts(select_bursts.nd, th1=-100, th2=0)\n\nfitter = bext.bursts_fitter(dx, 'S')\nfitter.fit_histogram(model = mfit.factory_gaussian(center=0.1))\nS_1peaks_th = fitter.params.loc[0, 'center']\ndir_ex_S1p = S_1peaks_th/(1 - S_1peaks_th)\nprint('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S1p)\n\nmfit.plot_mfit(fitter)\nplt.xlim(-0.1, 0.6)", "Selection 1\nBursts are weighted using $w = f(S)$, where the function $f(S)$ is a\nGaussian fitted to the $S$ histogram of the FRET population.", "dx = ds_sa\n\n## Weights\nweights = 1 - mfit.gaussian(dx.S[0], fitter_g.params.loc[0, 'p2_center'], fitter_g.params.loc[0, 'p2_sigma'])\nweights[dx.S[0] >= fitter_g.params.loc[0, 'p2_center']] = 0\n\n## Histogram fit\nfitter_w1 = mfit.MultiFitter(dx.S)\nfitter_w1.weights = [weights]\nfitter_w1.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])\nfitter_w1.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))\nS_2peaks_w1 = fitter_w1.params.loc[0, 'p1_center']\ndir_ex_S2p_w1 = S_2peaks_w1/(1 - S_2peaks_w1)\nprint('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w1)\n\n## KDE\nfitter_w1.calc_kde(bandwidth=bandwidth)\nfitter_w1.find_kde_max(x_kde, xmin=0, xmax=0.15)\nS_peak_w1 = fitter_w1.kde_max_pos[0]\ndir_ex_S_kde_w1 = S_peak_w1/(1 - S_peak_w1)\nprint('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w1)\n\ndef plot_weights(x, weights, ax):\n ax2 = ax.twinx()\n x_sort = x.argsort()\n ax2.plot(x[x_sort], weights[x_sort], color='k', lw=4, alpha=0.4)\n ax2.set_ylabel('Weights');\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(fitter_w1, ax=ax[0])\nmfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)\nplot_weights(dx.S[0], weights, ax=ax[0])\nax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w1*100))\n\nmfit.plot_mfit(fitter_w1, ax=ax[1], plot_model=False, plot_kde=True)\nmfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)\nplot_weights(dx.S[0], weights, ax=ax[1])\nax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w1*100));", "Selection 2\nBursts are here weighted using weights $w$:\n$$w = n_{aa} - |n_a + n_d|$$", "## Weights\nsizes = dx.nd[0] + dx.na[0] #- dir_ex_S_kde_w3*dx.naa[0]\nweights = dx.naa[0] - abs(sizes)\nweights[weights < 0] = 0\n\n## Histogram\nfitter_w4 = mfit.MultiFitter(dx.S)\nfitter_w4.weights = [weights]\nfitter_w4.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])\nfitter_w4.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))\nS_2peaks_w4 = fitter_w4.params.loc[0, 
'p1_center']\ndir_ex_S2p_w4 = S_2peaks_w4/(1 - S_2peaks_w4)\nprint('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w4)\n\n## KDE\nfitter_w4.calc_kde(bandwidth=bandwidth)\nfitter_w4.find_kde_max(x_kde, xmin=0, xmax=0.15)\nS_peak_w4 = fitter_w4.kde_max_pos[0]\ndir_ex_S_kde_w4 = S_peak_w4/(1 - S_peak_w4)\nprint('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w4)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\n\nmfit.plot_mfit(fitter_w4, ax=ax[0])\nmfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)\n#plot_weights(dx.S[0], weights, ax=ax[0])\nax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w4*100))\n\nmfit.plot_mfit(fitter_w4, ax=ax[1], plot_model=False, plot_kde=True)\nmfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)\n#plot_weights(dx.S[0], weights, ax=ax[1])\nax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w4*100));", "Selection 3\nBursts are here selected according to:\n$$n_{aa} - |n_a + n_d| > 30$$", "mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30\nds_saw = d.select_bursts_mask_apply([mask])\nprint(ds_saw.num_bursts)\n\ndx = ds_saw\n\n## Weights\nweights = None\n\n## 2-Gaussians\nfitter_w5 = mfit.MultiFitter(dx.S)\nfitter_w5.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])\nfitter_w5.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))\nS_2peaks_w5 = fitter_w5.params.loc[0, 'p1_center']\ndir_ex_S2p_w5 = S_2peaks_w5/(1 - S_2peaks_w5)\nprint('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w5)\n\n## KDE\nfitter_w5.calc_kde(bandwidth=bandwidth)\nfitter_w5.find_kde_max(x_kde, xmin=0, xmax=0.15)\nS_peak_w5 = fitter_w5.kde_max_pos[0]\nS_2peaks_w5_fiterr = fitter_w5.fit_res[0].params['p1_center'].stderr\ndir_ex_S_kde_w5 = S_peak_w5/(1 - S_peak_w5)\nprint('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w5)\n\n## 2-Asym-Gaussians\nfitter_w5a = mfit.MultiFitter(dx.S)\nfitter_w5a.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])\nfitter_w5a.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.05, p2_center=0.3))\nS_2peaks_w5a = fitter_w5a.params.loc[0, 'p1_center']\ndir_ex_S2p_w5a = S_2peaks_w5a/(1 - S_2peaks_w5a)\n#print(fitter_w5a.fit_obj[0].model.fit_report(min_correl=0.5))\nprint('Fitted direct excitation (na/naa) [2-Asym-Gauss]:', dir_ex_S2p_w5a)\n\nfig, ax = plt.subplots(1, 3, figsize=(19, 4.5))\n\nmfit.plot_mfit(fitter_w5, ax=ax[0])\nmfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)\nax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5*100))\n\nmfit.plot_mfit(fitter_w5, ax=ax[1], plot_model=False, plot_kde=True)\nmfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)\nax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w5*100));\n\nmfit.plot_mfit(fitter_w5a, ax=ax[2])\nmfit.plot_mfit(fitter_g, ax=ax[2], plot_model=False, plot_kde=False)\nax[2].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5a*100));", "Save data to file", "sample = data_id\nn_bursts_aa = ds_sas.num_bursts[0]", "The following string contains the list of variables to be saved. 
When saving, the order of the variables is preserved.", "variables = ('sample n_bursts_aa dir_ex_S1p dir_ex_S_kde dir_ex_S2p dir_ex_S2pa '\n 'dir_ex_S2p_w1 dir_ex_S_kde_w1 dir_ex_S_kde_w4 dir_ex_S_kde_w5 dir_ex_S2p_w5 dir_ex_S2p_w5a '\n 'S_2peaks_w5 S_2peaks_w5_fiterr\\n')", "This is just a trick to format the different variables:", "variables_csv = variables.replace(' ', ',')\nfmt_float = '{%s:.6f}'\nfmt_int = '{%s:d}'\nfmt_str = '{%s}'\nfmt_dict = {**{'sample': fmt_str}, \n **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}\nvar_dict = {name: eval(name) for name in variables.split()}\nvar_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\\n'\ndata_str = var_fmt.format(**var_dict)\n\nprint(variables_csv)\nprint(data_str)\n\n# NOTE: The file name should be the notebook name but with .csv extension\nwith open('results/usALEX-5samples-PR-raw-dir_ex_aa-fit-%s.csv' % ph_sel_name, 'a') as f:\n f.seek(0, 2)\n if f.tell() == 0:\n f.write(variables_csv)\n f.write(data_str)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Juan-Mateos/coll_int_ai_case
notebooks/ml_topic_analysis_exploration.ipynb
mit
[ "Prototype pipeline for the analysis of ML arxiv data\nWe query arxiv to get papers, and then run them against Crossref event data to find social media discussion and Microsoft Academic Knowledge to find institutional affiliations\n```\nQuery Arxiv -> Paper repository -> Analysis -> Topic model -> Classify\n | |\n | |----> Social network analysis of researchers\n | |----> Geocoding of institutions (via GRID?)\n |\n Extract author data from Google Scholar ----> Geocode institution via Google Places API?\n | |\n Enrich paper data with MAK(?) |---> Spatial and network analysis\n |\n Obtain Crossref Event data\n```\nPreamble", "%matplotlib inline\n\n#Some imports\nimport time\n#import xml.etree.ElementTree as etree\nfrom lxml import etree\nimport feedparser\n\n#Imports\n#Key imports are loaded from my profile (see standard_imports.py in src folder).\n\n#Paths\n\n#Paths\ntop = os.path.dirname(os.getcwd())\n\n#External data (to download the GRID database)\next_data = os.path.join(top,'data/external')\n\n#Interim data (to place seed etc)\nint_data = os.path.join(top,'data/interim')\n\n#Figures\nfig_path = os.path.join(top,'reports')\n\n#Models\nmod_path = os.path.join(top,'models')\n\n\n#Get date for saving files\ntoday = datetime.datetime.today()\n\ntoday_str = \"_\".join([str(x) for x in [today.day,today.month,today.year]])\n\n\n#Functions", "1. Get Arxiv data about machine learning\n\nWrite a APi querier and extract papers with the terms machine learning or artificial intelligence. Get 2000 results... and play nice!", "class Arxiv_querier():\n '''\n This class takes as an input a query and the number of results, and returns all the parsed results.\n Includes routines to deal with multiple pages of results.\n\n '''\n \n def __init__(self,base_url=\"http://export.arxiv.org/api/query?\"):\n '''\n Initialise\n '''\n \n self.base_url = base_url\n \n def query(self,query_string,max_results=100,wait_time=3):\n '''\n Query the base url\n \n '''\n #Attribute query string\n \n #Load base URL\n base_url = self.base_url\n \n #Prepare query string\n processed_query = re.sub(' ','+',query_string)\n \n self.query_string=\"_\".join(query_string.split(\" \"))\n \n start=0\n pages = 0\n \n #Run the query and store results for as long as the number of results is bigger than the max results\n keep_running = True\n \n result_store = []\n \n while keep_running==True:\n pages +=1\n print(pages)\n \n #Query url (NB the start arg, which will change as we go through different\n #pages)\n query_url = base_url+'search_query=all:{q}&start={s}&max_results={max_res}'.format(\n q=processed_query,s=start,max_res=max_results)\n \n \n #Download\n source = requests.get(query_url)\n \n #Parse the xml and get the entries (papers)\n parsed = feedparser.parse(source.content)\n \n #Extract entries\n entries = parsed['entries']\n \n #If the number of entries is bigger than the maximum number of results\n #this means we need to go to another page. 
We do that by offseting the\n #start with max results\n \n result_store.append(entries)\n \n if len(entries)==max_results:\n start+=max_results\n \n #If we have less than max results this means we have run out of \n #results and we toggle the keep_running switch off.\n if len(entries)<max_results:\n keep_running=False\n \n time.sleep(wait_time)\n \n #Save results in a flat list\n self.entry_results = [x for el in result_store for x in el]\n \n def extract_data(self):\n '''\n Here we extract data from the entries \n \n '''\n \n #Load entries\n entries = self.entry_results\n \n #Create df\n output = pd.concat([pd.DataFrame({\n 'query':self.query_string,\n 'id':x['id'],\n 'link':x['link'],\n 'title':x['title'],\n 'authors':\", \".join([el['name'] for el in x['authors']]),\n 'summary':x['summary'],\n 'updated':x['updated'],\n 'published':x['published'],\n 'category':x['arxiv_primary_category']['term'],\n 'pdf':str([el['href'] for el in x['links'] if el['type']=='application/pdf'][0]\n )},index=[0]) for x in entries]).reset_index(drop=True)\n \n output['year_published'] = [x.split(\"-\")[0] for x in output['published']]\n \n self.output_df = output\n\nquery_terms = ['artificial intelligence','machine learning','deep learning']\n\n\n#There are some inconsistencies in the number of results so we run the query three times for each\n#term and remove duplicated results\n\ndef extract_arxiv_data(term,max_results=1000,wait_time=10, tests=3):\n '''\n This function initialises the Arxiv_querier class, extracts the data and outputs it\n \n '''\n print(term)\n \n collected = []\n \n #We collect the data thrice\n for i in np.arange(tests):\n print('run'+ ' ' +str(i))\n initialised = Arxiv_querier()\n initialised.query(term,max_results,wait_time)\n initialised.extract_data()\n out = initialised.output_df\n collected.append(out)\n \n #We concatenate the dfs and remove the duplicates.\n \n output = pd.concat(collected)\n output_no_dupes = output.drop_duplicates('id')\n \n #Return both\n return([output,output_no_dupes])\n\n\narxiv_ai_results_three = [extract_arxiv_data(term=q) for q in query_terms]\n\nall_papers = pd.concat([x[1] for x in arxiv_ai_results_three]).drop_duplicates('id').reset_index(drop=True)\nprint(all_papers.shape)\nall_papers.head()\n\nall_papers.to_csv(int_data+'/{today}_ai_papers.csv'.format(today=today_str),index=False)", "2. 
Some exploratory analysis", "from nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize, sent_tokenize, RegexpTokenizer, PunktSentenceTokenizer\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer, PorterStemmer\nimport scipy\nimport ast\nimport string as st\nfrom bs4 import BeautifulSoup\n\nimport gensim\nfrom gensim.models.coherencemodel import CoherenceModel\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom itertools import product\n\nstopwords_c = stopwords.words('english')\nstemmer = PorterStemmer()\nlemmatizer= WordNetLemmatizer()\n\n#Read papers\nall_papers = pd.read_csv(int_data+'/19_8_2017_ai_papers.csv'.format(today=today_str))\n\n#Let's begin by looking at years\n\n#When where they published?\n\n#Year distribution\nyear_pubs = all_papers['year_published'].value_counts()\nyear_pubs.index = [int(x) for x in year_pubs.index]\n\nfig,ax = plt.subplots(figsize=(10,5))\n\nyear_pubs_sorted = year_pubs[sorted(year_pubs.index)]\nyear_pubs_subset = year_pubs_sorted[year_pubs_sorted.index>1991]\n\nax.plot(np.arange(1993,2018),year_pubs_subset.cumsum(),color='red')\nax.bar(np.arange(1993,2018),year_pubs_subset)\nax.hlines(xmin=1993,xmax=2017,y=[10000,20000,30000,40000],colors='green',linestyles='dashed',alpha=0.7)\n\n\nax.set_title(\"Papers on AI, ML and DL, total per year (bar) and cumulative (red)\",size=14)\n\n\n#What are the categories of the papers? Are we capturing what we think we are capturing\n#Top 20\nall_papers['category'].value_counts()[:20]", "See <a href='https://arxiv.org/help/api/user-manual'>here</a> for abbreviations of categories.\nIn a nutshell, AI is AI, LG is 'Learning', CV is 'Computer Vision', 'CL' is 'computation and language' and NE is 'Neural and Evolutionary computing'. SL.ML is kind of self-explanatory. We seem to be picking up the main things", "#NB do we want to remove hyphens?\npunct = re.sub('-','',st.punctuation)\n\ndef comp_sentence(sentence):\n '''\n Takes a sentence and pre-processes it.\n The output is the sentence as a bag of words\n \n '''\n #Remove line breaks and hyphens\n sentence = re.sub('\\n',' ',sentence)\n sentence = re.sub('-',' ',sentence)\n \n #Lowercase and tokenise\n text_lowered = [x.lower() for x in sentence.split(\" \")]\n \n #Remove signs and digits\n text_no_signs_digits = [\"\".join([x for x in el if x not in punct+st.digits]) for \n el in text_lowered]\n \n #Remove stop words, single letters\n text_stopped = [w for w in text_no_signs_digits if w not in stopwords_c and\n len(w)>1]\n \n #Stem\n text_lemmatised = [lemmatizer.lemmatize(w) for w in text_stopped]\n \n #Output\n return(text_lemmatised)\n\n#Process text\nclean_corpus = [comp_sentence(x) for x in all_papers['summary']]\n\n#We remove rate words\nword_freqs = pd.Series([x for el in clean_corpus for x in el]).value_counts()\n\nword_freqs[:30]\n\nrare_words = word_freqs.index[word_freqs<=2]\nrare_words[:10]", "Lots of the rare words seem to be typos and so forth. 
We remove them", "#Removing rare words\nclean_corpus_no_rare = [[x for x in el if x not in rare_words] for el in clean_corpus]", "2 NLP (topic modelling & word embeddings)", "#Identify 2-grams (frequent in science!)\nbigram_transformer = gensim.models.Phrases(clean_corpus_no_rare)\n\n#Train the model on the corpus\n\n#Let's do a bit of grid search\n\n#model = gensim.models.Word2Vec(bigram_transformer[clean_corpus], size=360, window=15, min_count=2, iter=20)\n\nmodel.most_similar('ai_safety')\n\nmodel.most_similar('complexity')\n\nmodel.most_similar('github')\n\n#Create 3 different dictionaries and bows depending on word sizes\n\ndef remove_words_below_threshold(corpus,threshold):\n '''\n Takes a list of terms and removes any which are below a threshold of occurrences\n \n '''\n #Produce token frequencies\n token_frequencies = pd.Series([x for el in corpus for x in el]).value_counts()\n \n #Identify tokens to drop (below a threshold)\n tokens_to_drop = token_frequencies.index[token_frequencies<=threshold]\n \n #Processed corpus\n processed_corpus = [[x for x in el if x not in tokens_to_drop] for el in corpus]\n \n #Dictionary\n dictionary = gensim.corpora.Dictionary(processed_corpus)\n corpus_bow = [dictionary.doc2bow(x) for x in processed_corpus]\n \n return([dictionary,corpus_bow,processed_corpus])\n\n#Initial model run to see what comes out.\n\n#Transform corpus to bigrams\ntransformed_corpus = bigram_transformer[clean_corpus]\n\ncorpora_to_process = {str(x):remove_words_below_threshold(transformed_corpus,x) for x in [1,2,5,10]}\n\n#Need to turn this into a function.\n#Topic modelling\n\n#Parameters for Grid search.\nlda_params = list(product([100,200,300],[2,5]))\n\n#Model container\nlda_models = []\n\nfor x in lda_params:\n #Print stage\n print('{x}_{y}'.format(x=x[0],y=x[1]))\n \n #Load corpus and dict\n \n dictionary = corpora_to_process[str(x[1])][0]\n corpus_bow = corpora_to_process[str(x[1])][1]\n corpus = corpora_to_process[str(x[1])][2]\n \n print('training')\n #Train model\n mod = gensim.models.LdaModel(corpus_bow,num_topics=x[0],id2word=dictionary,\n passes=10,iterations=50)\n \n print('coherence')\n #Extract coherence\n cm = CoherenceModel(mod,texts=corpus,\n dictionary=dictionary,coherence='u_mass')\n \n #Get value\n try:\n coherence_value = cm.get_coherence()\n except:\n print('coherence_error')\n coherence_value='error'\n \n \n lda_models.append([x,mod,[coherence_value,cm]])\n\nwith open(mod_path+'/{t}_ai_topic_models.p'.format(t=today_str),'wb') as outfile:\n pickle.dump(lda_models,outfile)\n\n#Visualiase model performance\n\nmodel_eval = pd.DataFrame([[x[0][0],x[0][1],x[2][0]] for x in lda_models],columns=['topics','word_lim','coherence'])\n\nfig,ax = plt.subplots(figsize=(10,5))\n\ncols = ['red','green','blue']\nlegs = []\n\nfor num,x in enumerate(set(model_eval['word_lim'])):\n \n subset = model_eval.loc[[z == x for z in model_eval['word_lim']],:]\n \n ax.plot(subset.loc[:,'topics'],subset.loc[:,'coherence'],color=cols[num-1])\n \n legs.append([cols[num-1],x]) \n\nax.legend(labels=[x[1] for x in legs],title='Min word count')\nax.set_title('Model performance with different parameters')\n\nwith open(mod_path+'/19_8_2017_ai_topic_models.p','rb') as infile:\n lda_models = pickle.load(infile)\n\ncheck_model= lda_models[1][1]\n\n#Explore topics via LDAvis\nimport pyLDAvis.gensim\n\npyLDAvis.enable_notebook()\npyLDAvis.gensim.prepare(\n #Insert best model/corpus/topics here \n check_model, \n corpora_to_process[str(5)][1],\n corpora_to_process[str(5)][0])\n\n#Can we extract 
the relevant terms for the topics as in Sievert and Shirley in order to name them?\n\n#First - create a matrix with top 30 terms per topic\ntop_30_kws = [check_model.get_topic_terms(topicid=n,topn=1000) for n in np.arange(0,100)]\n\n#Keyword df where the columns are tokens and the rows are topics\ntop_30_kws_df = pd.concat([pd.DataFrame([x[1] for x in el],\n index=[x[0] for x in el]) for el in top_30_kws],\n axis=1).fillna(0).T.reset_index(drop=True)\n\n#This is the dictionary\nselected_dictionary = corpora_to_process[str(5)][0]\n\n#Total number of terms in the document\ntotal_terms = np.sum([vals for vals in selected_dictionary.dfs.values()])\n\n#Appearances of different terms\ndocument_freqs = pd.Series([v for v in selected_dictionary.dfs.values()],\n index=[k for k in selected_dictionary.dfs.keys()])[top_30_kws_df.columns]/total_terms\n\n#Normalise the terms (divide the vector of probabilities of each keywords in each topic by the totals)\ntop_30_kws_normalised = top_30_kws_df.apply(lambda x: x/document_freqs,axis=0)\n\n#Now we want to extract, for each topic, the relevance score.\n\ndef relevance_score(prob_in_topic,prob_in_corpus,id2word_lookup,lambda_par = 0.6):\n '''\n Combines the probabilities using the definition in Sievert and Shirley and returns the top 5 named\n #terms for each topic \n '''\n #Create dataframe\n combined = pd.concat([prob_in_topic,prob_in_corpus],axis=1)\n \n combined.columns=['prob_in_topic','prob_in_corpus']\n \n #Create relevance metric\n combined['relevance'] = lambda_par*combined['prob_in_topic'] + (1-lambda_par)*combined['prob_in_corpus']\n \n #Top words\n top_ids = list(combined.sort_values('relevance',ascending=False).index[:5])\n \n #Top words\n top_words = \"_\".join([id2word_lookup[this_id] for this_id in top_ids])\n \n return(top_words)\n\n\nrelevance_scores = [relevance_score(top_30_kws_df.iloc[n,:],\n top_30_kws_normalised.iloc[n,:],\n dictionary.id2token,lambda_par=0.6) for n in np.arange(len(top_30_kws_df))]\n\n%%time\n#Create a df with the topic predictions.\npaper_preds = check_model[corpora_to_process[str(5)][1]]\n\npaper_topics_df = pd.concat([pd.DataFrame([x[1] for x in el],index=[x[0] for x in el]) for el in paper_preds],\n axis=1).T\n\n#Replace NAs with zeros and drop pointless index\npaper_topics_df.fillna(value=0,inplace=True)\npaper_topics_df.reset_index(drop=True,inplace=True)\n\npaper_topics_df.columns = relevance_scores\n\npaper_topics_df.to_csv(int_data+'/{t}_paper_topic_mix.csv'.format(t=today_str),index=False)\n\n#paper_topics_df = pd.read_csv(int_data+'/{t}_paper_topic_mix.csv')\n\n#Quick test of Deep learning papers\n\n#These are papers with a topic that seems to capture deep learning\ndl_papers = [x>0.05 for x in paper_topics_df['network_training_model_deep_deep_learning']]\n\ndl_papers_metadata = pd.concat([pd.Series(dl_papers),all_papers],axis=1)\n\npaper_frequencies = pd.crosstab(dl_papers_metadata.year_published,dl_papers_metadata[0])\n\npaper_frequencies.columns=['no_dl','dl']\n\n\nfig,ax = plt.subplots(figsize=(10,5))\n\npaper_frequencies.plot.bar(stacked=True,ax=ax)\nax.set_title('Number of papers in the DL \\'topic\\'')\nax.legend(labels=['Not ANN/DL related','NN/DL topic >0.05'])", "Some of this is interesting. Doesn't seem to be picking up the policy related terms (safety, discrimination)\nNext stages - focus on policy related terms. Can we look for papers in keyword dictionaries identified through the word embeddings?\nObtain Google Scholar data", "#How many authors are there in the data? 
Can we collect all their institutions from Google Scholar\n\npaper_authors = pd.Series([x for el in all_papers['authors'] for x in el.split(\", \")])\n\npaper_authors_unique = paper_authors.drop_duplicates()\n\nlen(paper_authors_unique)", "We have 68,000 authors. It might take a while to get their data from Google Scholar", "#Top authors and frequencies\n\nauthors_freq = paper_authors.value_counts()\n\nfig,ax=plt.subplots(figsize=(10,3))\n\nax.hist(authors_freq,bins=30)\nax.set_title('Distribution of publications')\n\n#Pretty skewed distribution!\nprint(authors_freq.describe())\n\nnp.sum(authors_freq>2)", "Less than 10,000 authors with 3+ papers in the data", "get_scholar_data(\n\n%%time\n#Test run\nimport scholarly\n\n@ratelim.patient(max_calls=30,time_interval=60)\ndef get_scholar_data(scholarly_object):\n '''''' \n try:\n scholarly_object = next(scholarly_object)\n metadata = {}\n metadata['name']=scholarly_object.name\n metadata['affiliation'] = scholarly_object.affiliation\n metadata['interests'] = scholarly_object.interests\n return(metadata)\n \n except:\n return('nothing')\n \n\n#Extract information from each query (it is a generator)\n#Get data\n\n#ml_author_gscholar=[]\n\nfor num,x in enumerate(paper_authors_unique[1484:]):\n if num % 100 == 0:\n print(str(num)+\":\"+x) \n\n result = get_scholar_data(scholarly.search_author(x))\n ml_author_gscholar.append(result)\n\nlen(ml_author_gscholar)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jmschrei/pomegranate
tutorials/old/Tutorial_7_Parallelization.ipynb
mit
[ "pomegranate and parallelization\npomegranate supports parallelization through a set of built in functions based off of joblib. All computationally intensive functions in pomegranate are implemented in cython with the global interpreter lock (GIL) released, allowing for multithreading to be used for efficient parallel processing. The following functions can be called for parallelization:\n\nfit\nsummarize\npredict\npredict_proba\npredict_log_proba\nlog_probability\nprobability\n\nThese functions can all be simply parallelized by passing in n_jobs=X to the method calls. This tutorial will demonstrate how to use those calls. First we'll look at a simple multivariate Gaussian mixture model, and compare its performance to sklearn. Then we'll look at a hidden Markov model with Gaussian emissions, and lastly we'll look at a mixture of Gaussian HMMs. These can all utilize the build-in parallelization that pomegranate has.\nLet's dive right in!", "%pylab inline\nfrom sklearn.mixture import GaussianMixture\nfrom pomegranate import *\nimport seaborn, time\nseaborn.set_style('whitegrid')\n\ndef create_dataset(n_samples, n_dim, n_classes, alpha=1):\n \"\"\"Create a random dataset with n_samples in each class.\"\"\"\n \n X = numpy.concatenate([numpy.random.normal(i*alpha, 1, size=(n_samples, n_dim)) for i in range(n_classes)])\n y = numpy.concatenate([numpy.zeros(n_samples) + i for i in range(n_classes)])\n idx = numpy.arange(X.shape[0])\n numpy.random.shuffle(idx)\n return X[idx], y[idx]", "1. General Mixture Models\npomegranate has a very efficient implementation of mixture models, particularly Gaussian mixture models. Lets take a look at how fast pomegranate is versus sklearn, and then see how much faster parallelization can get it to be.", "n, d, k = 1000000, 5, 3\nX, y = create_dataset(n, d, k)\n\nprint \"sklearn GMM\"\n%timeit GaussianMixture(n_components=k, covariance_type='full', max_iter=15, tol=1e-10).fit(X)\nprint \nprint \"pomegranate GMM\"\n%timeit GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, max_iterations=15, stop_threshold=1e-10)\nprint\nprint \"pomegranate GMM (4 jobs)\"\n%timeit GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, n_jobs=4, max_iterations=15, stop_threshold=1e-10)", "It looks like on a large dataset not only is pomegranate faster than sklearn at performing 15 iterations of EM on 3 million 5 dimensional datapoints with 3 clusters, but the parallelization is able to help in speeding things up. \nLets now take a look at the time it takes to make predictions using GMMs. Lets fit the model to a small amount of data, and then predict a larger amount of data drawn from the same underlying distributions.", "d, k = 25, 2\nX, y = create_dataset(1000, d, k)\na = GaussianMixture(k, n_init=1, max_iter=25).fit(X)\nb = GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, max_iterations=25)\n\ndel X, y\nn = 1000000\nX, y = create_dataset(n, d, k)\n\nprint \"sklearn GMM\"\n%timeit -n 1 a.predict_proba(X)\nprint\nprint \"pomegranate GMM\"\n%timeit -n 1 b.predict_proba(X)\nprint\nprint \"pomegranate GMM (4 jobs)\"\n%timeit -n 1 b.predict_proba(X, n_jobs=4)", "It looks like pomegranate can be slightly slower than sklearn when using a single processor, but that it can be parallelized to get faster performance. 
At the same time, predictions at this level happen so quickly (millions per second) that this may not be the most reliable test for parallelization.\nTo ensure that we're getting the exact same results just faster, lets subtract the predictions from each other and make sure that the sum is equal to 0.", "print (b.predict_proba(X) - b.predict_proba(X, n_jobs=4)).sum()", "Great, no difference between the two.\nLets now make sure that pomegranate and sklearn are learning basically the same thing. Lets fit both models to some 2 dimensional 2 component data and make sure that they both extract the underlying clusters by plotting them.", "d, k = 2, 2\nX, y = create_dataset(1000, d, k, alpha=2)\na = GaussianMixture(k, n_init=1, max_iter=25).fit(X)\nb = GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, max_iterations=25)\n\ny1, y2 = a.predict(X), b.predict(X)\n\nplt.figure(figsize=(16,6))\nplt.subplot(121)\nplt.title(\"sklearn clusters\", fontsize=14)\nplt.scatter(X[y1==0, 0], X[y1==0, 1], color='m', edgecolor='m')\nplt.scatter(X[y1==1, 0], X[y1==1, 1], color='c', edgecolor='c')\n\nplt.subplot(122)\nplt.title(\"pomegranate clusters\", fontsize=14)\nplt.scatter(X[y2==0, 0], X[y2==0, 1], color='m', edgecolor='m')\nplt.scatter(X[y2==1, 0], X[y2==1, 1], color='c', edgecolor='c')", "It looks like we're getting the same basic results for the two. The two algorithms are initialized a bit differently, and so it can be difficult to directly compare the results between them, but it looks like they're getting roughly the same results.\n3. Multivariate Gaussian HMM\nNow let's move on to training a hidden Markov model with multivariate Gaussian emissions with a diagonal covariance matrix. We'll randomly generate some Gaussian distributed numbers and use pomegranate with either one or four threads to fit our model to the data.", "X = numpy.random.randn(1000, 500, 50)\n\nprint \"pomegranate Gaussian HMM (1 job)\"\n%timeit -n 1 -r 1 HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=5)\nprint\nprint \"pomegranate Gaussian HMM (2 jobs)\"\n%timeit -n 1 -r 1 HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=5, n_jobs=2)\nprint\nprint \"pomegranate Gaussian HMM (2 jobs)\"\n%timeit -n 1 -r 1 HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=5, n_jobs=4)", "All we had to do was pass in the n_jobs parameter to the fit function in order to get a speed improvement. It looks like we're getting a really good speed improvement, as well! This is mostly because the HMM algorithms perform a lot more operations than the other models, and so spend the vast majority of time with the GIL released. You may not notice as strong speedups when using a MultivariateGaussianDistribution because BLAS uses multithreaded operations already internally, even when only one job is specified.\nNow lets look at the prediction function to make sure the we're getting speedups there as well. You'll have to use a wrapper function to parallelize the predictions for a HMM because it returns an annotated sequence rather than a single value like a classic machine learning model might.", "model = HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=2, verbose=False)\n\nprint \"pomegranate Gaussian HMM (1 job)\"\n%timeit predict_proba(model, X)\nprint\nprint \"pomegranate Gaussian HMM (2 jobs)\"\n%timeit predict_proba(model, X, n_jobs=2)", "Great, we're getting a really good speedup on that as well! 
Looks like the parallel processing is more efficient with a bigger, more complex model, than with a simple one. This can make sense, because all inference/training is more complex, and so there is more time with the GIL released compared to with the simpler operations.\n4. Mixture of Hidden Markov Models\nLet's stack another layer onto this model by making it a mixture of these hidden Markov models, instead of a single one. At this point we're sticking a multivariate Gaussian HMM into a mixture and we're going to train this big thing in parallel.", "def create_model(mus):\n n = mus.shape[0]\n \n starts = numpy.zeros(n)\n starts[0] = 1.\n \n ends = numpy.zeros(n)\n ends[-1] = 0.5\n \n transition_matrix = numpy.zeros((n, n))\n distributions = []\n \n for i in range(n):\n transition_matrix[i, i] = 0.5\n \n if i < n - 1:\n transition_matrix[i, i+1] = 0.5\n \n distribution = IndependentComponentsDistribution([NormalDistribution(mu, 1) for mu in mus[i]])\n distributions.append(distribution)\n \n model = HiddenMarkovModel.from_matrix(transition_matrix, distributions, starts, ends)\n return model\n \n\ndef create_mixture(mus):\n hmms = [create_model(mu) for mu in mus]\n return GeneralMixtureModel(hmms)\n\nn, d = 50, 10\nmus = [(numpy.random.randn(d, n)*0.2 + numpy.random.randn(n)*2).T for i in range(2)]\n\nmodel = create_mixture(mus)\nX = numpy.random.randn(400, 150, d)\n\nprint \"pomegranate Mixture of Gaussian HMMs (1 job)\"\n%timeit model.fit(X, max_iterations=5)\nprint\n\nmodel = create_mixture(mus)\nprint \"pomegranate Mixture of Gaussian HMMs (2 jobs)\"\n%timeit model.fit(X, max_iterations=5, n_jobs=2)", "Looks like we're getting a really nice speed improvement when training this complex model. Let's take a look now at the time it takes to do inference with it.", "model = create_mixture(mus)\n\nprint \"pomegranate Mixture of Gaussian HMMs (1 job)\"\n%timeit model.predict_proba(X)\nprint\n\nmodel = create_mixture(mus)\nprint \"pomegranate Mixture of Gaussian HMMs (2 jobs)\"\n%timeit model.predict_proba(X, n_jobs=2)", "We're getting a good speed improvement here too through parallelization.\nConclusions\nHopefully you'll find pomegranate useful in your work! Parallelization should allow you to train complex models faster than before. Keep in mind though that there is an overhead to using parallel processing, and so it's possible that on some smaller examples it does not work as well. In general, the bigger the dataset, the closer to a linear speedup you'll get with pomegranate.\nIf you have any interesting examples of how you've used pomegranate in your work, I'd love to hear about them. In addition I'd like to hear any feedback you may have on features you'd like to see. Please shoot me an email. Good luck!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
arsenovic/galgebra
examples/ipython/inner_product.ipynb
bsd-3-clause
[ "from __future__ import print_function\nfrom sympy import Symbol, symbols, sin, cos, Rational, expand, simplify, collect, S\nfrom galgebra.printer import Eprint, Get_Program, Print_Function, Format\nfrom galgebra.ga import Ga, one, zero\nfrom galgebra.mv import Nga\nFormat()\n\nX = (x, y, z) = symbols('x y z')\no3d = Ga('e_x e_y e_z', g=[1, 1, 1], coords=X)\n(ex, ey, ez) = o3d.mv()\ngrad = o3d.grad\n\nc = o3d.mv('c', 'scalar')\n\na = o3d.mv('a', 'vector')\nb = o3d.mv('b', 'vector')\n\nA = o3d.mv('A','mv')\nB = o3d.mv('B','mv')", "The inner product of blades in GAlgebra is zero if either operand is a scalar:\n$$\\begin{split}\\begin{aligned}\n {\\boldsymbol{A}}{r}{\\wedge}{\\boldsymbol{B}}{s} &\\equiv {\\left <{{\\boldsymbol{A}}{r}{\\boldsymbol{B}}{s}} \\right >{r+s}} \\\n {\\boldsymbol{A}}{r}\\cdot{\\boldsymbol{B}}{s} &\\equiv {\\left { { \\begin{array}{cc}\n r\\mbox{ and }s \\ne 0: & {\\left <{{\\boldsymbol{A}}{r}{\\boldsymbol{B}}{s}} \\right >{{\\left |{r-s}\\right |}}} \\\n r\\mbox{ or }s = 0: & 0 \\end{array}} \\right }}\n \\end{aligned}\\end{split}$$\nThis definition comes from David Hestenes and Garret Sobczyk, “Clifford Algebra to Geometric Calculus,” Kluwer Academic Publishers, 1984.\nIn some other literature, the inner product is defined without the exceptional case for scalar part and the definition above is known as \"the modified Hestenes inner product\" (this name comes from the source code of GAViewer).", "c|a\n\na|c\n\nc|A\n\nA|c", "$ab=a \\wedge b + a \\cdot b$ holds for vectors:", "a*b\n\na^b\n\na|b\n\n(a*b)-(a^b)-(a|b)", "$aA=a \\wedge A + a \\cdot A$ holds for the products between vectors and multivectors:", "a*A\n\na^A\n\na|A\n\n(a*A)-(a^A)-(a|A)", "$AB=A \\wedge B + A \\cdot B$ does NOT hold for the products between multivectors and multivectors:", "A*B\n\nA|B\n\n(A*B)-(A^B)-(A|B)\n\n(A<B)+(A|B)+(A>B)-A*B" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jbliss1234/ML
t81_558_class4_class_reg.ipynb
apache-2.0
[ "T81-558: Applications of Deep Neural Networks\nClass 4: Classification and Regression\n* Instructor: Jeff Heaton, School of Engineering and Applied Science, Washington University in St. Louis\n* For more information visit the class website.\nBinary Classification, Classification and Regression\n\nBinary Classification - Classification between two possibilities (positive and negative). Common in medical testing, does the person have the disease (positive) or not (negative).\nClassification - Classification between more than 2. The iris dataset (3-way classification).\nRegression - Numeric prediction. How many MPG does a car get?\n\nIn this class session we will look at some visualizations for all three.\nFeature Vector Encoding\nThese are exactly the same feature vector encoding functions from Class 3. They must be defined for this class as well. For more information, refer to class 3.", "from sklearn import preprocessing\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)\ndef encode_text_dummy(df,name):\n dummies = pd.get_dummies(df[name])\n for x in dummies.columns:\n dummy_name = \"{}-{}\".format(name,x)\n df[dummy_name] = dummies[x]\n df.drop(name, axis=1, inplace=True)\n\n# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).\ndef encode_text_index(df,name):\n le = preprocessing.LabelEncoder()\n df[name] = le.fit_transform(df[name])\n return le.classes_\n\n# Encode a numeric column as zscores\ndef encode_numeric_zscore(df,name,mean=None,sd=None):\n if mean is None:\n mean = df[name].mean()\n\n if sd is None:\n sd = df[name].std()\n\n df[name] = (df[name]-mean)/sd\n\n# Convert all missing values in the specified column to the median\ndef missing_median(df, name):\n med = df[name].median()\n df[name] = df[name].fillna(med)\n\n# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs\ndef to_xy(df,target):\n result = []\n for x in df.columns:\n if x != target:\n result.append(x)\n\n # find out the type of the target column. Is it really this hard? :(\n target_type = df[target].dtypes\n target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type\n \n # Encode to int for classification, float otherwise. 
TensorFlow likes 32 bits.\n if target_type in (np.int64, np.int32):\n # Classification\n return df.as_matrix(result).astype(np.float32),df.as_matrix([target]).astype(np.int32)\n else:\n # Regression\n return df.as_matrix(result).astype(np.float32),df.as_matrix([target]).astype(np.float32)\n \n# Nicely formatted time string\ndef hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)", "Toolkit: Visualization Functions\nThis class will introduce 3 different visualizations that can be used with the two different classification type neural networks and regression neural networks.\n\nConfusion Matrix - For any type of classification neural network.\nROC Curve - For binary classification.\nLift Curve - For regression neural networks.\n\nThe code used to produce these visualizations is shown here:", "%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\n# Plot a confusion matrix.\n# cm is the confusion matrix, names are the names of the classes.\ndef plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(names))\n plt.xticks(tick_marks, names, rotation=45)\n plt.yticks(tick_marks, names)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n\n# Plot an ROC. pred - the predictions, y - the expected output.\ndef plot_roc(pred,y):\n fpr, tpr, _ = roc_curve(y_test, pred)\n roc_auc = auc(fpr, tpr)\n\n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic (ROC)')\n plt.legend(loc=\"lower right\")\n plt.show()\n \n# Plot a lift curve. pred - the predictions, y - the expected output.\ndef chart_regression(pred,y):\n t = pd.DataFrame({'pred' : pred.flatten(), 'y' : y_test.flatten()})\n t.sort_values(by=['y'],inplace=True)\n\n a = plt.plot(t['y'].tolist(),label='expected')\n b = plt.plot(t['pred'].tolist(),label='prediction')\n plt.ylabel('output')\n plt.legend()\n plt.show()", "Binary Classification\nBinary classification is used to create a model that classifies between only two classes. These two classes are often called \"positive\" and \"negative\". Consider the following program that uses the wcbreast_wdbc dataset to classify if a breast tumor is cancerous (malignant) or not (benign). 
The iris dataset is not binary, because there are three classes (3 types of iris).", "import os\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nimport tensorflow.contrib.learn as skflow\nimport numpy as np\nfrom sklearn import metrics\n\npath = \"./data/\"\n \nfilename = os.path.join(path,\"wcbreast_wdbc.csv\") \ndf = pd.read_csv(filename,na_values=['NA','?'])\n\n# Encode feature vector\ndf.drop('id',axis=1,inplace=True)\nencode_numeric_zscore(df,'mean_radius')\nencode_text_index(df,'mean_texture') \nencode_text_index(df,'mean_perimeter')\nencode_text_index(df,'mean_area')\nencode_text_index(df,'mean_smoothness')\nencode_text_index(df,'mean_compactness')\nencode_text_index(df,'mean_concavity')\nencode_text_index(df,'mean_concave_points')\nencode_text_index(df,'mean_symmetry')\nencode_text_index(df,'mean_fractal_dimension')\nencode_text_index(df,'se_radius')\nencode_text_index(df,'se_texture')\nencode_text_index(df,'se_perimeter')\nencode_text_index(df,'se_area')\nencode_text_index(df,'se_smoothness')\nencode_text_index(df,'se_compactness')\nencode_text_index(df,'se_concavity')\nencode_text_index(df,'se_concave_points')\nencode_text_index(df,'se_symmetry')\nencode_text_index(df,'se_fractal_dimension')\nencode_text_index(df,'worst_radius')\nencode_text_index(df,'worst_texture')\nencode_text_index(df,'worst_perimeter')\nencode_text_index(df,'worst_area')\nencode_text_index(df,'worst_smoothness')\nencode_text_index(df,'worst_compactness')\nencode_text_index(df,'worst_concavity')\nencode_text_index(df,'worst_concave_points')\nencode_text_index(df,'worst_symmetry')\nencode_text_index(df,'worst_fractal_dimension')\ndiagnosis = encode_text_index(df,'diagnosis')\nnum_classes = len(diagnosis)\n\n# Create x & y for training\n\n# Create the x-side (feature vectors) of the training\nx, y = to_xy(df,'diagnosis')\n \n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split( \n x, y, test_size=0.25, random_state=42) \n \n# Create a deep neural network with 3 hidden layers of 10, 20, 10\nclassifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,\n steps=10000)\n\n# Early stopping\nearly_stop = skflow.monitors.ValidationMonitor(x_test, y_test,\n early_stopping_rounds=200, print_steps=50, n_classes=num_classes)\n \n# Fit/train neural network\nclassifier.fit(x_train, y_train, early_stop)\n\n# Measure accuracy\nscore = metrics.accuracy_score(y, classifier.predict(x))\nprint(\"Final accuracy: {}\".format(score))\n", "Confusion Matrix\nThe confusion matrix is a common visualization for both binary and larger classification problems. Often a model will have difficulty differentiating between two classes. For example, a neural network might be really good at telling the difference between cats and dogs, but not so good at telling the difference between dogs and wolves. 
The following code generates a confusion matrix:", "import numpy as np\n\nfrom sklearn import svm, datasets\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\npred = classifier.predict(x_test)\n \n# Compute confusion matrix\ncm = confusion_matrix(y_test, pred)\nnp.set_printoptions(precision=2)\nprint('Confusion matrix, without normalization')\nprint(cm)\nplt.figure()\nplot_confusion_matrix(cm, diagnosis)\n\n# Normalize the confusion matrix by row (i.e by the number of samples\n# in each class)\ncm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nprint('Normalized confusion matrix')\nprint(cm_normalized)\nplt.figure()\nplot_confusion_matrix(cm_normalized, diagnosis, title='Normalized confusion matrix')\n\nplt.show()", "The above two confusion matrixes show the same network. The bottom (normalized) is the type you will normally see. Notice the two labels. The label \"B\" means benign (no cancer) and the label \"M\" means malignant (cancer). The left-right (x) axis are the predictions, the top-bottom) are the expected outcomes. A perfect model (that never makes an error) has a dark blue diagonal that runs from top-left to bottom-right. \nTo read, consider the top-left square. This square indicates \"true labeled\" of B and also \"predicted label\" of B. This is good! The prediction matched the truth. The blueness of this box represents how often \"B\" is classified correct. It is not darkest blue. This is because the square to the right(which is off the perfect diagonal) has some color. This square indicates truth of \"B\" but prediction of \"M\". The white square, at the bottom-left, indicates a true of \"M\" but predicted of \"B\". The whiteness indicates this rarely happens. \nYour conclusion from the above chart is that the model sometimes classifies \"B\" as \"M\" (a false negative), but never mis-classifis \"M\" as \"B\". Always look for the dark diagonal, this is good!\nROC Curves\nROC curves can be a bit confusing. However, they are very common. It is important to know how to read them. Even their name is confusing. Do not worry about their name, it comes from electrical engineering (EE).\nBinary classification is common in medical testing. Often you want to diagnose if someone has a disease. This can lead to two types of errors, know as false positives and false negatives:\n\nFalse Positive - Your test (neural network) indicated that the patient had the disease; however, the patient did not have the disease.\nFalse Negative - Your test (neural network) indicated that the patient did not have the disease; however, the patient did have the disease.\nTrue Positive - Your test (neural network) correctly identified that the patient had the disease.\nTrue Negative - Your test (neural network) correctly identified that the patient did not have the disease.\n\nTypes of errors:\n\nNeural networks classify in terms of probbility of it being positive. However, at what probability do you give a positive result? Is the cutoff 50%? 90%? Where you set this cutoff is called the threshold. Anything above the cutoff is positive, anything below is negative. Setting this cutoff allows the model to be more sensative or specific:\n\nThe following shows a more sensitive cutoff:\n\nAn ROC curve measures how good a model is regardless of the cutoff. The following shows how to read a ROC chart:\n\nThe following code shows an ROC chart for the breast cancer neural network. The area under the curve (AUC) is also an important measure. 
The larger the AUC, the better.", "pred = classifier.predict_proba(x_test)\npred = pred[:,1] # Only positive cases\n# print(pred[:,1])\nplot_roc(pred,y_test)\n", "Classification\nWe've already seen multi-class classification, with the iris dataset. Confusion matrixes work just fine with 3 classes. The following code generates a confusion matrix for iris.", "import os\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nimport tensorflow.contrib.learn as skflow\nimport numpy as np\n\npath = \"./data/\"\n \nfilename = os.path.join(path,\"iris.csv\") \ndf = pd.read_csv(filename,na_values=['NA','?'])\n\n# Encode feature vector\nencode_numeric_zscore(df,'petal_w')\nencode_numeric_zscore(df,'petal_l')\nencode_numeric_zscore(df,'sepal_w')\nencode_numeric_zscore(df,'sepal_l')\nspecies = encode_text_index(df,\"species\")\nnum_classes = len(species)\n\n# Create x & y for training\n\n# Create the x-side (feature vectors) of the training\nx, y = to_xy(df,'species')\n \n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split( \n x, y, test_size=0.25, random_state=45) \n # as much as I would like to use 42, it gives a perfect result, and a boring confusion matrix!\n \n# Create a deep neural network with 3 hidden layers of 10, 20, 10\nclassifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,\n steps=10000)\n\n# Early stopping\nearly_stop = skflow.monitors.ValidationMonitor(x_test, y_test,\n early_stopping_rounds=200, print_steps=50, n_classes=num_classes)\n \n# Fit/train neural network\nclassifier.fit(x_train, y_train, early_stop)\n\n\nimport numpy as np\n\nfrom sklearn import svm, datasets\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\n\n\npred = classifier.predict(x_test)\n \n# Compute confusion matrix\ncm = confusion_matrix(y_test, pred)\nnp.set_printoptions(precision=2)\nprint('Confusion matrix, without normalization')\nprint(cm)\nplt.figure()\nplot_confusion_matrix(cm, species)\n\n# Normalize the confusion matrix by row (i.e by the number of samples\n# in each class)\ncm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nprint('Normalized confusion matrix')\nprint(cm_normalized)\nplt.figure()\nplot_confusion_matrix(cm_normalized, species, title='Normalized confusion matrix')\n\nplt.show()", "See the strong diagonal? Iris is easy. See the light blue near the bottom? Sometimes virginica is confused for versicolor.\nRegression\nWe've already seen regression with the MPG dataset. Regression uses its own set of visualizations, one of the most common is the lift chart. 
The following code generates a lift chart.", "import tensorflow.contrib.learn as skflow\nimport pandas as pd\nimport os\nimport numpy as np\nfrom sklearn import metrics\nfrom scipy.stats import zscore\n\npath = \"./data/\"\n\nfilename_read = os.path.join(path,\"auto-mpg.csv\")\ndf = pd.read_csv(filename_read,na_values=['NA','?'])\n\n# create feature vector\nmissing_median(df, 'horsepower')\ndf.drop('name',1,inplace=True)\nencode_numeric_zscore(df, 'horsepower')\nencode_numeric_zscore(df, 'weight')\nencode_numeric_zscore(df, 'cylinders')\nencode_numeric_zscore(df, 'displacement')\nencode_numeric_zscore(df, 'acceleration')\nencode_text_dummy(df, 'origin')\n\n# Encode to a 2D matrix for training\nx,y = to_xy(df,['mpg'])\n\n# Split into train/test\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size=0.25, random_state=42)\n\n# Create a deep neural network with 3 hidden layers of 50, 25, 10\nregressor = skflow.TensorFlowDNNRegressor(hidden_units=[50, 25, 10], steps=5000)\n\n# Early stopping\nearly_stop = skflow.monitors.ValidationMonitor(x_test, y_test,\n early_stopping_rounds=200, print_steps=50)\n\n# Fit/train neural network\nregressor.fit(x_train, y_train, early_stop)\n\npred = regressor.predict(x_test)\n\nchart_regression(pred,y_test)", "To generate a lift chart, perform the following activities:\n\nSort the data by expected output. Plot the blue line above.\nFor every point on the x-axis plot the predicted value for that same data point. This is the green line above.\nThe x-axis is just 0 to 100% of the dataset. The expected always starts low and ends high.\nThe y-axis is ranged according to the values predicted.\n\nReading a lift chart:\n* The expected and predict lines should be close. Notice where one is above the ot other.\n* The above chart is the most accurate on lower MPG." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
agmarrugo/sensors-actuators
notebooks/Ex_2_3.ipynb
mit
[ "The transfer function\nAnalytic form of transfer function. In certain cases the transfer function is available as an analytic expression. One common transfer function used for resistance temperature sensors (to be discussed in Chapter 3) is the Callendar– Van Duzen equation. It gives the resistance of the sensor at a temperature T as\n$$R(T)=R_{0}(1+AT+BT^2+C(T-100)T^3) \\enspace,$$\nwhere the constants A, B, and C are determined by direct measurement of resistance for the specific material used in the sensor and $R_0$ is the temperature of the sensor at 0 ºC. Typical temperatures used for calibration are the oxygen point (-182.962 ºC; the equilibrium between liquid oxygen and its vapor), the triple point of water (0.01 ºC; the point of equilibrium temperature between ice, liquid water, and water vapor), the steam point (100 ºC; the equilibrium point between water and vapor), the zinc point (419.58 ºC; the equilibrium point between solid and liquid zinc), the silver point (961.93 ºC), and the gold point (1064.43 ºC), as well as others. Consider a platinum resistance sensor with a nominal resistance of 25 $\\Omega $ at 0 C. To calibrate the sensor its resistance is measured at the oxygen point as 6.2 $\\Omega $, at the steam point as 35.6 $\\Omega $, and at the zinc point as 66.1 $\\Omega $. Calculate the coefficients A, B, and C and plot the transfer function between -200 ºC and 600 ºC.\nSolution\nIn order to obtain the sensor calibration, several measurements at different temperatures where taken:\n\n\n6.2 $\\Omega $ at a temperature of -182.962 ºC (oxygen point).\n\n\n35.6 $\\Omega $ at a temperature of 100 ºC (steam point).\n\n\n66.1 $\\Omega $ at a temperature of 419.58 ºC (zinc point)\n\n\nLet's plot the points,", "import matplotlib.pyplot as plt\nimport numpy as np\nfrom math import log, exp\n%matplotlib inline\nfrom scipy.interpolate import InterpolatedUnivariateSpline\n\n\nT_exp = np.array([-182.962,100,419.58]);# Celcius\nR_exp = np.array([6.2 ,35.6,66.1])# Ohm\nplt.plot(T_exp,R_exp,'*');\nplt.ylabel('Resistance of the sensor [Ohm]')\nplt.xlabel('Temperature [C]')\nplt.show()", "Reordering the Callendar-Van Duzen equation we obtain the following\n$$ AT+BT^2+C(T-100)T^3 =\\frac{R(T)}{R_0}-1 \\enspace,$$\nwhich we can write in matrix form as $Mx=p$, where\n$$\\begin{bmatrix} T_1 & T_1^2 & (T_1-100)T_1^3 \\ T_2 & T_2^2 & (T_2-100)T_2^3 \\ T_3 & T_3^2 & (T_3-100)T_3^3\\end{bmatrix} \\begin{bmatrix} A\\ B \\ C\\end{bmatrix} = \\begin{bmatrix} \\frac{R(T_1)}{R_0}-1 \\ \\frac{R(T_2)}{R_0}-1 \\ \\frac{R(T_3)}{R_0}-1\\end{bmatrix} \\enspace.$$\nBecause $M$ is square we can solve by computing $M^{-1}$ directly.", "R0=25;\nM=np.array([[T_exp[0],(T_exp[0])**2,(T_exp[0]-100)*(T_exp[0])**3],[T_exp[1],(T_exp[1])**2,(T_exp[1]-100)*(T_exp[1])**3],[T_exp[2],(T_exp[2])**2,(T_exp[2]-100)*(T_exp[2])**3]]);\np=np.array([[(R_exp[0]/R0)-1],[(R_exp[1]/R0)-1],[(R_exp[2]/R0)-1]]);\nx = np.linalg.solve(M,p) #solve linear equations system\n\nnp.set_printoptions(precision=3)\n\nprint('M')\nprint(M)\nprint('\\n')\nprint('p')\nprint(p)\nprint('\\n')\nprint('x')\nprint(x)", "We have found the coeffiecients $A$, $B$, and $C$ necessary to describe the sensor's transfer function. 
Now we plot it from -200 C a 600 C.", "A=x[0];B=x[1];C=x[2];\nT_range= np.arange(start = -200, stop = 601, step = 1);\nR_funT= R0*(1+A[0]*T_range+B[0]*(T_range)**2+C[0]*(T_range-100)*(T_range)**3);\nplt.plot(T_range,R_funT,T_exp[0],R_exp[0],'ro',T_exp[1],R_exp[1],'ro',T_exp[2],R_exp[2],'ro');\nplt.ylabel('Sensor resistance [Ohm]')\nplt.xlabel('Temperature [C]')\nplt.show()\n", "We see the fit is accurate. Note that our approach is also valid if we have more experimental points, in which case the system of equations $Mx=p$ is solved in the Least-Squares sense.\n\nThis page was written in the IPython Jupyter Notebook. To download the notebook click on this option at the top menu or get it from the github repo." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rayjustinhuang/DataAnalysisandMachineLearning
Linear Programming with OR-Tools.ipynb
mit
[ "Linear Programming with OR-Tools\nIn this notebook, we do some basic LP solving with Google's OR-Tools. Problems used will be examples in Hamdy Taha's Operations Research: An Introduction, 9th Edition, which I have in paperback.", "from ortools.linear_solver import pywraplp", "Reddy Mikks model\nGiven the following variables:\n$\\begin{aligned}\nx_1 = \\textrm{Tons of exterior paint produced daily} \\newline\nx_2 = \\textrm{Tons of interior paint produced daily}\n\\end{aligned}$\nand knowing that we want to maximize the profit, where \\$5000 is the profit from exterior paint and \\$4000 is the profit from a ton of interior paint, the Reddy Mikks model is:\n$$\\textrm{Maximize } z = 5x_1 + 4x_2$$\nsubject to\n$$6x_1 + 4x_2 \\le 24$$\n$$x_1 + 2x_2 \\le 6$$\n$$-x_1 + x_2 \\le 1$$\n$$x_2 \\le 2$$\n$$x_1, x_2 \\ge 0$$", "reddymikks = pywraplp.Solver('Reddy_Mikks', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\nx1 = reddymikks.NumVar(0, reddymikks.infinity(), 'x1')\nx2 = reddymikks.NumVar(0, reddymikks.infinity(), 'x2')\n\nreddymikks.Add(6*x1 + 4*x2 <= 24)\nreddymikks.Add(x1 + 2*x2 <= 6)\nreddymikks.Add(-x1 + x2 <= 1)\nreddymikks.Add(x2 <= 2)\n\nprofit = reddymikks.Objective()\nprofit.SetCoefficient(x1, 5)\nprofit.SetCoefficient(x2, 4)\nprofit.SetMaximization()\n\nstatus = reddymikks.Solve()\n\nif status not in [reddymikks.OPTIMAL, reddymikks.FEASIBLE]:\n raise Exception('No feasible solution found')\n \nprint(\"The company should produce\",round(x1.solution_value(),2),\"tons of exterior paint\")\nprint(\"The company should produce\",round(x2.solution_value(),2),\"tons of interior paint\")\nprint(\"The optimal profit is\", profit.Value(), 'thousand USD')", "More simple problems\nA company that operates 10 hours a day manufactures two products on three sequential processes. The following data characterizes the problem:", "import pandas as pd\n\nproblemdata = pd.DataFrame({'Process 1': [10, 5], 'Process 2':[6, 20], 'Process 3':[8, 10], 'Unit profit':[20, 30]})\nproblemdata.index = ['Product 1', 'Product 2']\n\nproblemdata", "Where there are 10 hours a day dedicated to production. 
Process times are given in minutes per unit while profit is given in USD.\nThe optimal mix of the two products would be characterized by the following model:\n$\\begin{aligned}\nx_1 = \\textrm{Units of product 1} \\newline\nx_2 = \\textrm{Units of product 2}\n\\end{aligned}$\n$$\\textrm{Maximize } z = 20x_1 + 30x_2$$\nsubject to\n$$\\begin{array}{rcl}\n10x_1 + 5x_2 \\le 600 \\newline\n6x_1 + 20x_2 \\le 600 \\newline\n8x_1 + 10x_2 \\le 600 \\newline\nx_1, x_2 \\ge 0\n\\end{array}$$\n(we will assume that continuous solution values are acceptable for this problem)", "simpleprod = pywraplp.Solver('Simple_Production', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\nx1 = simpleprod.NumVar(0, simpleprod.infinity(), 'x1')\nx2 = simpleprod.NumVar(0, simpleprod.infinity(), 'x2')\n\nfor i in problemdata.columns[:-1]:\n simpleprod.Add(problemdata.loc[problemdata.index[0], i]*x1 + problemdata.loc[problemdata.index[1], i]*x2 <= 600)\n\nprofit = simpleprod.Objective()\nprofit.SetCoefficient(x1, 20)\nprofit.SetCoefficient(x2, 30)\nprofit.SetMaximization()\n\nstatus = simpleprod.Solve()\n\nif status not in [simpleprod.OPTIMAL, simpleprod.FEASIBLE]:\n raise Exception('No feasible solution found')\n \nprint(\"The company should produce\",round(x1.solution_value(),2),\"units of product 1\")\nprint(\"The company should produce\",round(x2.solution_value(),2),\"units of product 2\")\nprint(\"The optimal profit is\", round(profit.Value(),2), 'USD')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
RaRe-Technologies/gensim
docs/notebooks/nmslibtutorial.ipynb
lgpl-2.1
[ "Similarity Queries using Nmslib Tutorial\nThis tutorial is about using the (Non-Metric Space Library (NMSLIB)) library for similarity queries with a Word2Vec model built with gensim.\nWhy use Nmslib?\nThe current implementation for finding k nearest neighbors in a vector space in gensim has linear complexity via brute force in the number of indexed documents, although with extremely low constant factors. The retrieved results are exact, which is an overkill in many applications: approximate results retrieved in sub-linear time may be enough. Nmslib can find approximate nearest neighbors much faster.\nCompared to annoy, nmslib has more parameteres to control the build and query time and accuracy. Nmslib can achieve faster and more accurate nearest neighbors search than annoy. This figure shows a comparison between annoy and nmslib indexer with differents parameters. This shows nmslib is better than annoy.\n\nPrerequisites\nAdditional libraries needed for this tutorial:\n- nmslib\n- annoy\n- psutil\n- matplotlib\nOutline\n\nDownload Text8 Corpus\nBuild Word2Vec Model\nConstruct NmslibIndex with model & make a similarity query\nVerify & Evaluate performance\nEvaluate relationship of parameters to initialization/query time and accuracy, compared with annoy\nWork with Google's word2vec C formats", "# pip install watermark\n%reload_ext watermark\n%watermark -v -m -p gensim,numpy,scipy,psutil,matplotlib", "1. Download Text8 Corpus", "import os.path\nif not os.path.isfile('text8'):\n !wget -c http://mattmahoney.net/dc/text8.zip\n !unzip text8.zip", "Import & Set up Logging\nI'm not going to set up logging due to the verbose input displaying in notebooks, but if you want that, uncomment the lines in the cell below.", "LOGS = False\n\nif LOGS:\n import logging\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)", "2. 
Build Word2Vec Model", "from gensim.models import Word2Vec, KeyedVectors\nfrom gensim.models.word2vec import Text8Corpus\n\n# Using params from Word2Vec_FastText_Comparison\n\nparams = {\n 'alpha': 0.05,\n 'size': 100,\n 'window': 5,\n 'iter': 5,\n 'min_count': 5,\n 'sample': 1e-4,\n 'sg': 1,\n 'hs': 0,\n 'negative': 5\n}\n\nmodel = Word2Vec(Text8Corpus('text8'), **params)\nprint(model)", "See the Word2Vec tutorial for how to initialize and save this model.\nComparing the traditional implementation, Annoy and Nmslib approximation", "# Set up the model and vector that we are using in the comparison\nfrom gensim.similarities.index import AnnoyIndexer\nfrom gensim.similarities.nmslib import NmslibIndexer\n\nmodel.init_sims()\nannoy_index = AnnoyIndexer(model, 300)\nnmslib_index = NmslibIndexer(model, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 10})\n\n# Dry run to make sure both indices are fully in RAM\nvector = model.wv.syn0norm[0]\nprint(model.most_similar([vector], topn=5, indexer=annoy_index))\nprint(model.most_similar([vector], topn=5, indexer=nmslib_index))\nprint(model.most_similar([vector], topn=5))\n\nimport time\nimport numpy as np\n\ndef avg_query_time(annoy_index=None, queries=1000):\n \"\"\"\n Average query time of a most_similar method over 1000 random queries,\n uses annoy if given an indexer\n \"\"\"\n total_time = 0\n for _ in range(queries):\n rand_vec = model.wv.syn0norm[np.random.randint(0, len(model.wv.vocab))]\n start_time = time.clock()\n model.most_similar([rand_vec], topn=5, indexer=annoy_index)\n total_time += time.clock() - start_time\n return total_time / queries\n\nqueries = 10000\n\ngensim_time = avg_query_time(queries=queries)\nannoy_time = avg_query_time(annoy_index, queries=queries)\nnmslib_time = avg_query_time(nmslib_index, queries=queries)\nprint(\"Gensim (s/query):\\t{0:.5f}\".format(gensim_time))\nprint(\"Annoy (s/query):\\t{0:.5f}\".format(annoy_time))\nprint(\"Nmslib (s/query):\\t{0:.5f}\".format(nmslib_time))\nspeed_improvement_gensim = gensim_time / nmslib_time\nspeed_improvement_annoy = annoy_time / nmslib_time\nprint (\"\\nNmslib is {0:.2f} times faster on average on this particular run\".format(speed_improvement_gensim))\nprint (\"\\nNmslib is {0:.2f} times faster on average than annoy on this particular run\".format(speed_improvement_annoy))\n", "3. Construct Nmslib Index with model & make a similarity query\nCreating an indexer\nAn instance of NmslibIndexer needs to be created in order to use Nmslib in gensim. The NmslibIndexer class is located in gensim.similarities.nmslib\nNmslibIndexer() takes three parameters:\nmodel: A Word2Vec or Doc2Vec model\nindex_params: Parameters for building nmslib indexer. index_params effects the build time and the index size. The example is {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}. Increasing the value of M and efConstruction improves the accuracy of search. However this also leads to longer indexing times. indexThreadQty is the number of thread. \nquery_time_params: Parameters for querying on nmslib indexer. query_time_params effects the query time and the search accuracy. The example is {'efSearch': 100}. A larger efSearch will give more accurate results, but larger query time. \nMore information can be found here. The relationship between parameters, build/query time, and accuracy will be investigated later in the tutorial. \nNow that we are ready to make a query, lets find the top 5 most similar words to \"science\" in the Text8 corpus. 
To make a similarity query we call Word2Vec.most_similar like we would traditionally, but with an added parameter, indexer. The only supported indexerers in gensim as of now are Annoy and Nmslib.", "# Building nmslib indexer\nnmslib_index = NmslibIndexer(model, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 10})\n# Derive the vector for the word \"science\" in our model\nvector = model[\"science\"]\n# The instance of AnnoyIndexer we just created is passed \napproximate_neighbors = model.most_similar([vector], topn=11, indexer=nmslib_index)\n\n# Neatly print the approximate_neighbors and their corresponding cosine similarity values\nprint(\"Approximate Neighbors\")\nfor neighbor in approximate_neighbors:\n print(neighbor)\n\nnormal_neighbors = model.most_similar([vector], topn=11)\nprint(\"\\nNormal (not nmslib-indexed) Neighbors\")\nfor neighbor in normal_neighbors:\n print(neighbor)", "Analyzing the results\nThe closer the cosine similarity of a vector is to 1, the more similar that word is to our query, which was the vector for \"science\". In this case the results are almostly same.\n4. Verify & Evaluate performance\nPersisting Indexes\nYou can save and load your indexes from/to disk to prevent having to construct them each time. This will create two files on disk, fname and fname.d. Both files are needed to correctly restore all attributes.", "import os\n\nfname = '/tmp/mymodel.index'\n\n# Persist index to disk\nnmslib_index.save(fname)\n\n# Load index back\nif os.path.exists(fname):\n nmslib_index2 = NmslibIndexer.load(fname)\n nmslib_index2.model = model\n\n# Results should be identical to above\nvector = model[\"science\"]\napproximate_neighbors2 = model.most_similar([vector], topn=11, indexer=nmslib_index2)\nfor neighbor in approximate_neighbors2:\n print(neighbor)\n \nassert approximate_neighbors == approximate_neighbors2", "Be sure to use the same model at load that was used originally, otherwise you will get unexpected behaviors.\nSave memory by memory-mapping indices saved to disk\nNmslib library has a useful feature that indices can be memory-mapped from disk. It saves memory when the same index is used by several processes.\nBelow are two snippets of code. First one has a separate index for each process. The second snipped shares the index between two processes via memory-mapping. The second example uses less total RAM as it is shared.", "# Remove verbosity from code below (if logging active)\n\nif LOGS:\n logging.disable(logging.CRITICAL)\n\nfrom multiprocessing import Process\nimport psutil", "Bad Example: Two processes load the Word2vec model from disk and create there own Nmslib indices from that model.", "%%time\n\nmodel.save('/tmp/mymodel.pkl')\n\ndef f(process_id):\n print('Process Id: {}'.format(os.getpid()))\n process = psutil.Process(os.getpid())\n new_model = Word2Vec.load('/tmp/mymodel.pkl')\n vector = new_model[\"science\"]\n nmslib_index = NmslibIndexer(new_model, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 10})\n approximate_neighbors = new_model.most_similar([vector], topn=5, indexer=nmslib_index)\n print('\\nMemory used by process {}: {}\\n---'.format(os.getpid(), process.memory_info()))\n\n# Creating and running two parallel process to share the same index file.\np1 = Process(target=f, args=('1',))\np1.start()\np1.join()\np2 = Process(target=f, args=('2',))\np2.start()\np2.join()", "Good example. 
Two processes load both the Word2vec model and index from disk and memory-map the index", "%%time\n\nmodel.save('/tmp/mymodel.pkl')\n\ndef f(process_id):\n print('Process Id: {}'.format(os.getpid()))\n process = psutil.Process(os.getpid())\n new_model = Word2Vec.load('/tmp/mymodel.pkl')\n vector = new_model[\"science\"]\n nmslib_index = NmslibIndexer.load('/tmp/mymodel.index')\n nmslib_index.model = new_model\n approximate_neighbors = new_model.most_similar([vector], topn=5, indexer=nmslib_index)\n print('\\nMemory used by process {}: {}\\n---'.format(os.getpid(), process.memory_info()))\n\n# Creating and running two parallel process to share the same index file.\np1 = Process(target=f, args=('1',))\np1.start()\np1.join()\np2 = Process(target=f, args=('2',))\np2.start()\np2.join()", "5. Evaluate relationship of parameters to initialization/query time and accuracy, compared with annoy", "import matplotlib.pyplot as plt\n%matplotlib inline", "Build dataset of Initialization times and accuracy measures", "exact_results = [element[0] for element in model.most_similar([model.wv.syn0norm[0]], topn=100)]\n\n# For calculating query time\nqueries = 1000\n\ndef create_evaluation_graph(x_values, y_values_init, y_values_accuracy, y_values_query, param_name):\n plt.figure(1, figsize=(12, 6))\n plt.subplot(231)\n plt.plot(x_values, y_values_init)\n plt.title(\"{} vs initalization time\".format(param_name))\n plt.ylabel(\"Initialization time (s)\")\n plt.xlabel(param_name)\n plt.subplot(232)\n plt.plot(x_values, y_values_accuracy)\n plt.title(\"{} vs accuracy\".format(param_name))\n plt.ylabel(\"% accuracy\")\n plt.xlabel(param_name)\n plt.tight_layout()\n plt.subplot(233)\n plt.plot(y_values_init, y_values_accuracy)\n plt.title(\"Initialization time vs accuracy\")\n plt.ylabel(\"% accuracy\")\n plt.xlabel(\"Initialization time (s)\")\n plt.tight_layout()\n plt.subplot(234)\n plt.plot(x_values, y_values_query)\n plt.title(\"{} vs query time\".format(param_name))\n plt.ylabel(\"query time\")\n plt.xlabel(param_name)\n plt.tight_layout()\n plt.subplot(235)\n plt.plot(y_values_query, y_values_accuracy)\n plt.title(\"query time vs accuracy\")\n plt.ylabel(\"% accuracy\")\n plt.xlabel(\"query time (s)\")\n plt.tight_layout()\n plt.show()\n\ndef evaluate_nmslib_performance(parameter, is_parameter_query, parameter_start, parameter_end, parameter_step):\n nmslib_x_values = []\n nmslib_y_values_init = []\n nmslib_y_values_accuracy = []\n nmslib_y_values_query = []\n index_params = {'M': 100, 'indexThreadQty': 10, 'efConstruction': 100, 'post': 0}\n query_params = {'efSearch': 100}\n \n for x in range(parameter_start, parameter_end, parameter_step):\n nmslib_x_values.append(x)\n start_time = time.time()\n if is_parameter_query:\n query_params[parameter] = x\n else:\n index_params[parameter] = x\n nmslib_index = NmslibIndexer(model\n , index_params\n , query_params)\n nmslib_y_values_init.append(time.time() - start_time)\n approximate_results = model.most_similar([model.wv.syn0norm[0]], topn=100, indexer=nmslib_index)\n top_words = [result[0] for result in approximate_results]\n nmslib_y_values_accuracy.append(len(set(top_words).intersection(exact_results)))\n nmslib_y_values_query.append(avg_query_time(nmslib_index, queries=queries))\n create_evaluation_graph(nmslib_x_values,\n nmslib_y_values_init, \n nmslib_y_values_accuracy, \n nmslib_y_values_query, \n parameter)\n\n# Evaluate nmslib indexer, changing the parameter M\nevaluate_nmslib_performance(\"M\", False, 50, 401, 50)\n\n# Evaluate nmslib indexer, changing 
the parameter efConstruction\nevaluate_nmslib_performance(\"efConstruction\", False, 50, 1001, 100)\n\n# Evaluate nmslib indexer, changing the parameter efSearch\nevaluate_nmslib_performance(\"efSearch\", True, 50, 401, 100)\n\n# Evaluate annoy indexer, changing the parameter num_tree\nannoy_x_values = []\nannoy_y_values_init = []\nannoy_y_values_accuracy = []\nannoy_y_values_query = []\n\nfor x in range(100, 401, 50):\n annoy_x_values.append(x)\n start_time = time.time()\n annoy_index = AnnoyIndexer(model, x)\n annoy_y_values_init.append(time.time() - start_time)\n approximate_results = model.most_similar([model.wv.syn0norm[0]], topn=100, indexer=annoy_index)\n top_words = [result[0] for result in approximate_results]\n annoy_y_values_accuracy.append(len(set(top_words).intersection(exact_results)))\n annoy_y_values_query.append(avg_query_time(annoy_index, queries=queries))\ncreate_evaluation_graph(annoy_x_values,\n annoy_y_values_init, \n annoy_y_values_accuracy, \n annoy_y_values_query, \n \"num_tree\")\n\n# nmslib indexer changing the parameter M, efConstruction, efSearch\nnmslib_y_values_init = []\nnmslib_y_values_accuracy = []\nnmslib_y_values_query = []\n\nfor M in [100, 200]:\n for efConstruction in [100, 200]:\n for efSearch in [100, 200]:\n start_time = time.time()\n nmslib_index = NmslibIndexer(model, \n {'M': M, 'indexThreadQty': 10, 'efConstruction': efConstruction, 'post': 0},\n {'efSearch': efSearch})\n nmslib_y_values_init.append(time.time() - start_time)\n approximate_results = model.most_similar([model.wv.syn0norm[0]], topn=100, indexer=nmslib_index)\n top_words = [result[0] for result in approximate_results]\n nmslib_y_values_accuracy.append(len(set(top_words).intersection(exact_results)))\n nmslib_y_values_query.append(avg_query_time(nmslib_index, queries=queries))\n\n\n# Make a comparison between annoy and nmslib indexer\nplt.figure(1, figsize=(12, 6))\nplt.subplot(121)\nplt.scatter(nmslib_y_values_init, nmslib_y_values_accuracy, label=\"nmslib\", color='r', marker='o')\nplt.scatter(annoy_y_values_init, annoy_y_values_accuracy, label=\"annoy\", color='b', marker='x')\nplt.legend()\nplt.title(\"Initialization time vs accuracy. Upper left is better.\")\nplt.ylabel(\"% accuracy\")\nplt.xlabel(\"Initialization time (s)\")\nplt.subplot(122)\nplt.scatter(nmslib_y_values_query, nmslib_y_values_accuracy, label=\"nmslib\", color='r', marker='o')\nplt.scatter(annoy_y_values_query, annoy_y_values_accuracy, label=\"annoy\", color='b', marker='x')\nplt.legend()\nplt.title(\"Query time vs accuracy. Upper left is better.\")\nplt.ylabel(\"% accuracy\")\nplt.xlabel(\"Query time (s)\")\nplt.xlim(min(nmslib_y_values_query+annoy_y_values_query), max(nmslib_y_values_query+annoy_y_values_query))\nplt.tight_layout()\nplt.show()", "6. Work with Google word2vec files\nOur model can be exported to a word2vec C format. There is a binary and a plain text word2vec format. Both can be read with a variety of other software, or imported back into gensim as a KeyedVectors object.", "# To export our model as text\nmodel.wv.save_word2vec_format('/tmp/vectors.txt', binary=False)\n\nfrom smart_open import open\n# View the first 3 lines of the exported file\n\n# The first line has the total number of entries and the vector dimension count. 
\n# The next lines have a key (a string) followed by its vector.\nwith open('/tmp/vectors.txt') as myfile:\n for i in range(3):\n print(myfile.readline().strip())\n\n# To import a word2vec text model\nwv = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False)\n\n# To export our model as binary\nmodel.wv.save_word2vec_format('/tmp/vectors.bin', binary=True)\n\n# To import a word2vec binary model\nwv = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True)\n\n# To create and save Nmslib Index from a loaded `KeyedVectors` object \nnmslib_index = NmslibIndexer(wv, \n {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 100})\nnmslib_index.save('/tmp/mymodel.index')\n\n# Load and test the saved word vectors and saved nmslib index\nwv = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True)\nnmslib_index = NmslibIndexer.load('/tmp/mymodel.index')\nnmslib_index.model = wv\n\nvector = wv[\"cat\"]\napproximate_neighbors = wv.most_similar([vector], topn=11, indexer=nmslib_index)\n# Neatly print the approximate_neighbors and their corresponding cosine similarity values\nprint(\"Approximate Neighbors\")\nfor neighbor in approximate_neighbors:\n print(neighbor)\n\nnormal_neighbors = wv.most_similar([vector], topn=11)\nprint(\"\\nNormal (not Nmslib-indexed) Neighbors\")\nfor neighbor in normal_neighbors:\n print(neighbor)", "Recap\nIn this notebook we used the Nmslib module to build an indexed approximation of our word embeddings. To do so, we did the following steps:\n1. Download Text8 Corpus\n2. Build Word2Vec Model\n3. Construct NmslibIndex with model & make a similarity query\n4. Verify & Evaluate performance\n5. Evaluate relationship of parameters to initialization/query time and accuracy, compared with annoy\n6. Work with Google's word2vec C formats" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dipanjanS/BerkeleyX-CS100.1x-Big-Data-with-Apache-Spark
Week 2 - Introduction to Apache Spark/lab1_word_count_student.ipynb
mit
[ "+ \nWord Count Lab: Building a word count application\nThis lab will build on the techniques covered in the Spark tutorial to develop a simple word count application. The volume of unstructured text in existence is growing dramatically, and Spark is an excellent tool for analyzing this type of data. In this lab, we will write code that calculates the most common words in the Complete Works of William Shakespeare retrieved from Project Gutenberg. This could also be scaled to find the most common words on the Internet.\n During this lab we will cover: \nPart 1: Creating a base RDD and pair RDDs\nPart 2: Counting with pair RDDs\nPart 3: Finding unique words and a mean value\nPart 4: Apply word count to a file\nNote that, for reference, you can look up the details of the relevant methods in Spark's Python API\n Part 1: Creating a base RDD and pair RDDs \nIn this part of the lab, we will explore creating a base RDD with parallelize and using pair RDDs to count words.\n (1a) Create a base RDD \nWe'll start by generating a base RDD by using a Python list and the sc.parallelize method. Then we'll print out the type of the base RDD.", "wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat']\nwordsRDD = sc.parallelize(wordsList, 4)\n# Print out the type of wordsRDD\nprint type(wordsRDD)", "(1b) Pluralize and test \nLet's use a map() transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace &lt;FILL IN&gt; with your solution. If you have trouble, the next cell has the solution. After you have defined makePlural you can run the third cell which contains a test. If you implementation is correct it will print 1 test passed.\nThis is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more &lt;FILL IN&gt; sections. The cell that needs to be modified will have # TODO: Replace &lt;FILL IN&gt; with appropriate code on its first line. Once the &lt;FILL IN&gt; sections are updated and the code is run, the test cell can then be run to verify the correctness of your solution. The last code cell before the next markdown section will contain the tests.", "# TODO: Replace <FILL IN> with appropriate code\ndef makePlural(word):\n \"\"\"Adds an 's' to `word`.\n\n Note:\n This is a simple function that only adds an 's'. No attempt is made to follow proper\n pluralization rules.\n\n Args:\n word (str): A string.\n\n Returns:\n str: A string with 's' added to it.\n \"\"\"\n return word + 's'\n\nprint makePlural('cat')\n\n# One way of completing the function\ndef makePlural(word):\n return word + 's'\n\nprint makePlural('cat')\n\n# Load in the testing code and check to see if your answer is correct\n# If incorrect it will report back '1 test failed' for each failed test\n# Make sure to rerun any cell you change before trying the test again\nfrom test_helper import Test\n# TEST Pluralize and test (1b)\nTest.assertEquals(makePlural('rat'), 'rats', 'incorrect result: makePlural does not add an s')", "(1c) Apply makePlural to the base RDD \nNow pass each item in the base RDD into a map() transformation that applies the makePlural() function to each element. 
And then call the collect() action to see the transformed RDD.", "# TODO: Replace <FILL IN> with appropriate code\npluralRDD = wordsRDD.map(makePlural)\nprint pluralRDD.collect()\n\n# TEST Apply makePlural to the base RDD(1c)\nTest.assertEquals(pluralRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],\n 'incorrect values for pluralRDD')", "(1d) Pass a lambda function to map \nLet's create the same RDD using a lambda function.", "# TODO: Replace <FILL IN> with appropriate code\npluralLambdaRDD = wordsRDD.map(lambda word: word + 's')\nprint pluralLambdaRDD.collect()\n\n# TEST Pass a lambda function to map (1d)\nTest.assertEquals(pluralLambdaRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],\n 'incorrect values for pluralLambdaRDD (1d)')", "(1e) Length of each word \nNow use map() and a lambda function to return the number of characters in each word. We'll collect this result directly into a variable.", "# TODO: Replace <FILL IN> with appropriate code\npluralLengths = (pluralRDD\n .map(lambda word: len(word))\n .collect())\nprint pluralLengths\n\n# TEST Length of each word (1e)\nTest.assertEquals(pluralLengths, [4, 9, 4, 4, 4],\n 'incorrect values for pluralLengths')", "(1f) Pair RDDs \nThe next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple (k, v) where k is the key and v is the value. In this example, we will create a pair consisting of ('&lt;word&gt;', 1) for each word element in the RDD.\nWe can create the pair RDD using the map() transformation with a lambda() function to create a new RDD.", "# TODO: Replace <FILL IN> with appropriate code\nwordPairs = wordsRDD.map(lambda word: (word, 1))\nprint wordPairs.collect()\n\n# TEST Pair RDDs (1f)\nTest.assertEquals(wordPairs.collect(),\n [('cat', 1), ('elephant', 1), ('rat', 1), ('rat', 1), ('cat', 1)],\n 'incorrect value for wordPairs')", "Part 2: Counting with pair RDDs \nNow, let's count the number of times a particular word appears in the RDD. There are multiple ways to perform the counting, but some are much less efficient than others.\nA naive approach would be to collect() all of the elements and count them in the driver program. While this approach could work for small datasets, we want an approach that will work for any size dataset including terabyte- or petabyte-sized datasets. In addition, performing all of the work in the driver program is slower than performing it in parallel in the workers. For these reasons, we will use data parallel operations.\n (2a) groupByKey() approach \nAn approach you might first consider (we'll see shortly that there are better ways) is based on using the groupByKey() transformation. As the name implies, the groupByKey() transformation groups all the elements of the RDD with the same key into a single list in one of the partitions. There are two problems with using groupByKey():\n\n\nThe operation requires a lot of data movement to move all the values into the appropriate partitions.\n\n\nThe lists can be very large. Consider a word count of English Wikipedia: the lists for common words (e.g., the, a, etc.) 
would be huge and could exhaust the available memory in a worker.\n\n\nUse groupByKey() to generate a pair RDD of type ('word', iterator).", "# TODO: Replace <FILL IN> with appropriate code\n# Note that groupByKey requires no parameters\nwordsGrouped = wordPairs.groupByKey()\nfor key, value in wordsGrouped.collect():\n print '{0}: {1}'.format(key, list(value))\n\n# TEST groupByKey() approach (2a)\nTest.assertEquals(sorted(wordsGrouped.mapValues(lambda x: list(x)).collect()),\n [('cat', [1, 1]), ('elephant', [1]), ('rat', [1, 1])],\n 'incorrect value for wordsGrouped')", "(2b) Use groupByKey() to obtain the counts \nUsing the groupByKey() transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator.\nNow sum the iterator using a map() transformation. The result should be a pair RDD consisting of (word, count) pairs.", "# TODO: Replace <FILL IN> with appropriate code\nwordCountsGrouped = wordsGrouped.map(lambda (k,v): (k, sum(v)))\nprint wordCountsGrouped.collect()\n\n# TEST Use groupByKey() to obtain the counts (2b)\nTest.assertEquals(sorted(wordCountsGrouped.collect()),\n [('cat', 2), ('elephant', 1), ('rat', 2)],\n 'incorrect value for wordCountsGrouped')", "(2c) Counting using reduceByKey \nA better approach is to start from the pair RDD and then use the reduceByKey() transformation to create a new pair RDD. The reduceByKey() transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. reduceByKey() operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets.", "# TODO: Replace <FILL IN> with appropriate code\n# Note that reduceByKey takes in a function that accepts two values and returns a single value\n\nwordCounts = wordPairs.reduceByKey(lambda a,b: a+b)\nprint wordCounts.collect()\n\n# TEST Counting using reduceByKey (2c)\nTest.assertEquals(sorted(wordCounts.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)],\n 'incorrect value for wordCounts')", "(2d) All together \nThe expert version of the code performs the map() to pair RDD, reduceByKey() transformation, and collect in one statement.", "# TODO: Replace <FILL IN> with appropriate code\nwordCountsCollected = (wordsRDD\n .map(lambda word: (word, 1))\n .reduceByKey(lambda a,b: a+b)\n .collect())\nprint wordCountsCollected\n\n# TEST All together (2d)\nTest.assertEquals(sorted(wordCountsCollected), [('cat', 2), ('elephant', 1), ('rat', 2)],\n 'incorrect value for wordCountsCollected')", "Part 3: Finding unique words and a mean value \n (3a) Unique words \nCalculate the number of unique words in wordsRDD. You can use other RDDs that you have already created to make this easier.", "# TODO: Replace <FILL IN> with appropriate code\nuniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct().count()\nprint uniqueWords\n\n# TEST Unique words (3a)\nTest.assertEquals(uniqueWords, 3, 'incorrect count of uniqueWords')", "(3b) Mean using reduce \nFind the mean number of words per unique word in wordCounts.\nUse a reduce() action to sum the counts in wordCounts and then divide by the number of unique words. 
First map() the pair RDD wordCounts, which consists of (key, value) pairs, to an RDD of values.", "# TODO: Replace <FILL IN> with appropriate code\nfrom operator import add\n\ntotalCount = (wordCounts\n .map(lambda (a,b): b)\n .reduce(add))\naverage = totalCount / float(wordCounts.distinct().count())\nprint totalCount\nprint round(average, 2)\n\n# TEST Mean using reduce (3b)\nTest.assertEquals(round(average, 2), 1.67, 'incorrect value of average')", "Part 4: Apply word count to a file \nIn this section we will finish developing our word count application. We'll have to build the wordCount function, deal with real world problems like capitalization and punctuation, load in our data source, and compute the word count on the new data.\n (4a) wordCount function \nFirst, define a function for word counting. You should reuse the techniques that have been covered in earlier parts of this lab. This function should take in an RDD that is a list of words like wordsRDD and return a pair RDD that has all of the words and their associated counts.", "# TODO: Replace <FILL IN> with appropriate code\ndef wordCount(wordListRDD):\n \"\"\"Creates a pair RDD with word counts from an RDD of words.\n\n Args:\n wordListRDD (RDD of str): An RDD consisting of words.\n\n Returns:\n RDD of (str, int): An RDD consisting of (word, count) tuples.\n \"\"\"\n return (wordListRDD\n .map(lambda a : (a,1))\n .reduceByKey(lambda a,b: a+b))\nprint wordCount(wordsRDD).collect()\n\n# TEST wordCount function (4a)\nTest.assertEquals(sorted(wordCount(wordsRDD).collect()),\n [('cat', 2), ('elephant', 1), ('rat', 2)],\n 'incorrect definition for wordCount function')", "(4b) Capitalization and punctuation \nReal world files are more complicated than the data we have been using in this lab. Some of the issues we have to address are:\n\n\nWords should be counted independent of their capitialization (e.g., Spark and spark should be counted as the same word).\n\n\nAll punctuation should be removed.\n\n\nAny leading or trailing spaces on a line should be removed.\n\n\nDefine the function removePunctuation that converts all text to lower case, removes any punctuation, and removes leading and trailing spaces. Use the Python re module to remove any text that is not a letter, number, or space. Reading help(re.sub) might be useful.", "# TODO: Replace <FILL IN> with appropriate code\nimport re\ndef removePunctuation(text):\n \"\"\"Removes punctuation, changes to lower case, and strips leading and trailing spaces.\n\n Note:\n Only spaces, letters, and numbers should be retained. Other characters should should be\n eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after\n punctuation is removed.\n\n Args:\n text (str): A string.\n\n Returns:\n str: The cleaned up string.\n \"\"\"\n return re.sub(\"[^a-zA-Z0-9 ]\", \"\", text.strip(\" \").lower())\nprint removePunctuation('Hi, you!')\nprint removePunctuation(' No under_score!')\n\n# TEST Capitalization and punctuation (4b)\nTest.assertEquals(removePunctuation(\" The Elephant's 4 cats. \"),\n 'the elephants 4 cats',\n 'incorrect definition for removePunctuation function')", "(4c) Load a text file \nFor the next part of this lab, we will use the Complete Works of William Shakespeare from Project Gutenberg. To convert a text file into an RDD, we use the SparkContext.textFile() method. We also apply the recently defined removePunctuation() function using a map() transformation to strip out the punctuation and change all text to lowercase. 
Since the file is large we use take(15), so that we only print 15 lines.", "# Just run this code\nimport os.path\nbaseDir = os.path.join('data')\ninputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt')\nfileName = os.path.join(baseDir, inputPath)\n\nshakespeareRDD = (sc\n .textFile(fileName, 8)\n .map(removePunctuation))\nprint '\\n'.join(shakespeareRDD\n .zipWithIndex() # to (line, lineNum)\n .map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line'\n .take(15))", "(4d) Words from lines \nBefore we can use the wordcount() function, we have to address two issues with the format of the RDD:\n\n\nThe first issue is that that we need to split each line by its spaces.\n\n\nThe second issue is we need to filter out empty lines.\n\n\nApply a transformation that will split each element of the RDD by its spaces. For each element of the RDD, you should apply Python's string split() function. You might think that a map() transformation is the way to do this, but think about what the result of the split() function will be.", "# TODO: Replace <FILL IN> with appropriate code\nshakespeareWordsRDD = shakespeareRDD.flatMap(lambda a: a.split(\" \"))\nshakespeareWordCount = shakespeareWordsRDD.count()\nprint shakespeareWordsRDD.top(5)\nprint shakespeareWordCount\n\n# TEST Words from lines (4d)\n# This test allows for leading spaces to be removed either before or after\n# punctuation is removed.\nTest.assertTrue(shakespeareWordCount == 927631 or shakespeareWordCount == 928908,\n 'incorrect value for shakespeareWordCount')\nTest.assertEquals(shakespeareWordsRDD.top(5),\n [u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'],\n 'incorrect value for shakespeareWordsRDD')", "(4e) Remove empty elements \nThe next step is to filter out the empty elements. Remove all entries where the word is ''.", "# TODO: Replace <FILL IN> with appropriate code\nshakeWordsRDD = shakespeareWordsRDD.filter(lambda word: len(word) > 0)\nshakeWordCount = shakeWordsRDD.count()\nprint shakeWordCount\n\n# TEST Remove empty elements (4e)\nTest.assertEquals(shakeWordCount, 882996, 'incorrect value for shakeWordCount')", "(4f) Count the words \nWe now have an RDD that is only words. Next, let's apply the wordCount() function to produce a list of word counts. We can view the top 15 words by using the takeOrdered() action; however, since the elements of the RDD are pairs, we need a custom sort function that sorts using the value part of the pair.\nYou'll notice that many of the words are common English words. These are called stopwords. In a later lab, we will see how to eliminate them from the results.\nUse the wordCount() function and takeOrdered() to obtain the fifteen most common words and their counts.", "# TODO: Replace <FILL IN> with appropriate code\ntop15WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(15, lambda (a,b): -b)\nprint '\\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts))\n\n# TEST Count the words (4f)\nTest.assertEquals(top15WordsAndCounts,\n [(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463),\n (u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890),\n (u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)],\n 'incorrect value for top15WordsAndCounts')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
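The Spark word-count lab above is written for Python 2 and an older PySpark API: bare `print` statements and tuple-unpacking lambdas such as `lambda (k, v): ...` are syntax errors on Python 3. As a rough sketch only — assuming a running `SparkSession` and a hypothetical local file `shakespeare.txt`, neither of which comes from the lab itself — the same pipeline in Python 3 syntax looks roughly like this:

```python
import re
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("wordcount").getOrCreate()
sc = spark.sparkContext

def remove_punctuation(text):
    # keep only letters, digits and spaces; lower-case and strip the ends
    return re.sub(r"[^a-z0-9 ]", "", text.lower()).strip()

lines = sc.textFile("shakespeare.txt")  # hypothetical path, not the lab's data layout
words = (lines
         .map(remove_punctuation)
         .flatMap(lambda line: line.split(" "))
         .filter(lambda word: word != ""))

word_counts = (words
               .map(lambda word: (word, 1))
               .reduceByKey(lambda a, b: a + b))

for word, count in word_counts.takeOrdered(15, key=lambda pair: -pair[1]):
    print("{0}: {1}".format(word, count))
```

As in the lab, `reduceByKey` pre-aggregates counts inside each partition before shuffling, which is why it scales better than the `groupByKey` variant.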
guillaume-chevalier/LSTM-Human-Activity-Recognition
LSTM.ipynb
mit
[ "<a title=\"Activity Recognition\" href=\"https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition\" > LSTMs for Human Activity Recognition</a>\nHuman Activity Recognition (HAR) using smartphones dataset and an LSTM RNN. Classifying the type of movement amongst six categories:\n- WALKING,\n- WALKING_UPSTAIRS,\n- WALKING_DOWNSTAIRS,\n- SITTING,\n- STANDING,\n- LAYING.\nCompared to a classical approach, using a Recurrent Neural Networks (RNN) with Long Short-Term Memory cells (LSTMs) require no or almost no feature engineering. Data can be fed directly into the neural network who acts like a black box, modeling the problem correctly. Other research on the activity recognition dataset can use a big amount of feature engineering, which is rather a signal processing approach combined with classical data science techniques. The approach here is rather very simple in terms of how much was the data preprocessed. \nLet's use Google's neat Deep Learning library, TensorFlow, demonstrating the usage of an LSTM, a type of Artificial Neural Network that can process sequential data / time series. \nVideo dataset overview\nFollow this link to see a video of the 6 activities recorded in the experiment with one of the participants:\n<p align=\"center\">\n <a href=\"http://www.youtube.com/watch?feature=player_embedded&v=XOEN9W05_4A\n\" target=\"_blank\"><img src=\"http://img.youtube.com/vi/XOEN9W05_4A/0.jpg\" \nalt=\"Video of the experiment\" width=\"400\" height=\"300\" border=\"10\" /></a>\n <a href=\"https://youtu.be/XOEN9W05_4A\"><center>[Watch video]</center></a>\n</p>\n\nDetails about the input data\nI will be using an LSTM on the data to learn (as a cellphone attached on the waist) to recognise the type of activity that the user is doing. The dataset's description goes like this:\n\nThe sensor signals (accelerometer and gyroscope) were pre-processed by applying noise filters and then sampled in fixed-width sliding windows of 2.56 sec and 50% overlap (128 readings/window). The sensor acceleration signal, which has gravitational and body motion components, was separated using a Butterworth low-pass filter into body acceleration and gravity. The gravitational force is assumed to have only low frequency components, therefore a filter with 0.3 Hz cutoff frequency was used. \n\nThat said, I will use the almost raw data: only the gravity effect has been filtered out of the accelerometer as a preprocessing step for another 3D feature as an input to help learning. If you'd ever want to extract the gravity by yourself, you could fork my code on using a Butterworth Low-Pass Filter (LPF) in Python and edit it to have the right cutoff frequency of 0.3 Hz which is a good frequency for activity recognition from body sensors.\nWhat is an RNN?\nAs explained in this article, an RNN takes many input vectors to process them and output other vectors. It can be roughly pictured like in the image below, imagining each rectangle has a vectorial depth and other special hidden quirks in the image below. In our case, the \"many to one\" architecture is used: we accept time series of feature vectors (one vector per time step) to convert them to a probability vector at the output for classification. Note that a \"one to one\" architecture would be a standard feedforward neural network. 
\n\n<a href=\"https://www.dl-rnn-course.neuraxio.com/start?utm_source=github_lstm\" ><img src=\"https://raw.githubusercontent.com/Neuraxio/Machine-Learning-Figures/master/rnn-architectures.png\" /></a>\nLearn more on RNNs\n\nWhat is an LSTM?\nAn LSTM is an improved RNN. It is more complex, but easier to train, avoiding what is called the vanishing gradient problem. I recommend this course for you to learn more on LSTMs.\n\nLearn more on LSTMs\n\nResults\nScroll on! Nice visuals awaits.", "# All Includes\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport tensorflow as tf # Version 1.0.0 (some previous versions are used in past commits)\nfrom sklearn import metrics\n\nimport os\n\n# Useful Constants\n\n# Those are separate normalised input features for the neural network\nINPUT_SIGNAL_TYPES = [\n \"body_acc_x_\",\n \"body_acc_y_\",\n \"body_acc_z_\",\n \"body_gyro_x_\",\n \"body_gyro_y_\",\n \"body_gyro_z_\",\n \"total_acc_x_\",\n \"total_acc_y_\",\n \"total_acc_z_\"\n]\n\n# Output classes to learn how to classify\nLABELS = [\n \"WALKING\", \n \"WALKING_UPSTAIRS\", \n \"WALKING_DOWNSTAIRS\", \n \"SITTING\", \n \"STANDING\", \n \"LAYING\"\n] \n", "Let's start by downloading the data:", "# Note: Linux bash commands start with a \"!\" inside those \"ipython notebook\" cells\n\nDATA_PATH = \"data/\"\n\n!pwd && ls\nos.chdir(DATA_PATH)\n!pwd && ls\n\n!python download_dataset.py\n\n!pwd && ls\nos.chdir(\"..\")\n!pwd && ls\n\nDATASET_PATH = DATA_PATH + \"UCI HAR Dataset/\"\nprint(\"\\n\" + \"Dataset is now located at: \" + DATASET_PATH)\n", "Preparing dataset:", "TRAIN = \"train/\"\nTEST = \"test/\"\n\n\n# Load \"X\" (the neural network's training and testing inputs)\n\ndef load_X(X_signals_paths):\n X_signals = []\n \n for signal_type_path in X_signals_paths:\n file = open(signal_type_path, 'r')\n # Read dataset from disk, dealing with text files' syntax\n X_signals.append(\n [np.array(serie, dtype=np.float32) for serie in [\n row.replace(' ', ' ').strip().split(' ') for row in file\n ]]\n )\n file.close()\n \n return np.transpose(np.array(X_signals), (1, 2, 0))\n\nX_train_signals_paths = [\n DATASET_PATH + TRAIN + \"Inertial Signals/\" + signal + \"train.txt\" for signal in INPUT_SIGNAL_TYPES\n]\nX_test_signals_paths = [\n DATASET_PATH + TEST + \"Inertial Signals/\" + signal + \"test.txt\" for signal in INPUT_SIGNAL_TYPES\n]\n\nX_train = load_X(X_train_signals_paths)\nX_test = load_X(X_test_signals_paths)\n\n\n# Load \"y\" (the neural network's training and testing outputs)\n\ndef load_y(y_path):\n file = open(y_path, 'r')\n # Read dataset from disk, dealing with text file's syntax\n y_ = np.array(\n [elem for elem in [\n row.replace(' ', ' ').strip().split(' ') for row in file\n ]], \n dtype=np.int32\n )\n file.close()\n \n # Substract 1 to each output class for friendly 0-based indexing \n return y_ - 1\n\ny_train_path = DATASET_PATH + TRAIN + \"y_train.txt\"\ny_test_path = DATASET_PATH + TEST + \"y_test.txt\"\n\ny_train = load_y(y_train_path)\ny_test = load_y(y_test_path)\n", "Additionnal Parameters:\nHere are some core parameter definitions for the training. 
\nFor example, the whole neural network's structure could be summarised by enumerating those parameters and the fact that two LSTM are used one on top of another (stacked) output-to-input as hidden layers through time steps.", "# Input Data \n\ntraining_data_count = len(X_train) # 7352 training series (with 50% overlap between each serie)\ntest_data_count = len(X_test) # 2947 testing series\nn_steps = len(X_train[0]) # 128 timesteps per series\nn_input = len(X_train[0][0]) # 9 input parameters per timestep\n\n\n# LSTM Neural Network's internal structure\n\nn_hidden = 32 # Hidden layer num of features\nn_classes = 6 # Total classes (should go up, or should go down)\n\n\n# Training \n\nlearning_rate = 0.0025\nlambda_loss_amount = 0.0015\ntraining_iters = training_data_count * 300 # Loop 300 times on the dataset\nbatch_size = 1500\ndisplay_iter = 30000 # To show test set accuracy during training\n\n\n# Some debugging info\n\nprint(\"Some useful info to get an insight on dataset's shape and normalisation:\")\nprint(\"(X shape, y shape, every X's mean, every X's standard deviation)\")\nprint(X_test.shape, y_test.shape, np.mean(X_test), np.std(X_test))\nprint(\"The dataset is therefore properly normalised, as expected, but not yet one-hot encoded.\")\n", "Utility functions for training:", "def LSTM_RNN(_X, _weights, _biases):\n # Function returns a tensorflow LSTM (RNN) artificial neural network from given parameters. \n # Moreover, two LSTM cells are stacked which adds deepness to the neural network. \n # Note, some code of this notebook is inspired from an slightly different \n # RNN architecture used on another dataset, some of the credits goes to \n # \"aymericdamien\" under the MIT license.\n\n # (NOTE: This step could be greatly optimised by shaping the dataset once\n # input shape: (batch_size, n_steps, n_input)\n _X = tf.transpose(_X, [1, 0, 2]) # permute n_steps and batch_size\n # Reshape to prepare input to hidden activation\n _X = tf.reshape(_X, [-1, n_input]) \n # new shape: (n_steps*batch_size, n_input)\n \n # ReLU activation, thanks to Yu Zhao for adding this improvement here:\n _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])\n # Split data because rnn cell needs a list of inputs for the RNN inner loop\n _X = tf.split(_X, n_steps, 0) \n # new shape: n_steps * (batch_size, n_hidden)\n\n # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow\n lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)\n lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)\n lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)\n # Get LSTM cell output\n outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)\n\n # Get last time step's output feature for a \"many-to-one\" style classifier, \n # as in the image describing RNNs at the top of this page\n lstm_last_output = outputs[-1]\n \n # Linear activation\n return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']\n\n\ndef extract_batch_size(_train, step, batch_size):\n # Function to fetch a \"batch_size\" amount of data from \"(X|y)_train\" data. 
\n \n shape = list(_train.shape)\n shape[0] = batch_size\n batch_s = np.empty(shape)\n\n for i in range(batch_size):\n # Loop index\n index = ((step-1)*batch_size + i) % len(_train)\n batch_s[i] = _train[index] \n\n return batch_s\n\n\ndef one_hot(y_, n_classes=n_classes):\n # Function to encode neural one-hot output labels from number indexes \n # e.g.: \n # one_hot(y_=[[5], [0], [3]], n_classes=6):\n # return [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]\n \n y_ = y_.reshape(len(y_))\n return np.eye(n_classes)[np.array(y_, dtype=np.int32)] # Returns FLOATS\n", "Let's get serious and build the neural network:", "\n# Graph input/output\nx = tf.placeholder(tf.float32, [None, n_steps, n_input])\ny = tf.placeholder(tf.float32, [None, n_classes])\n\n# Graph weights\nweights = {\n 'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights\n 'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))\n}\nbiases = {\n 'hidden': tf.Variable(tf.random_normal([n_hidden])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\npred = LSTM_RNN(x, weights, biases)\n\n# Loss, optimizer and evaluation\nl2 = lambda_loss_amount * sum(\n tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()\n) # L2 loss prevents this overkill neural network to overfit the data\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer\n\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n", "Hooray, now train the neural network:", "# To keep track of training's performance\ntest_losses = []\ntest_accuracies = []\ntrain_losses = []\ntrain_accuracies = []\n\n# Launch the graph\nsess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Perform Training steps with \"batch_size\" amount of example data at each loop\nstep = 1\nwhile step * batch_size <= training_iters:\n batch_xs = extract_batch_size(X_train, step, batch_size)\n batch_ys = one_hot(extract_batch_size(y_train, step, batch_size))\n\n # Fit training using batch data\n _, loss, acc = sess.run(\n [optimizer, cost, accuracy],\n feed_dict={\n x: batch_xs, \n y: batch_ys\n }\n )\n train_losses.append(loss)\n train_accuracies.append(acc)\n \n # Evaluate network only at some steps for faster training: \n if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):\n \n # To not spam console, show training accuracy/loss in this \"if\"\n print(\"Training iter #\" + str(step*batch_size) + \\\n \": Batch Loss = \" + \"{:.6f}\".format(loss) + \\\n \", Accuracy = {}\".format(acc))\n \n # Evaluation on the test set (no learning made here - just evaluation for diagnosis)\n loss, acc = sess.run(\n [cost, accuracy], \n feed_dict={\n x: X_test,\n y: one_hot(y_test)\n }\n )\n test_losses.append(loss)\n test_accuracies.append(acc)\n print(\"PERFORMANCE ON TEST SET: \" + \\\n \"Batch Loss = {}\".format(loss) + \\\n \", Accuracy = {}\".format(acc))\n\n step += 1\n\nprint(\"Optimization Finished!\")\n\n# Accuracy for test data\n\none_hot_predictions, accuracy, final_loss = sess.run(\n [pred, accuracy, cost],\n feed_dict={\n x: X_test,\n y: one_hot(y_test)\n }\n)\n\ntest_losses.append(final_loss)\ntest_accuracies.append(accuracy)\n\nprint(\"FINAL RESULT: \" + \\\n \"Batch Loss = 
{}\".format(final_loss) + \\\n \", Accuracy = {}\".format(accuracy))\n", "Training is good, but having visual insight is even better:\nOkay, let's plot this simply in the notebook for now.", "# (Inline plots: )\n%matplotlib inline\n\nfont = {\n 'family' : 'Bitstream Vera Sans',\n 'weight' : 'bold',\n 'size' : 18\n}\nmatplotlib.rc('font', **font)\n\nwidth = 12\nheight = 12\nplt.figure(figsize=(width, height))\n\nindep_train_axis = np.array(range(batch_size, (len(train_losses)+1)*batch_size, batch_size))\nplt.plot(indep_train_axis, np.array(train_losses), \"b--\", label=\"Train losses\")\nplt.plot(indep_train_axis, np.array(train_accuracies), \"g--\", label=\"Train accuracies\")\n\nindep_test_axis = np.append(\n np.array(range(batch_size, len(test_losses)*display_iter, display_iter)[:-1]),\n [training_iters]\n)\nplt.plot(indep_test_axis, np.array(test_losses), \"b-\", label=\"Test losses\")\nplt.plot(indep_test_axis, np.array(test_accuracies), \"g-\", label=\"Test accuracies\")\n\nplt.title(\"Training session's progress over iterations\")\nplt.legend(loc='upper right', shadow=True)\nplt.ylabel('Training Progress (Loss or Accuracy values)')\nplt.xlabel('Training iteration')\n\nplt.show()", "And finally, the multi-class confusion matrix and metrics!", "# Results\n\npredictions = one_hot_predictions.argmax(1)\n\nprint(\"Testing Accuracy: {}%\".format(100*accuracy))\n\nprint(\"\")\nprint(\"Precision: {}%\".format(100*metrics.precision_score(y_test, predictions, average=\"weighted\")))\nprint(\"Recall: {}%\".format(100*metrics.recall_score(y_test, predictions, average=\"weighted\")))\nprint(\"f1_score: {}%\".format(100*metrics.f1_score(y_test, predictions, average=\"weighted\")))\n\nprint(\"\")\nprint(\"Confusion Matrix:\")\nconfusion_matrix = metrics.confusion_matrix(y_test, predictions)\nprint(confusion_matrix)\nnormalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100\n\nprint(\"\")\nprint(\"Confusion matrix (normalised to % of total test data):\")\nprint(normalised_confusion_matrix)\nprint(\"Note: training and testing data is not equally distributed amongst classes, \")\nprint(\"so it is normal that more than a 6th of the data is correctly classifier in the last category.\")\n\n# Plot Results: \nwidth = 12\nheight = 12\nplt.figure(figsize=(width, height))\nplt.imshow(\n normalised_confusion_matrix, \n interpolation='nearest', \n cmap=plt.cm.rainbow\n)\nplt.title(\"Confusion matrix \\n(normalised to % of total test data)\")\nplt.colorbar()\ntick_marks = np.arange(n_classes)\nplt.xticks(tick_marks, LABELS, rotation=90)\nplt.yticks(tick_marks, LABELS)\nplt.tight_layout()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n\nsess.close()", "Conclusion\nOutstandingly, the final accuracy is of 91%! And it can peak to values such as 93.25%, at some moments of luck during the training, depending on how the neural network's weights got initialized at the start of the training, randomly. \nThis means that the neural networks is almost always able to correctly identify the movement type! Remember, the phone is attached on the waist and each series to classify has just a 128 sample window of two internal sensors (a.k.a. 2.56 seconds at 50 FPS), so it amazes me how those predictions are extremely accurate given this small window of context and raw data. I've validated and re-validated that there is no important bug, and the community used and tried this code a lot. 
(Note: be sure to report something in the issue tab if you find bugs, otherwise Quora, StackOverflow, and other StackExchange sites are the places for asking questions.)\nI specially did not expect such good results for guessing between the labels \"SITTING\" and \"STANDING\". Those are seemingly almost the same thing from the point of view of a device placed at waist level according to how the dataset was originally gathered. Thought, it is still possible to see a little cluster on the matrix between those classes, which drifts away just a bit from the identity. This is great.\nIt is also possible to see that there was a slight difficulty in doing the difference between \"WALKING\", \"WALKING_UPSTAIRS\" and \"WALKING_DOWNSTAIRS\". Obviously, those activities are quite similar in terms of movements. \nI also tried my code without the gyroscope, using only the 3D accelerometer's 6 features (and not changing the training hyperparameters), and got an accuracy of 87%. In general, gyroscopes consumes more power than accelerometers, so it is preferable to turn them off. \nImprovements\nIn another open-source repository of mine, the accuracy is pushed up to nearly 94% using a special deep LSTM architecture which combines the concepts of bidirectional RNNs, residual connections, and stacked cells. This architecture is also tested on another similar activity dataset. It resembles the nice architecture used in \"Google’s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation\", without an attention mechanism, and with just the encoder part - as a \"many to one\" architecture instead of a \"many to many\" to be adapted to the Human Activity Recognition (HAR) problem. I also worked more on the problem and came up with the LARNN, however it's complicated for just a little gain. Thus the current, original activity recognition project is simply better to use for its outstanding simplicity. \nIf you want to learn more about deep learning, I have also built a list of the learning ressources for deep learning which have revealed to be the most useful to me here. \nReferences\nThe dataset can be found on the UCI Machine Learning Repository: \n\nDavide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra and Jorge L. Reyes-Ortiz. A Public Domain Dataset for Human Activity Recognition Using Smartphones. 21th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2013. Bruges, Belgium 24-26 April 2013.\n\nCitation\nCopyright (c) 2016 Guillaume Chevalier. To cite my code, you can point to the URL of the GitHub repository, for example: \n\nGuillaume Chevalier, LSTMs for Human Activity Recognition, 2016, \nhttps://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition\n\nMy code is available for free and even for private usage for anyone under the MIT License, however I ask to cite for using the code. \nHere is the BibTeX citation code: \n@misc{chevalier2016lstms,\n title={LSTMs for human activity recognition},\n author={Chevalier, Guillaume},\n year={2016}\n}\nExtra links\nConnect with me\n\nLinkedIn\nTwitter\nGitHub\nQuora\nYouTube\nDev/Consulting\n\nLiked this project? Did it help you? Leave a star, fork and share the love!\nThis activity recognition project has been seen in:\n\nHacker News 1st page\nAwesome TensorFlow\nTensorFlow World\nAnd more.", "# Let's convert this notebook to a README automatically for the GitHub project's title page:\n!jupyter nbconvert --to markdown LSTM.ipynb\n!mv LSTM.md README.md" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
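The HAR notebook above is built on the TensorFlow 1.x `tf.contrib.rnn` API (placeholders, sessions), which no longer exists in TensorFlow 2. Purely as an illustrative sketch of the architecture it describes — a per-timestep ReLU projection feeding two stacked 32-unit LSTMs and a softmax classifier — here is an assumed `tf.keras` rewrite; the layer sizes and learning rate are taken from the notebook, everything else (including the omission of the L2 penalty) is an assumption:

```python
import tensorflow as tf

n_steps, n_input, n_hidden, n_classes = 128, 9, 32, 6

model = tf.keras.Sequential([
    tf.keras.Input(shape=(n_steps, n_input)),
    # per-timestep projection, analogous to the 'hidden' weights in LSTM_RNN above
    tf.keras.layers.Dense(n_hidden, activation="relu"),
    tf.keras.layers.LSTM(n_hidden, return_sequences=True),  # first stacked cell
    tf.keras.layers.LSTM(n_hidden),                          # second cell, keeps only the last output
    tf.keras.layers.Dense(n_classes, activation="softmax"),
])

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0025),
              loss="sparse_categorical_crossentropy",  # integer labels, no one-hot needed
              metrics=["accuracy"])

# model.fit(X_train, y_train, batch_size=1500, epochs=300,
#           validation_data=(X_test, y_test))
```

Keeping only the last LSTM output reproduces the "many to one" setup described in the notebook.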
telegraphic/allantools
examples/gradev-demo.ipynb
gpl-3.0
[ "GRADEV: gap-robust Allan deviation\nNotebook setup & package imports", "%matplotlib inline\n\nimport pylab as plt\nimport numpy as np\n# the examples below call allan.gradev(), so allantools is imported under that alias\nimport allantools as allan", "Gap-robust Allan deviation comparison\nCompute the GRADEV of a white phase noise. Compares two different\nscenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV.", "def example1():\n \"\"\"\n Compute the GRADEV of a white phase noise. Compares two different \n scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV.\n \"\"\"\n N = 1000\n f = 1\n y = np.random.randn(1,N)[0,:]\n x = np.linspace(1,len(y),len(y))\n x_ax, y_ax, err_l,err_h, ns = allan.gradev(y,f,x)\n plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps')\n \n \n y[int(0.4*N):int(0.6*N)] = np.nan # Simulate missing data (slice indices must be integers)\n x_ax, y_ax, err_l,err_h, ns = allan.gradev(y,f,x)\n plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps')\n plt.xscale('log')\n plt.yscale('log')\n plt.grid()\n plt.legend()\n plt.xlabel('Tau / s')\n plt.ylabel('Overlapping Allan deviation')\n plt.show()\n\nexample1()", "White phase noise\nCompute the GRADEV of a nonstationary white phase noise.", "def example2():\n \"\"\"\n Compute the GRADEV of a nonstationary white phase noise.\n \"\"\"\n N=1000 # number of samples\n f = 1 # data samples per second\n s=1+5.0/N*np.arange(0,N)\n y=s*np.random.randn(1,N)[0,:]\n x = np.linspace(1,len(y),len(y))\n x_ax, y_ax, err_l, err_h, ns = allan.gradev(y,f,x)\n plt.loglog(x_ax, y_ax,'b.',label=\"No gaps\")\n y[int(0.4*N):int(0.6*N)] = np.nan # Simulate missing data\n x_ax, y_ax, err_l, err_h, ns = allan.gradev(y,f,x)\n plt.loglog(x_ax, y_ax,'g.',label=\"With gaps\")\n plt.grid()\n plt.legend()\n plt.xlabel('Tau / s')\n plt.ylabel('Overlapping Allan deviation')\n plt.show()\n\nexample2()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
ethen8181/machine-learning
model_selection/partial_dependence/partial_dependence.ipynb
mit
[ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Partial-Dependence-Plot\" data-toc-modified-id=\"Partial-Dependence-Plot-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Partial Dependence Plot</a></span><ul class=\"toc-item\"><li><span><a href=\"#Individual-Conditional-Expectation-(ICE)-Plot\" data-toc-modified-id=\"Individual-Conditional-Expectation-(ICE)-Plot-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Individual Conditional Expectation (ICE) Plot</a></span></li><li><span><a href=\"#Implementation\" data-toc-modified-id=\"Implementation-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Implementation</a></span></li></ul></li><li><span><a href=\"#Reference\" data-toc-modified-id=\"Reference-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Reference</a></span></li></ul></div>", "# code for loading the format for the notebook\nimport os\n\n# path : store the current path to convert back to it later\npath = os.getcwd()\nos.chdir(os.path.join('..', '..', 'notebook_format'))\n\nfrom formats import load_style\nload_style(css_style = 'custom2.css', plot_style = False)\n\nos.chdir(path)\n\n# 1. magic for inline plot\n# 2. magic to print version\n# 3. magic so that the notebook will reload external python modules\n# 4. magic to enable retina (high resolution) plots\n# https://gist.github.com/minrk/3301035\n%matplotlib inline\n%load_ext watermark\n%load_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\n%watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,sklearn", "Partial Dependence Plot\nDuring the talk, Youtube: PyData - Random Forests Best Practices for the Business World, one of the best practices that the speaker mentioned when using tree-based models is to check for directional relationships. When using non-linear machine learning algorithms, such as popular tree-based models random forest and gradient boosted trees, it can be hard to understand the relations between predictors and model outcome as they do not give us handy coefficients like linear-based models. For example, in terms of random forest, all we get is the feature importance. Although based on that information, we can tell which feature is significantly influencing the outcome based on the importance calculation, it does not inform us in which direction is the predictor influencing outcome. In this notebook, we'll be exploring Partial dependence plot (PDP), a model agnostic technique that gives us an approximate directional influence for a given feature that was used in the model. Note much of the explanation is \"borrowed\" from the blog post at the following link, Blog: Introducing PDPbox, this documentation aims to improve upon it by giving a cleaner implementation.\nPartial dependence plot (PDP) aims to visualize the marginal effect of a given predictor towards the model outcome by plotting out the average model outcome in terms of different values of the predictor. Let's first gain some intuition of how it works with a made up example. 
Assume we have a data set that only contains three data points and three features (A, B, C) as shown below.\n<img src=\"img/pd1.png\" width=\"30%\" height=\"30%\">\nIf we wish to see how feature A is influencing the prediction Y, what PDP does is to generate a new data set as follow. (here we assume that feature A only has three unique values: A1, A2, A3)\n<img src=\"img/pd2.png\" width=\"30%\" height=\"30%\">\nWe then perform the prediction as usual with this new set of data. As we can imagine, PDP would generate num_rows * num_grid_points (here, the number of grid point equals the number of unique values of the target feature, more on this later) number of predictions and average them for each unique value of Feature A.\n<img src=\"img/pd3.png\" width=\"30%\" height=\"30%\">\nIn the end, PDP would only plot out the average predictions for each unique value of our target feature.\n<img src=\"img/pd4.png\" width=\"30%\" height=\"30%\">\nLet's now formalize this idea with some notation. The partial dependence function is defined as:\n$$\n\\begin{align}\n\\hat{f}{x_S}(x_S) = E{x_C} \\left[ f(x_S, x_C) \\right]\n\\end{align}\n$$\nThe term $x_S$ denotes the set of features for which the partial dependence function should be plotting and $x_C$ are all other features that were used in the machine learning model $f$. In other words, if there were $p$ predictors, $S$ is a subset of our $p$ predictors, $S \\subset \\left{ x_1, x_2, \\ldots, x_p \\right}$, $C$ would be complementing $S$ such that $S \\cup C = \\left{x_1, x_2, \\ldots, x_p\\right}$. The function above is then estimated by calculating averages in the training data, which is also known as Monte Carlo method:\n$$\n\\begin{align}\n\\hat{f}{x_S}(x_S) = \\frac{1}{n} \\sum{i=1}^n f(x_S, x_{Ci})\n\\end{align}\n$$\nWhere $\\left{x_{C1}, x_{C2}, \\ldots, x_{CN}\\right}$ are the values of $X_C$ occurring over all observations in the training data. In other words, in order to calculate the partial dependence of a given variable (or variables), the entire training set must be utilized for every set of joint values. For classification, where the machine learning model outputs probabilities, the partial dependence function displays the probability for a certain class given different values for features $x_s$, a straightforward way to handle multi-class problems is to plot one line per class.\nIndividual Conditional Expectation (ICE) Plot\nAs an extension of a PDP, ICE plot visualizes the relationship between a feature and the predicted responses for each observation. While a PDP visualizes the averaged relationship between features and predicted responses, a set of ICE plots disaggregates the averaged information and visualizes an individual dependence for each observation. Hence, instead of only plotting out the average predictions, ICEbox displays all individual lines. (three lines in total in this case)\n<img src=\"img/pd5.png\" width=\"30%\" height=\"30%\">\nThe authors of the Paper: A. Goldstein, A. Kapelner, J. Bleich, E. Pitkin Peeking Inside the Black Box: Visualizing Statistical Learning with Plots of Individual Conditional Expectation claims with everything displayed in its raw state, any interesting discovers wouldn’t be shielded because of the averaging inherented with PDP. 
A vivid example from the paper is shown below:\n<img src=\"img/pd6.png\" width=\"50%\" height=\"50%\">\nIn this example, if we only look at the PDP in Figure b, we would think that on average, the feature X2 is not meaningfully associated with the our target response variable Y. However, if judging from the scatter plot showed in Figure a, this conclusion is plainly wrong. Now if we were to plot out the individual estimated conditional expectation curves, everything becomes more obvious.\n<img src=\"img/pd7.png\" width=\"30%\" height=\"30%\">\nAfter having an understand of the procedure for PDP and ICE plot, we can observe that:\n\nPDP is a global method, it takes into account all instances and makes a statement about the global relationship of a feature with the predicted outcome.\nOne of the main advantage of PDP is that it can be used to interpret the result of any \"black box\" learning methods.\nPDP can be quite computationally expensive when the data set becomes large.\nOwing to the limitations of computer graphics, and human perception, the size of the subsets $x_S$ must be small (l ≈ 1,2,3). There are of course a large number of such subsets, but only those chosen from among the usually much smaller set of highly relevant predictors are likely to be informative.\nPDP can obfuscate relationship that comes from interactions. PDPs show us how the average relationship between feature $x_S$ and $\\hat{y}$ looks like. This works well only in cases where the interactions between $x_S$ and the remaining features $x_C$ are weak. In cases where interactions do exist, the ICE plot may give a lot more insight of the underlying relationship.\n\nImplementation\nWe'll be using the titanic dataset (details of the dataset is listed in the link) to test our implementation.", "# we download the training data and store it\n# under the `data` directory\ndata_dir = Path('data')\ndata_path = data_dir / 'train.csv'\ndata = pd.read_csv(data_path)\nprint('dimension: ', data.shape)\nprint('features: ', data.columns)\ndata.head()\n\n# some naive feature engineering\ndata['Age'] = data['Age'].fillna(data['Age'].median())\ndata['Embarked'] = data['Embarked'].fillna('S')\ndata['Sex'] = data['Sex'].apply(lambda x: 1 if x == 'male' else 0)\ndata = pd.get_dummies(data, columns = ['Embarked'])\n\n# features/columns that are used\nlabel = data['Survived']\nfeatures = [\n 'Pclass', 'Sex',\n 'Age', 'SibSp',\n 'Parch', 'Fare',\n 'Embarked_C', 'Embarked_Q', 'Embarked_S']\ndata = data[features]\n\nX_train, X_test, y_train, y_test = train_test_split(\n data, label, test_size = 0.2, random_state = 1234, stratify = label)\n\n# fit a baseline random forest model and show its top 2 most important features\nrf = RandomForestClassifier(n_estimators = 50, random_state = 1234)\nrf.fit(X_train, y_train)\n\nprint('top 2 important features:')\nimp_index = np.argsort(rf.feature_importances_)\nprint(features[imp_index[-1]])\nprint(features[imp_index[-2]])", "Aforementioned, tree-based models lists out the top important features, but it is not clear whether they have a positive or negative impact on the result. 
This is where tools such as partial dependence plots can aid us communicate the results better to others.", "from partial_dependence import PartialDependenceExplainer\nplt.rcParams['figure.figsize'] = 16, 9\n\n\n# we specify the feature name and its type to fit the partial dependence\n# result, after fitting the result, we can call .plot to visualize it\n# since this is a binary classification model, when we call the plot\n# method, we tell it which class are we targeting, in this case 1 means\n# the passenger did indeed survive (more on centered argument later)\npd_explainer = PartialDependenceExplainer(estimator = rf, verbose = 0)\npd_explainer.fit(data, feature_name = 'Sex', feature_type = 'cat')\npd_explainer.plot(centered = False, target_class = 1)\nplt.show()", "Hopefully, we can agree that the partial dependence plot makes intuitive sense, as for the categorical feature Sex, 1 indicates that the passenger was a male. And we know that during the titanic accident, the majority of the survivors were female passenger, thus the plot is telling us male passengers will on average have around 40% chance lower of surviving when compared with female passengers. Also instead of only plotting the \"partial dependence\" plot, the plot also fills between the standard deviation range. This is essentially borrowing the idea from ICE plot that only plotting the average may obfuscate the relationship.\nCentered plot can be useful when we are not interested in seeing the absolute change of a predicted value, but rather the difference in prediction compared to a fixed point of the feature range.", "# centered = True is actually the default\npd_explainer.plot(centered = True, target_class = 1)\nplt.show()", "We can perform the same process for numerical features such as Fare. We know that more people from the upper class survived, and people from the upper class generally have to pay more Fare to get onboard the titanic. The partial dependence plot below also depicts this trend.", "pd_explainer.fit(data, feature_name = 'Fare', feature_type = 'num')\npd_explainer.plot(target_class = 1)\nplt.show()", "If you prefer to create your own visualization, you can call the results_ attribute to access the partial dependence result. And for those that are interested in the implementation details, the code can be obtained at the following link.\nWe'll conclude our discussion on parital dependence plot by providing a link to another blog that showcases this method's usefulness in ensuring the behavior of the new machine learning model does intuitively and logically match our intuition and does not differ significantly from a baseline model. Blog: Using Partial Dependence to Compare Sort Algorithms\nReference\n\nBlog: Introducing PDPbox\nOnline Book: Partial Dependence Plot (PDP)\nMathworks Documentation: plotPartialDependence\nGithub: PDPbox - python partial dependence plot toolbox" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
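The Monte Carlo average behind the partial dependence estimate in the notebook above is simple enough to write out without any helper library: fix the target feature at each grid value for every row, predict, and average. A minimal sketch for a single numeric feature, assuming a fitted binary classifier with `predict_proba` and a pandas DataFrame of features (the names `model`, `X` and the commented usage are illustrative, not part of the original code):

```python
import numpy as np

def partial_dependence(model, X, feature, grid_points=20, target_class=1):
    """Average predicted probability over all rows for each grid value of `feature`."""
    grid = np.linspace(X[feature].min(), X[feature].max(), grid_points)
    averages = []
    for value in grid:
        X_mod = X.copy()
        X_mod[feature] = value  # overwrite the feature column for every observation
        preds = model.predict_proba(X_mod)[:, target_class]
        averages.append(preds.mean())  # the 1/n sum over the complement features
    return grid, np.array(averages)

# e.g. grid, curve = partial_dependence(rf, data, 'Fare')
# plt.plot(grid, curve)
```

Plotting the individual `preds` lines instead of their mean gives the ICE view discussed above.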
eshlykov/mipt-day-after-day
statistics/hw-13/hw-13.3.ipynb
unlicense
[ "Theoretical homework assignment 13\nProblem 3\nUsing the method of linear regression, construct an approximation of the function $f$ by a polynomial of degree three from the following data:\n|$f$|3.9|5.0|5.7|6.5|7.1|7.6|7.8|8.1|8.4| \n|---|---|---|---|---|---|---|---|---|---|\n|$x$|4.0|5.2|6.1|7.0|7.9|8.6|8.9|9.5|9.9|", "import numpy\nimport scipy\nfrom scipy.linalg import inv\nimport matplotlib.pyplot\n%matplotlib inline", "Solution.\nClearly, we need the model $y=\\theta_0 + \\theta_1 x + \\theta_2 x^2 + \\theta_3 x^3$.", "n = 9 # Sample size\nk = 4 # Number of parameters", "Consider the response.", "Y = numpy.array([3.9, 5.0, 5.7, 6.5, 7.1, 7.6, 7.8, 8.1, 8.4]).reshape(n, 1)\nprint(Y)", "Consider the regressor.", "x = numpy.array([4.0, 5.2, 6.1, 7.0, 7.9, 8.6, 8.9, 9.5, 9.9])\nX = numpy.ones((n, k))\nX[:, 1] = x\nX[:, 2] = x ** 2\nX[:, 3] = x ** 3\nprint(X)", "Let us use the classical formula to obtain the estimate.", "Theta = inv(X.T @ X) @ X.T @ Y\nprint(Theta)", "Let us plot the resulting function and mark the sample points.", "x = numpy.linspace(3.5, 10.4, 1000)\ny = Theta[0] + x * Theta[1] + x ** 2 * Theta[2] + x ** 3 * Theta[3]\n\nmatplotlib.pyplot.figure(figsize=(20, 8))\nmatplotlib.pyplot.plot(x, y, color='turquoise', label='Prediction', linewidth=2.5)\nmatplotlib.pyplot.scatter(X[:, 1], Y, s=40.0, label='Sample', color='blue', alpha=0.5)\nmatplotlib.pyplot.legend()\nmatplotlib.pyplot.title('Function $f(x)$')\nmatplotlib.pyplot.grid()\nmatplotlib.pyplot.show()", "Conclusion. The cubic polynomial obtained by linear regression approximates the given function very well. The plot shows, however, that a linear polynomial could also approximate it well.\n\n<font color=\"#808080\"> It is strange that nothing more is required in this assignment, but I did what was asked. I even made the plot.</font>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
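The homework above computes the estimate with the explicit normal equation `inv(X.T @ X) @ X.T @ Y`. As a small sanity-check sketch (not part of the original assignment), the same cubic fit can be obtained with a least-squares solver, which avoids forming the inverse:

```python
import numpy as np

x = np.array([4.0, 5.2, 6.1, 7.0, 7.9, 8.6, 8.9, 9.5, 9.9])
y = np.array([3.9, 5.0, 5.7, 6.5, 7.1, 7.6, 7.8, 8.1, 8.4])

# design matrix with columns [1, x, x^2, x^3], matching the model above
X = np.vander(x, N=4, increasing=True)

theta, residuals, rank, sv = np.linalg.lstsq(X, y, rcond=None)
print(theta)  # should agree with the normal-equation estimate up to rounding
```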
pycrystem/pycrystem
doc/demos/02 GaAs Nanowire - Phase Mapping - Orientation Mapping.ipynb
gpl-3.0
[ "Phase/Orientation Mapping\nThis tutorial demonstrates how to achieve phase and orientation mapping via scanning electron diffraction using both pattern and vector matching.\nThe data was acquired from a GaAs nanowire displaying polymorphism between zinc blende and wurtzite structures.\nThis functionaility has been checked to run in pyxem-0.13.0 (Feb 2021). Bugs are always possible, do not trust the code blindly, and if you experience any issues please report them here: https://github.com/pyxem/pyxem-demos/issues\n\n<a href='#loa'> Load & Inspect Data</a>\n<a href='#pre'> Pre-processing</a>\n<a href='#tem'> Template matching</a>\n<a href='#tema'> [Build Template Library]</a>\n<a href='#temb'>[Indexing]</a>\n\n\n<a href='#vec'> Vector Matching</a>\n<a href='#veca'> [Build Vector Library]</a>\n<a href='#vecb'>[Indexing Vectors]</a>\n\n\n\nImport pyxem and other required libraries", "%matplotlib inline\n\nimport numpy as np\nimport diffpy.structure\nimport pyxem as pxm\nimport hyperspy.api as hs\n\naccelarating_voltage = 200 # kV\ncamera_length = 0.2 # m\ndiffraction_calibration = 0.032 # px / Angstrom", "<a id='loa'></a>\n1. Loading and Inspection\nLoad the demo data", "dp = hs.load('./data/02/polymorphic_nanowire.hdf5')\ndp", "Set data type, scale intensity range and set calibration", "dp.data = dp.data.astype('float64')\ndp.data *= 1 / dp.data.max()", "Inspect metadata", "dp.metadata", "Plot an interactive virtual image to inspect data", "roi = hs.roi.CircleROI(cx=72, cy=72, r_inner=0, r=2)\ndp.plot_integrated_intensity(roi=roi, cmap='viridis')", "<a id='pre'></a>\n2. Pre-processing\nApply affine transformation to correct for off axis camera geometry", "scale_x = 0.995\nscale_y = 1.031\noffset_x = 0.631\noffset_y = -0.351\ndp.apply_affine_transformation(np.array([[scale_x, 0, offset_x],\n [0, scale_y, offset_y],\n [0, 0, 1]]))", "Perform difference of gaussian background subtraction with various parameters on one selected diffraction pattern and plot to identify good parameters", "from pyxem.utils.expt_utils import investigate_dog_background_removal_interactive\n\ndp_test_area = dp.inav[0, 0]\n\ngauss_stddev_maxs = np.arange(2, 12, 0.2) # min, max, step\ngauss_stddev_mins = np.arange(1, 4, 0.2) # min, max, step\n\ninvestigate_dog_background_removal_interactive(dp_test_area,\n gauss_stddev_maxs,\n gauss_stddev_mins)", "Remove background using difference of gaussians method with parameters identified above", "dp = dp.subtract_diffraction_background('difference of gaussians',\n min_sigma=2, max_sigma=8,\n lazy_result=False)", "Perform further adjustments to the data ranges", "dp.data -= dp.data.min()\ndp.data *= 1 / dp.data.max()", "Set diffraction calibration and scan calibration", "dp = pxm.signals.ElectronDiffraction2D(dp) #this is needed because of a bug in the code\ndp.set_diffraction_calibration(diffraction_calibration)\ndp.set_scan_calibration(10)", "<a id='tem'></a>\n3. 
Pattern Matching\nPattern matching generates a database of simulated diffraction patterns and then compares all simulated patterns against each experimental pattern to find the best match\nImport generators required for simulation and indexation", "from diffsims.libraries.structure_library import StructureLibrary\nfrom diffsims.generators.diffraction_generator import DiffractionGenerator\nfrom diffsims.generators.library_generator import DiffractionLibraryGenerator\n\nfrom diffsims.generators.zap_map_generator import get_rotation_from_z_to_direction\nfrom diffsims.generators.rotation_list_generators import get_grid_around_beam_direction\n\nfrom pyxem.generators.indexation_generator import TemplateIndexationGenerator", "3.1. Define Library of Structures & Orientations\nDefine the crystal phases to be included in the simulated library", "structure_zb = diffpy.structure.loadStructure('./data/02/GaAs_mp-2534_conventional_standard.cif')\nstructure_wz = diffpy.structure.loadStructure('./data/02/GaAs_mp-8883_conventional_standard.cif')", "Create a basic rotations list.", "za110c = get_rotation_from_z_to_direction(structure_zb, [1,1,0])\nrot_list_cubic = get_grid_around_beam_direction(beam_rotation=za110c, resolution=1, angular_range=(0,180))\n\nza110h = get_rotation_from_z_to_direction(structure_wz, [1,1,0])\nrot_list_hex = get_grid_around_beam_direction(beam_rotation=za110h, resolution=1, angular_range=(0,180))", "Construct a StructureLibrary defining crystal structures and orientations for which diffraction will be simulated", "struc_lib = StructureLibrary(['ZB','WZ'],\n [structure_zb,structure_wz],\n [rot_list_cubic,rot_list_hex])", "<a id='temb'></a>\n3.2. Simulate Diffraction for all Structures & Orientations\nDefine a diffsims DiffractionGenerator with diffraction simulation parameters", "diff_gen = DiffractionGenerator(accelerating_voltage=accelarating_voltage)", "Initialize a diffsims DiffractionLibraryGenerator", "lib_gen = DiffractionLibraryGenerator(diff_gen)", "Calulate library of diffraction patterns for all phases and unique orientations", "target_pattern_dimension_pixels = dp.axes_manager.signal_shape[0]\nhalf_size = target_pattern_dimension_pixels // 2\nreciprocal_radius = diffraction_calibration*(half_size - 1)\n\ndiff_lib = lib_gen.get_diffraction_library(struc_lib,\n calibration=diffraction_calibration,\n reciprocal_radius=reciprocal_radius,\n half_shape=(half_size, half_size),\n max_excitation_error=1/10,\n with_direct_beam=False)", "Optionally, save the library for later use.", "#diff_lib.pickle_library('./GaAs_cubic_hex.pickle')", "If saved, the library can be loaded as follows", "#from diffsims.libraries.diffraction_library import load_DiffractionLibrary\n#diff_lib = load_DiffractionLibrary('./GaAs_cubic_hex.pickle', safety=True)", "<a id='temb'></a>\n3.3. 
Pattern Matching Indexation\nInitialize TemplateIndexationGenerator with the experimental data and diffraction library and perform correlation, returning the n_largest matches with highest correlation.\n<div class=\"alert alert-block alert-warning\"><b>Note:</b> This workflow has been changed from previous version, make sure you have pyxem 0.13.0 or later installed</div>", "indexer = TemplateIndexationGenerator(dp, diff_lib)\nindexation_results = indexer.correlate(n_largest=3)", "Check the solutions via a plotting (can be slow, so we don't run by default)", "if False:\n indexation_results.plot_best_matching_results_on_signal(dp, diff_lib)", "Get crystallographic map from indexation results", "crystal_map = indexation_results.to_crystal_map()", "crystal_map is now a CrystalMap object, which comes from orix, see their documentation for details. Below we lift their code to plot a phase map", "from matplotlib import pyplot as plt\nfrom orix import plot\n\nfig, ax = plt.subplots(subplot_kw=dict(projection=\"plot_map\"))\nim = ax.plot_map(crystal_map)", "<a id='vec'></a>\n4. Vector Matching\n<div class=\"alert alert-block alert-danger\"><b>Note:</b> This workflow is less well developed than the template matching one, and may well be broken</div>\n\nVector matching generates a database of vector pairs (magnitues and inter-vector angles) and then compares all theoretical values against each measured diffraction vector pair to find the best match\nImport generators required for simulation and indexation", "from diffsims.generators.library_generator import VectorLibraryGenerator\nfrom diffsims.libraries.structure_library import StructureLibrary\nfrom diffsims.libraries.vector_library import load_VectorLibrary\n\nfrom pyxem.generators.indexation_generator import VectorIndexationGenerator\n\nfrom pyxem.generators.subpixelrefinement_generator import SubpixelrefinementGenerator\nfrom pyxem.signals.diffraction_vectors import DiffractionVectors", "<a id='veca'></a>\n4.1. Define Library of Structures\nDefine crystal structure for which to determine theoretical vector pairs", "structure_zb = diffpy.structure.loadStructure('./data/02/GaAs_mp-2534_conventional_standard.cif')\nstructure_wz = diffpy.structure.loadStructure('./data/02/GaAs_mp-8883_conventional_standard.cif')\n\nstructure_library = StructureLibrary(['ZB', 'WZ'],\n [structure_zb, structure_wz],\n [[], []])", "Initialize VectorLibraryGenerator with structures to be considered", "vlib_gen = VectorLibraryGenerator(structure_library)", "Determine VectorLibrary with all vectors within given reciprocal radius", "reciprocal_radius = diffraction_calibration*(half_size - 1)/2\n\nreciprocal_radius\n\nvec_lib = vlib_gen.get_vector_library(reciprocal_radius)", "Optionally, save the library for later use", "#vec_lib.pickle_library('./GaAs_cubic_hex_vectors.pickle')\n\n#vec_lib = load_VectorLibrary('./GaAs_cubic_hex_vectors.pickle',safety=True)", "4.2. Find Diffraction Peaks\nTune peak finding parameters interactively", "dp.find_peaks(interactive=False)", "Perform peak finding on the data with parameters from above", "peaks = dp.find_peaks(method='difference_of_gaussian',\n min_sigma=0.005,\n max_sigma=5.0,\n sigma_ratio=2.0,\n threshold=0.06,\n overlap=0.8,\n interactive=False)", "coaxing peaks back into a DiffractionVectors", "peaks = DiffractionVectors(peaks).T", "peaks now contain the 2D positions of the diffraction spots on the detector. 
The vector matching method works in 3D coordinates, which are found by projecting the detector positions back onto the Ewald sphere. Because the methods that follow are slow, we constrain ourselves to looking at a smaller subset of the data", "peaks = peaks.inav[:2,:2]\n\npeaks.calculate_cartesian_coordinates?\n\npeaks.calculate_cartesian_coordinates(accelerating_voltage=accelarating_voltage,\n camera_length=camera_length)", "<a id='vecb'></a>\n4.3. Vector Matching Indexation\nInitialize VectorIndexationGenerator with the experimental data and vector library and perform indexation using n_peaks_to_index and returning the n_best indexation results.\n<div class=\"alert alert-block alert-danger\"><b>Alert: This code no longer works on this example, and may even be completely broken. Caution is advised.</b> </div>", "#indexation_generator = VectorIndexationGenerator(peaks, vec_lib)\n\n#indexation_results = indexation_generator.index_vectors(mag_tol=3*diffraction_calibration,\n# angle_tol=4, # degree\n# index_error_tol=0.2,\n# n_peaks_to_index=7,\n# n_best=5,\n# show_progressbar=True)\n\n#indexation_results.data", "Refine all crystal orientations for improved phase reliability and orientation reliability maps.", "#refined_results = indexation_generator.refine_n_best_orientations(indexation_results,\n# accelarating_voltage=accelarating_voltage,\n# camera_length=camera_length,\n# index_error_tol=0.2,\n# vary_angles=True,\n# vary_scale=True,\n# method=\"leastsq\")", "Get crystallographic map from optimized indexation results.", "#crystal_map = refined_results.get_crystallographic_map()", "See the object's documentation for further details", "#crystal_map?" ]
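As referenced in the pattern matching introduction above, each simulated template is scored against each experimental pattern and the n_largest highest-scoring templates are kept. The snippet below is a toy illustration of such a correlation score, not the pyxem/diffsims implementation: the template is a hypothetical list of spot pixel positions and intensities, and `image` stands in for a single experimental pattern.

```python
# Toy illustration only (not the pyxem code): score one template against one pattern.
import numpy as np

def correlation_score(image, spot_rows, spot_cols, spot_intensities):
    """Normalised dot product between the pattern values at the template's
    spot positions and the template's simulated intensities."""
    signal = image[spot_rows, spot_cols]
    norm = np.linalg.norm(signal) * np.linalg.norm(spot_intensities)
    return np.sum(signal * spot_intensities) / (norm + 1e-12)

# Hypothetical example: a 128x128 pattern with two bright spots and a template
# that predicts spots at exactly those positions.
image = np.zeros((128, 128))
image[64, 80] = 1.0
image[64, 48] = 0.8
rows, cols = np.array([64, 64]), np.array([80, 48])
print(correlation_score(image, rows, cols, np.array([1.0, 1.0])))  # close to 1
```

Indexation then simply keeps, for every probe position, the orientations and phases whose templates give the largest scores.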
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
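Section 4 above describes vector matching as building a database of vector pairs, each characterised by two magnitudes and an inter-vector angle, which are compared against the measured diffraction vectors. The sketch below shows how such pair quantities can be computed with plain numpy; it is an illustration only, not the diffsims VectorLibrary code, and `peak_coords` is a hypothetical stand-in for the calibrated peak positions at one probe position (the same construction applies to the 3D coordinates obtained after projection onto the Ewald sphere).

```python
# Illustrative sketch only: the (magnitude, magnitude, angle) triplets that
# vector matching compares against the library entries.
import numpy as np
from itertools import combinations

def vector_pairs(peak_coords):
    """Return (|g1|, |g2|, inter-vector angle in degrees) for every pair of peaks."""
    pairs = []
    for i, j in combinations(range(len(peak_coords)), 2):
        g1, g2 = peak_coords[i], peak_coords[j]
        m1, m2 = np.linalg.norm(g1), np.linalg.norm(g2)
        # Clip guards against round-off pushing the cosine slightly outside [-1, 1].
        cos_angle = np.clip(np.dot(g1, g2) / (m1 * m2), -1.0, 1.0)
        pairs.append((m1, m2, np.degrees(np.arccos(cos_angle))))
    return np.array(pairs)

# Three made-up peaks in reciprocal-space units:
print(vector_pairs(np.array([[0.25, 0.0], [0.0, 0.25], [-0.25, 0.0]])))
```

Matching then amounts to finding library entries whose magnitudes and angles agree with the measured triplets to within tolerances such as the mag_tol and angle_tol arguments passed to index_vectors above.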
davofis/computational_seismology
05_pseudospectral/cheby_derivative_solution.ipynb
gpl-3.0
[ "<div style='background-image: url(\"../../share/images/header.svg\") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>\n <div style=\"float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px\">\n <div style=\"position: relative ; top: 50% ; transform: translatey(-50%)\">\n <div style=\"font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%\">Computational Seismology</div>\n <div style=\"font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)\">Numerical derivatives based on a derivative matrix</div>\n </div>\n </div>\n</div>\n\nSeismo-Live: http://seismo-live.org\nAuthors:\n\nFabian Linder (@fablindner)\nHeiner Igel (@heinerigel)\nDavid Vargas (@dvargas)\n\n\nBasic Equations\nCalculating a derivative using the differentiation theorem of the Fourier Transform is in the mathematical sense a convolution of the function $f(x)$ with $ik$, where $k$ is the wavenumber and $i$ the imaginary unit. This can also be formulated as a matrix-vector product involving so-called Toeplitz matrices. An elegant (but inefficient) way of performing a derivative operation on a space-dependent function described on the Chebyshev collocation points is by defining a derivative matrix $D_{ij}$\n$$ D_{ij} \\ = \\ \\frac{2 N^2 + 1}{6} \\hspace{1.5cm} \\text{for i = j = 0} $$\n$$ D_{ij} \\ = \\ -\\frac{2 N^2 + 1}{6} \\hspace{1.5cm} \\text{for i = j = N} $$\n$$ D_{ij} \\ = \\ -\\frac{1}{2} \\frac{x_i}{1-x_i^2} \\hspace{1.5cm} \\text{for i = j = 1,2,...,N-1} $$\n$$ D_{ij} \\ = \\ \\frac{c_i}{c_j} \\frac{(-1)^{i+j}}{x_i - x_j} \\hspace{1.5cm} \\text{for i $\\neq$ j =\n0,1,...,N}$$\nwhere $N+1$ is the number of Chebyshev collocation points $ \\ x_i = cos(i\\pi / N)$, $ \\ i=0,...,N$ and the $c_i$ are given as\n$$ c_i = 2 \\hspace{1.5cm} \\text{for i = 0 or N} $$\n$$ c_i = 1 \\hspace{1.5cm} \\text{otherwise} $$\nThis differentiation matrix allows us to write the derivative of the function $f_i = f(x_i)$ (possibly depending on time) simply as\n$$\\partial_x f_i = D_{ij} \\ f_j$$\nwhere the right-hand side is a matrix-vector product, and the Einstein summation convention applies.", "# This is a configuration step for the exercise. Please run it before calculating the derivative!\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Show the plots in the Notebook.\nplt.switch_backend(\"nbagg\")", "Exercise 1\nDefine a python function called \"get_cheby_matrix(nx)\" that initializes the Chebyshev derivative matrix $D_{ij}$", "# Function for setting up the Chebyshev derivative matrix\ndef get_cheby_matrix(nx):\n cx = np.zeros(nx+1)\n x = np.zeros(nx+1)\n for ix in range(0,nx+1):\n x[ix] = np.cos(np.pi * ix / nx)\n \n cx[0] = 2.\n cx[nx] = 2.\n cx[1:nx] = 1.\n \n D = np.zeros((nx+1,nx+1))\n for i in range(0, nx+1):\n for j in range(0, nx+1):\n if i==j and i!=0 and i!=nx:\n D[i,i]=-x[i]/(2.0*(1.0-x[i]*x[i]))\n elif i!=j:\n D[i,j]=(cx[i]*(-1)**(i+j))/(cx[j]*(x[i]-x[j]))\n \n # The corner entries are set explicitly (this also avoids the division by zero\n # that the off-diagonal formula would produce for i = j = 0 and i = j = N)\n D[0,0] = (2.*nx**2+1.)/6.\n D[nx,nx] = -D[0,0]\n return D ", "Exercise 2\nCalculate the numerical derivative by applying the differentiation matrix $D_{ij}$. Define an arbitrary function (e.g. a Gaussian) and initialize its analytical derivative on the Chebyshev collocation points. Calculate the numerical derivative and the difference to the analytical solution. Vary the wavenumber content of the analytical function. Does it make a difference? 
Why is the numerical result not entirely exact?", "# Initialize arbitrary test function on Chebyshev collocation points\nnx = 200 # Number of grid points\nx = np.zeros(nx+1)\nfor ix in range(0,nx+1):\n x[ix] = np.cos(ix * np.pi / nx) \ndxmin = min(abs(np.diff(x)))\ndxmax = max(abs(np.diff(x)))\n\n# Function example: Gaussian\n# Width of Gaussian\ns = .2 \n# Gaussian function (modify!)\nf = np.exp(-1/s**2 * x**2)\n\n# Initialize differentiation matrix\nD = get_cheby_matrix(nx)\n\n# Analytical derivative\ndf_ana = -2/s**2 * x * np.exp(-1/s**2 * x**2)\n\n# Calculate numerical derivative using differentiation matrix\ndf_num = D @ f\n\n# To make the error visible, it is multiplied by 10^12\ndf_err = 1e12*(df_ana - df_num)\n\n# Calculate error between analytical and numerical solution\nerr = np.sum((df_num - df_ana)**2) / np.sum(df_ana**2) * 100\nprint('Error: %s' %err)", "Exercise 3\nNow that the numerical derivative is available, we can visually inspect our results. Make a plot of both the analytical and numerical derivatives, together with the difference error.", "# Plot analytical and numerical derivatives\n# ---------------------------------------------------------------\n\nplt.subplot(2,1,1)\nplt.plot(x, f, \"g\", lw = 1.5, label='Gaussian')\nplt.legend(loc='upper right', shadow=True)\nplt.xlabel('$x$') \nplt.ylabel('$f(x)$')\n\nplt.subplot(2,1,2)\nplt.plot(x, df_ana, \"b\", lw = 1.5, label='Analytical')\nplt.plot(x, df_num, 'k--', lw = 1.5, label='Numerical')\nplt.plot(x, df_err, \"r\", lw = 1.5, label='Difference')\nplt.legend(loc='upper right', shadow=True)\nplt.xlabel('$x$') \nplt.ylabel('$\\partial_x f(x)$')\n\nplt.show()" ]
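Regarding the closing question of Exercise 2: D @ f is the exact derivative, evaluated at the nodes, of the degree-N polynomial interpolating f at the Chebyshev points, so it is exact up to floating-point round-off whenever f itself is a polynomial of degree at most N. For the Gaussian, the small residual therefore comes from truncating a non-polynomial function, plus round-off amplified by the large, O(N^2), entries of D. A quick check of the polynomial case is sketched below; it assumes get_cheby_matrix() from Exercise 1 has already been run.

```python
# Sketch: verify the derivative matrix on a low-degree polynomial.
# Assumes get_cheby_matrix() from Exercise 1 is defined in the current session.
import numpy as np

nx = 16
x = np.cos(np.arange(nx + 1) * np.pi / nx)   # Chebyshev collocation points x_i = cos(i*pi/N)
D = get_cheby_matrix(nx)

f = x**3                      # polynomial of degree 3 <= N
df_exact = 3 * x**2           # analytical derivative
df_num = D @ f

# Expected to be near machine precision, unlike the Gaussian example above.
print('max |D @ f - df_exact| =', np.max(np.abs(df_num - df_exact)))
```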
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/cccr-iitm/cmip6/models/sandbox-1/ocean.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Ocean\nMIP Era: CMIP6\nInstitute: CCCR-IITM\nSource ID: SANDBOX-1\nTopic: Ocean\nSub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. \nProperties: 133 (101 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:48\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cccr-iitm', 'sandbox-1', 'ocean')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Seawater Properties\n3. Key Properties --&gt; Bathymetry\n4. Key Properties --&gt; Nonoceanic Waters\n5. Key Properties --&gt; Software Properties\n6. Key Properties --&gt; Resolution\n7. Key Properties --&gt; Tuning Applied\n8. Key Properties --&gt; Conservation\n9. Grid\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Discretisation --&gt; Horizontal\n12. Timestepping Framework\n13. Timestepping Framework --&gt; Tracers\n14. Timestepping Framework --&gt; Baroclinic Dynamics\n15. Timestepping Framework --&gt; Barotropic\n16. Timestepping Framework --&gt; Vertical Physics\n17. Advection\n18. Advection --&gt; Momentum\n19. Advection --&gt; Lateral Tracers\n20. Advection --&gt; Vertical Tracers\n21. Lateral Physics\n22. Lateral Physics --&gt; Momentum --&gt; Operator\n23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\n24. Lateral Physics --&gt; Tracers\n25. Lateral Physics --&gt; Tracers --&gt; Operator\n26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\n27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\n28. Vertical Physics\n29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\n30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n35. Uplow Boundaries --&gt; Free Surface\n36. Uplow Boundaries --&gt; Bottom Boundary Layer\n37. Boundary Forcing\n38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\n39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\n40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\n41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing \n1. Key Properties\nOcean key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. 
Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean model code (NEMO 3.6, MOM 5.0,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Family\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OGCM\" \n# \"slab ocean\" \n# \"mixed layer ocean\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBasic approximations made in the ocean.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Primitive equations\" \n# \"Non-hydrostatic\" \n# \"Boussinesq\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the ocean component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# \"Salinity\" \n# \"U-velocity\" \n# \"V-velocity\" \n# \"W-velocity\" \n# \"SSH\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Seawater Properties\nPhysical properties of seawater in ocean\n2.1. Eos Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Wright, 1997\" \n# \"Mc Dougall et al.\" \n# \"Jackett et al. 2006\" \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.2. Eos Functional Temp\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTemperature used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# TODO - please enter value(s)\n", "2.3. Eos Functional Salt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSalinity used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Practical salinity Sp\" \n# \"Absolute salinity Sa\" \n# TODO - please enter value(s)\n", "2.4. Eos Functional Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDepth or pressure used in EOS for sea water ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pressure (dbars)\" \n# \"Depth (meters)\" \n# TODO - please enter value(s)\n", "2.5. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.6. Ocean Specific Heat\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecific heat in ocean (cpocean) in J/(kg K)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "2.7. Ocean Reference Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBoussinesq reference density (rhozero) in kg / m3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Bathymetry\nProperties of bathymetry in ocean\n3.1. Reference Dates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nReference date of bathymetry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Present day\" \n# \"21000 years BP\" \n# \"6000 years BP\" \n# \"LGM\" \n# \"Pliocene\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the bathymetry fixed in time in the ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.3. Ocean Smoothing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any smoothing or hand editing of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Source\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe source of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.source') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Nonoceanic Waters\nNon oceanic waters treatement in ocean\n4.1. Isolated Seas\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how isolated seas is performed", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. River Mouth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how river mouth mixing or estuaries specific treatment is performed", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Software Properties\nSoftware properties of ocean code\n5.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Resolution\nResolution in the ocean grid\n6.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Range Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.5. Number Of Vertical Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.6. Is Adaptive Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.7. Thickness Level 1\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThickness of first surface ocean level (in meters)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Tuning Applied\nTuning methodology for ocean component\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. 
Key Properties --&gt; Conservation\nConservation in the ocean component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBrief description of conservation methodology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in the ocean by the numerical schemes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Enstrophy\" \n# \"Salt\" \n# \"Volume of ocean\" \n# \"Momentum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Consistency Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAny additional consistency properties (energy conversion, pressure gradient discretisation, ...)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Corrected Conserved Prognostic Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSet of variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Was Flux Correction Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDoes conservation involve flux correction ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9. Grid\nOcean grid\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of grid in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nProperties of vertical discretisation in ocean\n10.1. Coordinates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical coordinates in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Z-coordinate\" \n# \"Z*-coordinate\" \n# \"S-coordinate\" \n# \"Isopycnic - sigma 0\" \n# \"Isopycnic - sigma 2\" \n# \"Isopycnic - sigma 4\" \n# \"Isopycnic - other\" \n# \"Hybrid / Z+S\" \n# \"Hybrid / Z+isopycnic\" \n# \"Hybrid / other\" \n# \"Pressure referenced (P)\" \n# \"P*\" \n# \"Z**\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. 
Partial Steps\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nUsing partial steps with Z or Z vertical coordinate in ocean ?*", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11. Grid --&gt; Discretisation --&gt; Horizontal\nType of horizontal discretisation scheme in ocean\n11.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal grid type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Lat-lon\" \n# \"Rotated north pole\" \n# \"Two north poles (ORCA-style)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. Staggering\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal grid staggering type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa E-grid\" \n# \"N/a\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite difference\" \n# \"Finite volumes\" \n# \"Finite elements\" \n# \"Unstructured grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Timestepping Framework\nOcean Timestepping Framework\n12.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.2. Diurnal Cycle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiurnal cycle type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Via coupling\" \n# \"Specific treatment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Timestepping Framework --&gt; Tracers\nProperties of tracers time stepping in ocean\n13.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time stepping scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14. Timestepping Framework --&gt; Baroclinic Dynamics\nBaroclinic dynamics in ocean\n14.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Preconditioned conjugate gradient\" \n# \"Sub cyling\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBaroclinic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Timestepping Framework --&gt; Barotropic\nBarotropic time stepping in ocean\n15.1. Splitting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime splitting method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"split explicit\" \n# \"implicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBarotropic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Timestepping Framework --&gt; Vertical Physics\nVertical physics time stepping in ocean\n16.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of vertical time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Advection\nOcean advection\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of advection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. 
Advection --&gt; Momentum\nProperties of lateral momemtum advection scheme in ocean\n18.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of lateral momemtum advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flux form\" \n# \"Vector form\" \n# TODO - please enter value(s)\n", "18.2. Scheme Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean momemtum advection scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. ALE\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nUsing ALE for vertical advection ? (if vertical coordinates are sigma)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.ALE') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19. Advection --&gt; Lateral Tracers\nProperties of lateral tracer advection scheme in ocean\n19.1. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for lateral tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19.3. Effective Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEffective order of limited lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.5. Passive Tracers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nPassive tracers advected", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ideal age\" \n# \"CFC 11\" \n# \"CFC 12\" \n# \"SF6\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.6. Passive Tracers Advection\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs advection of passive tracers different than active ? 
if so, describe.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Advection --&gt; Vertical Tracers\nProperties of vertical tracer advection scheme in ocean\n20.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for vertical tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21. Lateral Physics\nOcean lateral physics\n21.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lateral physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of transient eddy representation in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Eddy active\" \n# \"Eddy admitting\" \n# TODO - please enter value(s)\n", "22. Lateral Physics --&gt; Momentum --&gt; Operator\nProperties of lateral physics operator for momentum in ocean\n22.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\nProperties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean\n23.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics momemtum eddy viscosity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "23.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.4. Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24. Lateral Physics --&gt; Tracers\nProperties of lateral physics for tracers in ocean\n24.1. Mesoscale Closure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a mesoscale closure in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24.2. Submesoscale Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "25. Lateral Physics --&gt; Tracers --&gt; Operator\nProperties of lateral physics operator for tracers in ocean\n25.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\nProperties of eddy diffusity coeff in lateral physics tracers scheme in the ocean\n26.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics tracers eddy diffusity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. 
Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\nProperties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean\n27.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV in lateral physics tracers in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"GM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Constant Val\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf EIV scheme for tracers is constant, specify coefficient value (M2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.3. Flux Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV flux (advective or skew)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Added Diffusivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV added diffusivity (constant, flow dependent or none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Vertical Physics\nOcean Vertical Physics\n28.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vertical physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\nProperties of vertical physics in ocean\n29.1. Langmuir Cells Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there Langmuir cells mixing in upper ocean ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n*Properties of boundary layer (BL) mixing on tracers in the ocean *\n30.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of tracers, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of tracers coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n*Properties of boundary layer (BL) mixing on momentum in the ocean *\n31.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of momentum, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of momentum coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n*Properties of interior mixing in the ocean *\n32.1. Convection Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical convection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Non-penetrative convective adjustment\" \n# \"Enhanced vertical diffusion\" \n# \"Included in turbulence closure\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.2. Tide Induced Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how tide induced mixing is modelled (barotropic, baroclinic, none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.3. Double Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there double diffusion", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.4. Shear Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there interior shear mixing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n*Properties of interior mixing on tracers in the ocean *\n33.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of tracers, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "33.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of tracers coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n*Properties of interior mixing on momentum in the ocean *\n34.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "34.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of momentum, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "34.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of momentum coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35. Uplow Boundaries --&gt; Free Surface\nProperties of free surface in ocean\n35.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of free surface in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFree surface scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear implicit\" \n# \"Linear filtered\" \n# \"Linear semi-explicit\" \n# \"Non-linear implicit\" \n# \"Non-linear filtered\" \n# \"Non-linear semi-explicit\" \n# \"Fully explicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "35.3. Embeded Seaice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the sea-ice embeded in the ocean model (instead of levitating) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36. Uplow Boundaries --&gt; Bottom Boundary Layer\nProperties of bottom boundary layer in ocean\n36.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.2. Type Of Bbl\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diffusive\" \n# \"Acvective\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36.3. Lateral Mixing Coef\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "36.4. Sill Overflow\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any specific treatment of sill overflows", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37. Boundary Forcing\nOcean boundary forcing\n37.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of boundary forcing in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.2. Surface Pressure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.3. Momentum Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.4. Tracers Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.5. Wave Effects\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how wave effects are modelled at ocean surface.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.6. River Runoff Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how river runoff from land surface is routed to ocean and any global adjustment done.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.7. Geothermal Heating\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how geothermal heating is present at ocean bottom.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\nProperties of momentum bottom friction in ocean\n38.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum bottom friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Non-linear\" \n# \"Non-linear (drag function of speed of tides)\" \n# \"Constant drag coefficient\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "39. 
Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\nProperties of momentum lateral friction in ocean\n39.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum lateral friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Free-slip\" \n# \"No-slip\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\nProperties of sunlight penetration scheme in ocean\n40.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of sunlight penetration scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"1 extinction depth\" \n# \"2 extinction depth\" \n# \"3 extinction depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40.2. Ocean Colour\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the ocean sunlight penetration scheme ocean colour dependent ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "40.3. Extinction Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe and list extinctions depths for sunlight penetration scheme (if applicable).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing\nProperties of surface fresh water forcing in ocean\n41.1. From Atmopshere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from atmos in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.2. From Sea Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from sea-ice in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Real salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.3. Forced Mode Restoring\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface salinity restoring in forced mode (OMIP)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
RobbieNesmith/PandasTutorial
Tutorial/Exercises-4.ipynb
mit
[ "%matplotlib inline\nimport pandas as pd\nimport seaborn as sbn\nsbn.set()\n\nfrom IPython.core.display import HTML\ncss = open('style-table.css').read() + open('style-notebook.css').read()\nHTML('<style>{}</style>'.format(css))\n\ntitles = pd.DataFrame.from_csv('data/titles.csv', index_col=None)\ntitles.head()\n\ncast = pd.DataFrame.from_csv('data/cast.csv', index_col=None)\ncast.head()", "Define a year as a \"Superman year\" whose films feature more Superman characters than Batman. How many years in film history have been Superman years?", "both = cast[(cast.character=='Superman') | (cast.character == 'Batman')].groupby(['year','character']).size().unstack().fillna(0)\ndiff = both.Superman - both.Batman\nprint(\"Superman: \" + str(len(diff[diff>0])))", "How many years have been \"Batman years\", with more Batman characters than Superman characters?", "both = cast[(cast.character=='Superman') | (cast.character == 'Batman')].groupby(['year','character']).size().unstack().fillna(0)\ndiff = both.Batman - both.Superman\nprint(\"Batman: \" + str(len(diff[diff>0])))", "Plot the number of actor roles each year and the number of actress roles each year over the history of film.", "cast.groupby(['year','type']).size().unstack().plot()", "Plot the number of actor roles each year and the number of actress roles each year, but this time as a kind='area' plot.", "cast.groupby(['year','type']).size().unstack().plot(kind='area')", "Plot the difference between the number of actor roles each year and the number of actress roles each year over the history of film.", "foo = cast.groupby(['year','type']).size().unstack().fillna(0)\n\nfoo['diff'] = foo['actor']-foo['actress']\nfoo['diff'].plot()", "Plot the fraction of roles that have been 'actor' roles each year in the hitsory of film.", "foo['totalRoles'] = foo['actor']+foo['actress']\nfoo['manFrac'] = foo['actor']/foo['totalRoles']\nfoo['manFrac'].plot()", "Plot the fraction of supporting (n=2) roles that have been 'actor' roles each year in the history of film.", "support = cast[cast.n==2]\nbar = support.groupby(['year','type']).size().unstack().fillna(0)\nbar['totalRoles'] = bar['actor']+bar['actress']\nbar['manFrac'] = bar['actor']/bar['totalRoles']\nbar['manFrac'].plot()", "Build a plot with a line for each rank n=1 through n=3, where the line shows what fraction of that rank's roles were 'actor' roles for each year in the history of film.", "thirdWheel = cast[cast.n==3]\nbaz = thirdWheel.groupby(['year','type']).size().unstack().fillna(0)\nbaz['totalRoles'] = baz['actor']+baz['actress']\nbaz['manFrac'] = baz['actor']/baz['totalRoles']\nfoo['manFrac'].plot() + (bar['manFrac'].plot() + baz['manFrac'].plot())" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
megatharun/basic-python-for-researcher
Tutorial 3 - Conditional Expression.ipynb
artistic-2.0
[ "<span style=\"color: #B40486\">BASIC PYTHON FOR RESEARCHERS</span>\nby Megat Harun Al Rashid bin Megat Ahmad\nlast updated: April 14, 2016\n\n<span style=\"color: #29088A\">3. Conditional Expressions</span>\n$Python$ conditional expressions include the <span style=\"color: #0000FF\">$if/elif/else$</span> statement. In addition the <span style=\"color: #0000FF\">$for$</span> and <span style=\"color: #0000FF\">$while$</span> statements can be used in conditional looping. $Python$ also has the <span style=\"color: #0000FF\">$enumerate$&#40; &#41;</span> function for conditional looping.\n\n3.1 The <span style=\"color: #0000FF\">if/else</span> condition\nThe <span style=\"color: #0000FF\">$if/else$</span> conditional expression allows statement to be executed if a condition is fulfilled:", "# Calculate the remainder of a divisional operation\nx = 56\ny = 3\nz = x % y # Modulo operation\n\nif z > 0:\n print (\"The remainder of %d divided by %d is %d\" % (x,y,z))\nelse:\n print \"There's no remainder\"", "The $z = x$ &#37; $y$ operation means that the remainder of $x$ when divided with $y$ will be assigned to $z$. The <span style=\"color: #0000FF\">$if/else$</span> conditional expression above resulted in the printing of the variable $z$ if its value is positive, i.e. if $x$ when divided with $y$ has a remainder. If this condition is not fulfilled, then the text \"There's no remainder\" will be printed.\nStatements inside conditional expression must be indented with space (not tab). The indentation must be consistent throughout the condition.\n\n3.2 The <span style=\"color: #0000FF\">if/elif/else</span> condition\nThe <span style=\"color: #0000FF\">$if/elif/else$</span> conditional expression allows multiple conditions to be applied.", "# Compare the values of two integers\n\nint1 = 45\nint2 = 55\n\nif int1 > int2:\n print \"%d is larger than %d\" % (int1,int2)\nelif int1 == int2:\n print \"%d is equal to %d\" % (int1,int2)\nelse:\n print \"%d is less than %d\" % (int1,int2)", "In the example above, the first condition always use the <span style=\"color: #0000FF\">$if$</span> condition expression (i.e. $int1$ > $int2$). Only if this is not fulfilled will the second condition be evaluated i.e. the <span style=\"color: #0000FF\">$elif$</span> condition expression. If this condition is also not fulfilled, then the <span style=\"color: #0000FF\">$else$</span> condition statement will be executed.\nIn multiple conditional expressions, all the conditions will be evaluated in sequence. When one of the condition is fulfilled, the sequential evaluation will stop and the statement for that conditions will be executed.\nSome of the conditional operators that can be used in a conditional expression:\n|Condition|Function|\n|---|---|\n|>|more than|\n|<|less than|\n|>=|equal or more than|\n|<=|equal or less than|\n|==|equal to|\n|!=|not equal to|\n|and|more than one conditional operations are true|\n|or|either one conditional operations is true|\nIn general, the multiple conditional expressions format is: \nif (condition/s 1):\n\nstatement 1.1\nstatement 1.2\n......\n\nelif (condition/s 2):\n\nstatement 2.1\n...... \n\nelif (condition/s 3): \n\nstatement 3.1\n...... \n\n......\n......\n...... 
\nelse: \n\nstatement\n......\n\nThe statement in each conditional expression can also be a conditional expression.\n<span style=\"color: #F5DA81; background-color: #610B4B\">Example 3.1</span>: Determine the maximum and minimum of three different integers: 34,12,67.", "x = 34\ny = 12\nz = 67\n\nif x > y:\n if y > z:\n print 'Maximum integer is %d' % x\n print 'Minimum integer is %d' % z\n elif z > x:\n print 'Maximum integer is %d' % z\n print 'Minimum integer is %d' % y\n else:\n print 'Maximum integer is %d' % x\n print 'Minimum integer is %d' % y\n \nelse: # y > x\n if x > z:\n print 'Maximum integer is %d' % y\n print 'Minimum integer is %d' % z\n elif z > y:\n print 'Maximum integer is %d' % z\n print 'Minimum integer is %d' % x\n else:\n print 'Maximum integer is %d' % y\n print 'Minimum integer is %d' % x\n\n", "<span style=\"color: #F5DA81; background-color: #610B4B\">Example 3.2</span>: Use only one type of conditional operator for Exercise 3.1.", "x = 34\ny = 12\nz = 67\n\nif x > y > z:\n print 'Maximum integer is %d' % x\n print 'Minimum integer is %d' % z\n\nelif x > z > y:\n print 'Maximum integer is %d' % x\n print 'Minimum integer is %d' % y\n \nelif y > x > z:\n print 'Maximum integer is %d' % y\n print 'Minimum integer is %d' % z\n\nelif y > z > x:\n print 'Maximum integer is %d' % y\n print 'Minimum integer is %d' % x\n\nelif z > x > y:\n print 'Maximum integer is %d' % z\n print 'Minimum integer is %d' % y\n\nelse:\n print 'Maximum integer is %d' % z\n print 'Minimum integer is %d' % x", "<span style=\"color: #F5DA81; background-color: #610B4B\">Exercise 3.1</span>: What if two or all integers have the same value. Try this and run the codes that solve Examples 3.1 and 3.2.\nCodes in Example 3.1 seems more robust but 3.2 can be made more robust adding '&gt;=' instead of '&gt;'.", "x = 78\ny = 78\nz = 99\n\nif x >= y >= z:\n print 'Maximum integer is %d' % x\n print 'Minimum integer is %d' % z\n\nelif x >= z >= y:\n print 'Maximum integer is %d' % x\n print 'Minimum integer is %d' % y\n \nelif y >= x >= z:\n print 'Maximum integer is %d' % y\n print 'Minimum integer is %d' % z\n\nelif y >= z >= x:\n print 'Maximum integer is %d' % y\n print 'Minimum integer is %d' % x\n\nelif z >= x >= y:\n print 'Maximum integer is %d' % z\n print 'Minimum integer is %d' % y\n\nelse:\n print 'Maximum integer is %d' % z\n print 'Minimum integer is %d' % x", "3.3 The <span style=\"color: #0000FF\">for</span> and <span style=\"color: #0000FF\">while</span> conditions\nThe <span style=\"color: #0000FF\">$for$</span> and <span style=\"color: #0000FF\">$while$</span> functions can be used to do repetitive action. The indentation with space (not tab) for statements inside the loop is also applied and consistent throughout the condition.", "for i in range(0,5,1):\n print i", "Here the variable $i$ will be assigned the value of $0$ and cyclically incremented $5$ times by adding the integer $1$ to it each time. Only integer values are accepted in the parenthesis of the range statement. The first integer is the intial value of the $i$ variable, the second integer indicates (not-inclusive) the limiting value of the $i$ variable and the third integer represent the integer added to the variable $i$ for each cycles.", "for i in range(4,17,3):\n print i*2", "The conditional looping can be nested as examplified below:", "for i in range(1,6,1):\n for j in range(6,11,1):\n print '%d x %d = %d' % (i,j,i*j)", "It is also possible to loop into the elements of a string (i.e. 
a $list$).", "for name in 'Numpy':\n print name", "The <span style=\"color: #0000FF\">$while$</span> function works similarly like <span style=\"color: #0000FF\">$for$</span> but initialization of the variable is performed before the <span style=\"color: #0000FF\">$while$</span> statement and incrementing process is carried out as part of the loop argument.", "z = 0\nwhile z < 27:\n print z\n z = z + 6", "3.3 The <span style=\"color: #0000FF\">$enumerate$&#40; &#41;</span> function\nThe <span style=\"color: #0000FF\">$enumerate$&#40; &#41;</span> function will make the <span style=\"color: #0000FF\">$for$</span> looping condition looking more comprehensible. The argument for this function is a $list$.", "for i,j in enumerate('Numpy'):\n print i, '\\t', j\n\nfor item in enumerate('Numpy'):\n print item", "The <span style=\"color: #0000FF\">$enumerate$&#40; &#41;</span> function allows the extraction of both the default position and its element of a $list$. In the first example, the two variables $i$ and $j$ will be assigned the $list$ default positional number and its element, respectively. In the second example, the variable $item$ will be assigned a tuple that consists the pair of default positional number and its element of the $list$.\nThe default positional number can be initiated to a different number. This can be done by passing the initial number as another argument in the <span style=\"color: #0000FF\">$enumerate$&#40; &#41;</span> function.", "for item in enumerate('Numpy',5):\n print item", "More on conditional expressions and looping can be found on https://docs.python.org/2/tutorial/controlflow.html" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
stitchfix/d3-jupyter-tutorial
3d_meshing.ipynb
mit
[ "3D Visualization of a Convex Hull with D3\nThis notebook provides a simple example of convex hull visualization using D3.\nD3 Graph Methods\nSee accompanying d3_lib.py and the js and css folders.", "%matplotlib inline\nfrom IPython.core.display import HTML\nimport d3_lib\n\nHTML(d3_lib.set_styles(['basic_axis','3d_viewer']))\n\nHTML('<script src=\"lib/d3/d3.min.js\"></script>')\n\ndef points_d3(points):\n return [ {\"x\": d[0], \"y\": d[1], \"z\": d[2]} for d in points ]\n\ndef triangles_d3(points,triangles_vertices):\n triangles = []\n for tv in triangles_vertices:\n triangles.append( {\"x1\": points[tv[0]][0], \n \"y1\": points[tv[0]][1], \n \"z1\": points[tv[0]][2], \n \"x2\": points[tv[1]][0], \n \"y2\": points[tv[1]][1], \n \"z2\": points[tv[1]][2], \n \"x3\": points[tv[2]][0], \n \"y3\": points[tv[2]][1], \n \"z3\": points[tv[2]][2] } )\n \n return triangles\n\ndef graph_points_triangles(objs):\n data = []\n for obj in objs:\n points, triangles_vertices = obj[0], obj[1]\n data.append( {\"points\": points_d3(points), \n \"triangles\": triangles_d3(points, triangles_vertices)} )\n return HTML(d3_lib.draw_graph('3d_viewer',{'data':data}))", "Meshing and Volume Calculations", "import numpy as np\nimport random\nfrom scipy.spatial import ConvexHull\n\ndef compute_mesh(points):\n hull = ConvexHull(points)\n indices = hull.simplices\n return indices, hull.vertices", "Example: Randomly Sampled Points on a Cylinder", "def cylinder_points_and_hull_given_sample_size(sample_size):\n points = []\n for i in range(sample_size/2):\n x = random.uniform(-1,1)\n z = random.uniform(0,1)\n s = (-1.0, 1.0)[random.uniform(0,1) < 0.5]\n y = s * (1 - x**2) ** (0.5)\n points.append(np.array([x,y,z]))\n for z in range(0,2):\n for i in range(n/4):\n x = random.uniform(-1,1)\n s = (-1.0, 1.0)[random.uniform(0,1) < 0.5]\n y = s * random.uniform(0,1) * (1 - x**2) ** (0.5)\n points.append(np.array([x,y,z]))\n points = np.array(points)\n triangles_vertices, hull_points = compute_mesh(points)\n return points, hull_points, triangles_vertices\n\nrandom.seed(42)\nn = 100\npoints, hull_vertices, triangles_vertices = cylinder_points_and_hull_given_sample_size(n)\npoints[:3]\n\ntriangles_vertices[:3]\n\ngraph_points_triangles([[points, triangles_vertices]])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
mvdbosch/AtosCodexDemo
Jupyter Notebooks/Explore the CBS Crime and Demographics Dataset.ipynb
gpl-3.0
[ "Atos Codex - Data Scientist Workbench\nExplore the CBS Crime and Demographics Dataset\nFirst check some of the environment specs and see what we have here", "%%bash\ncat /proc/cpuinfo | grep 'processor\\|model name'\n\n%%bash\nfree -g", "Import Python packages", "from __future__ import print_function\nimport pandas as pd\nimport geopandas as gpd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom ipywidgets.widgets import interact, Text\nfrom IPython.display import display\nimport numpy as np", "Set Jupyter Notebook graphical parameters", "# use the notebook definition for interactive embedded graphics\n# %matplotlib notebook\n\n# use the inline definition for static embedded graphics\n%matplotlib inline \n\nrcParam = {\n 'figure.figsize': (12,6),\n 'font.weight': 'bold',\n 'axes.labelsize': 20.0,\n 'axes.titlesize': 20.0,\n 'axes.titleweight': 'bold',\n 'legend.fontsize': 14,\n 'xtick.labelsize': 14,\n 'ytick.labelsize': 14,\n}\n\nfor key in rcParam:\n mpl.rcParams[key] = rcParam[key]", "Read the combines CBS dataset\nThis is the file that we downladen & merged using Talend Open Studio for Big Data. (Note: please check the file path)", "cbs_data = pd.read_csv('combined_data.csv',sep=',',na_values=['NA','.'],error_bad_lines=False);", "Let's inspect the contents of this file by looking at the first 5 rows.\nAs you can see, this file has a lot of columns. For a description of the fieldnames, please see the description file", "cbs_data.head()\n\ncbs_data_2015 = cbs_data.loc[cbs_data['YEAR'] == 2015];\n#list(cbs_data_2015)", "We will subset the entire 2010-2015 into just the year 2015.\nIn the table below you will see summary statistics", "cbs_data_2015.describe()\n#cbs_data_2015.YEAR.describe()\n\ncbs_data_2015 = cbs_data_2015.dropna();\ncbs_data_2015.describe()", "Description of some of the demographic features of this dataset", "cbs_data_2015.iloc[:,35:216].describe()", "We want to make a label and a set of features out of our data\nLabelling: The relative amount of money and property crimes ( Vermogensmisdrijven_rel)\nFeatures : All neighbourhood demographic columns in the dataset", "labels = cbs_data_2015[\"Vermogensmisdrijven_rel\"].values\ncolumns = list(cbs_data_2015.iloc[:,37:215])\n\nfeatures = cbs_data_2015[list(columns)];\nfeatures = features.apply(lambda columns : pd.to_numeric(columns, errors='ignore'))", "Inspect our labels and features", "print(labels[1:10])\nfeatures.head()", "Feature selection using Randomized Lasso\nImport Randomized Lasso from the Python Scikit-learn package", "from sklearn.linear_model import RandomizedLasso", "Run Randomized Lasso, with 3000 resampling and 100 iterations.", "rlasso = RandomizedLasso(alpha='aic',verbose =True,normalize =True,n_resampling=3000,max_iter=100)\nrlasso.fit(features, labels)", "Features sorted by their score\nIn the table below the top10 best features (i.e. 
columns) are shown with their score", "dfResults = pd.DataFrame.from_dict(sorted(zip(map(lambda x: round(x, 4), rlasso.scores_), list(features)), reverse=True))\ndfResults.columns = ['Score', 'FeatureName']\ndfResults.head(10)", "Because in the beginning of the lasso results table, a lot of high-scoring features are present, we want \nto check how the scores are divided across all features", "dfResults.plot('FeatureName', 'Score', kind='bar', color='navy')\nax1 = plt.axes()\nx_axis = ax1.axes.get_xaxis()\nx_axis.set_visible(False)\nplt.show()", "Scatterplot\nLet's inspect one of the top variables and make a scatterplot for this one", "plt.scatter(y=pd.to_numeric(cbs_data_2015['Vermogensmisdrijven_rel']),x=pd.to_numeric(cbs_data_2015['A_BED_GI']));\nplt.ylabel('Vermogensmisdrijven_rel')\nplt.xlabel('A_BED_GI ( Bedrijfsvestigingen: Handel en horeca )')\nplt.show()\n\ndfResults.tail(10)", "Let's also inspect one of the worst variables (Perc% of Low income households) and plot this one too", "plt.scatter(y=pd.to_numeric(cbs_data_2015['Vermogensmisdrijven_rel']),x=pd.to_numeric(cbs_data_2015['P_LAAGINKH']));\nplt.ylabel('Vermogensmisdrijven_rel')\nplt.xlabel('Perc. Laaginkomen Huish.')\nplt.show()", "Try out another hypothesis (e.g. Perc% of divorced vs. Rel% Domestic and Sexual violence crimes)", "plt.scatter(y=pd.to_numeric(cbs_data_2015['Gewelds_en_seksuele_misdrijven_rel']),x=pd.to_numeric(cbs_data_2015['P_GESCHEID']));\nplt.ylabel('Gewelds_en_seksuele_misdrijven_rel')\nplt.xlabel('Perc_Gescheiden')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ramseylab/networkscompbio
class08_components_python3.ipynb
apache-2.0
[ "CS446/546 - Class Session 8 - Components\nIn this class session we are going to find the number of proteins that are in the giant component of the (undirected) protein-protein interaction network, using igraph.", "from igraph import Graph\nfrom igraph import summary\nimport pandas\nimport numpy", "Step 1: load in the SIF file (refer to Class 6 exercise) into a data frame sif_data, using the pandas.read_csv function, and name the columns species1, interaction_type, and species2.", "sif_data = pandas.read_csv(\"shared/pathway_commons.sif\",\n sep=\"\\t\", names=[\"species1\",\"interaction_type\",\"species2\"])", "Step 2: restrict the interactions to protein-protein undirected (\"in-complex-with\", \"interacts-with\"), by using the isin function and then using [ to index rows into the data frame. Call the returned ata frame interac_ppi.", "interaction_types_ppi = set([\"interacts-with\",\n \"in-complex-with\"])\ninterac_ppi = sif_data[sif_data.interaction_type.isin(interaction_types_ppi)].copy()", "Step 3: restrict the data frame to only the unique interaction pairs of proteins (ignoring the interaction type), and call that data frame interac_ppi_unique. Make an igraph Graph object from interac_ppi_unique using Graph.TupleList, values, and tolist. Call summary on the Graph object. Refer to the notebooks for the in-class exercises in Class sessions 3 and 6.", "boolean_vec = interac_ppi['species1'] > interac_ppi['species2']\ninterac_ppi.loc[boolean_vec, ['species1', 'species2']] = interac_ppi.loc[boolean_vec, ['species2', 'species1']].values\n \ninterac_ppi_unique = interac_ppi[[\"species1\",\"species2\"]].drop_duplicates() \n\n\nppi_igraph = Graph.TupleList(interac_ppi_unique.values.tolist(), directed=False)\nsummary(ppi_igraph)", "Step 4: Map the components of the network using the igraph.Graph.clusters method. That method returns a igraph.clustering.VertexClustering object. Call the sizes method on that VertexClustering object, to get a list of sizes of the components. What is the giant component size?", "# call the `clusters` method on the `ppi_igraph` object, and assign the \n# resulting `VertexClustering` object to have object name `ppi_components`\nppi_components = ppi_igraph.clusters()\n\n# call the `sizes` method on the `ppi_components` object, and assign the\n# resulting list object to have the name `ppi_component_sizes`.\nppi_component_sizes = ppi_components.sizes()\n\n# make a `numpy.array` initialized by `ppi_component_sizes`, and find its \n# maximum value using the `max` method on the `numpy.array` class\nnumpy.array(ppi_component_sizes).max()", "Advanced code-spellunking question: go to the GitHub repo for igraph (https://github.com/igraph), and find the code components.c. For the weakly connected components, is it doing a BFS or DFS?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/miroc/cmip6/models/sandbox-3/land.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Land\nMIP Era: CMIP6\nInstitute: MIROC\nSource ID: SANDBOX-3\nTopic: Land\nSub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes. \nProperties: 154 (96 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-20 15:02:41\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'miroc', 'sandbox-3', 'land')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Conservation Properties\n3. Key Properties --&gt; Timestepping Framework\n4. Key Properties --&gt; Software Properties\n5. Grid\n6. Grid --&gt; Horizontal\n7. Grid --&gt; Vertical\n8. Soil\n9. Soil --&gt; Soil Map\n10. Soil --&gt; Snow Free Albedo\n11. Soil --&gt; Hydrology\n12. Soil --&gt; Hydrology --&gt; Freezing\n13. Soil --&gt; Hydrology --&gt; Drainage\n14. Soil --&gt; Heat Treatment\n15. Snow\n16. Snow --&gt; Snow Albedo\n17. Vegetation\n18. Energy Balance\n19. Carbon Cycle\n20. Carbon Cycle --&gt; Vegetation\n21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\n22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\n23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\n24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\n25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\n26. Carbon Cycle --&gt; Litter\n27. Carbon Cycle --&gt; Soil\n28. Carbon Cycle --&gt; Permafrost Carbon\n29. Nitrogen Cycle\n30. River Routing\n31. River Routing --&gt; Oceanic Discharge\n32. Lakes\n33. Lakes --&gt; Method\n34. Lakes --&gt; Wetlands \n1. Key Properties\nLand surface key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of land surface model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of land surface model code (e.g. MOSES2.2)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.4. Land Atmosphere Flux Exchanges\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nFluxes exchanged with the atmopshere.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"water\" \n# \"energy\" \n# \"carbon\" \n# \"nitrogen\" \n# \"phospherous\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Atmospheric Coupling Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Land Cover\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTypes of land cover defined in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bare soil\" \n# \"urban\" \n# \"lake\" \n# \"land ice\" \n# \"lake ice\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.7. Land Cover Change\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how land cover change is managed (e.g. the use of net or gross transitions)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover_change') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.8. Tiling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Conservation Properties\nTODO\n2.1. Energy\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how energy is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.energy') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Water\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how water is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.water') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. 
Key Properties --&gt; Timestepping Framework\nTODO\n3.1. Timestep Dependent On Atmosphere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a time step dependent on the frequency of atmosphere coupling?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverall timestep of land surface model (i.e. time between calls)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Timestepping Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of time stepping method and associated time step(s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of land surface code\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Grid\nLand surface grid\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the grid in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid --&gt; Horizontal\nThe horizontal grid in the land surface\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the horizontal grid (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.horizontal.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Matches Atmosphere Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the horizontal grid match the atmosphere?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "7. Grid --&gt; Vertical\nThe vertical grid in the soil\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the vertical grid in the soil (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Total Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe total depth of the soil (in metres)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.total_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8. Soil\nLand surface soil\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of soil in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Heat Water Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the coupling between heat and water in the soil", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_water_coupling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Number Of Soil layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.number_of_soil layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the soil scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Soil --&gt; Soil Map\nKey properties of the land surface soil map\n9.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of soil map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil structure map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Texture\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil texture map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.texture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. 
Organic Matter\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil organic matter map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.organic_matter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Albedo\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil albedo map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.6. Water Table\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil water table map, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.water_table') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.7. Continuously Varying Soil Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the soil properties vary continuously with depth?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9.8. Soil Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil depth map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Soil --&gt; Snow Free Albedo\nTODO\n10.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow free albedo prognostic?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "10.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf prognostic, describe the dependancies on snow free albedo calculations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"soil humidity\" \n# \"vegetation state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Direct Diffuse\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, describe the distinction between direct and diffuse albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"distinction between direct and diffuse albedo\" \n# \"no distinction between direct and diffuse albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.4. Number Of Wavelength Bands\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, enter the number of wavelength bands used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11. Soil --&gt; Hydrology\nKey properties of the land surface soil hydrology\n11.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the soil hydrological model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river soil hydrology in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil hydrology tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Number Of Ground Water Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers that may contain water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.6. Lateral Connectivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe the lateral connectivity between tiles", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"perfect connectivity\" \n# \"Darcian flow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.7. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe hydrological dynamics scheme in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bucket\" \n# \"Force-restore\" \n# \"Choisnel\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Soil --&gt; Hydrology --&gt; Freezing\nTODO\n12.1. Number Of Ground Ice Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow many soil layers may contain ground ice", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.2. 
Ice Storage Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of ice storage", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.3. Permafrost\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of permafrost, if any, within the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Soil --&gt; Hydrology --&gt; Drainage\nTODO\n13.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral describe how drainage is included in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.2. Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDifferent types of runoff represented by the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gravity drainage\" \n# \"Horton mechanism\" \n# \"topmodel-based\" \n# \"Dunne mechanism\" \n# \"Lateral subsurface flow\" \n# \"Baseflow from groundwater\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Soil --&gt; Heat Treatment\nTODO\n14.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of how heat treatment properties are defined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of soil heat scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil heat treatment tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.5. Heat Storage\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the method of heat storage", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.heat_treatment.heat_storage') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Force-restore\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.6. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe processes included in the treatment of soil heat", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"soil moisture freeze-thaw\" \n# \"coupling with snow temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15. Snow\nLand surface snow\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of snow in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Number Of Snow Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of snow levels used in the land surface scheme/model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.number_of_snow_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow density", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.5. Water Equivalent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the snow water equivalent", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.water_equivalent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.6. Heat Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the heat content of snow", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.heat_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.7. Temperature\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow temperature", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.snow.temperature') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.8. Liquid Water Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow liquid water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.liquid_water_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.9. Snow Cover Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify cover fractions used in the surface snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_cover_fractions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ground snow fraction\" \n# \"vegetation snow fraction\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.10. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSnow related processes in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"snow interception\" \n# \"snow melting\" \n# \"snow freezing\" \n# \"blowing snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.11. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Snow --&gt; Snow Albedo\nTODO\n16.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of snow-covered land albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"prescribed\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\n*If prognostic, *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"snow age\" \n# \"snow density\" \n# \"snow grain type\" \n# \"aerosol deposition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17. Vegetation\nLand surface vegetation\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vegetation in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of vegetation scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Dynamic Vegetation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there dynamic evolution of vegetation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.dynamic_vegetation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.4. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vegetation tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.5. Vegetation Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nVegetation classification used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation types\" \n# \"biome types\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.6. Vegetation Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of vegetation types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"broadleaf tree\" \n# \"needleleaf tree\" \n# \"C3 grass\" \n# \"C4 grass\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.7. Biome Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of biome types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biome_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"evergreen needleleaf forest\" \n# \"evergreen broadleaf forest\" \n# \"deciduous needleleaf forest\" \n# \"deciduous broadleaf forest\" \n# \"mixed forest\" \n# \"woodland\" \n# \"wooded grassland\" \n# \"closed shrubland\" \n# \"opne shrubland\" \n# \"grassland\" \n# \"cropland\" \n# \"wetlands\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.8. Vegetation Time Variation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow the vegetation fractions in each tile are varying with time", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_time_variation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed (not varying)\" \n# \"prescribed (varying from files)\" \n# \"dynamical (varying from simulation)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.9. 
Vegetation Map\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf vegetation fractions are not dynamically updated , describe the vegetation map used (common name and reference, if possible)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.10. Interception\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs vegetation interception of rainwater represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.interception') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.11. Phenology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic (vegetation map)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.12. Phenology Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.13. Leaf Area Index\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.14. Leaf Area Index Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.15. Biomass\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Treatment of vegetation biomass *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.16. Biomass Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biomass", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.17. Biogeography\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.vegetation.biogeography') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.18. Biogeography Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biogeography_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.19. Stomatal Resistance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify what the vegetation stomatal resistance depends on", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"light\" \n# \"temperature\" \n# \"water availability\" \n# \"CO2\" \n# \"O3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.20. Stomatal Resistance Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation stomatal resistance", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.21. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the vegetation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Energy Balance\nLand surface energy balance\n18.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of energy balance in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the energy balance tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. Number Of Surface Temperatures\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.4. Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify the formulation method for land surface evaporation, from soil and vegetation", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.energy_balance.evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"alpha\" \n# \"beta\" \n# \"combined\" \n# \"Monteith potential evaporation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.5. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe which processes are included in the energy balance scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"transpiration\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19. Carbon Cycle\nLand surface carbon cycle\n19.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of carbon cycle in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the carbon cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of carbon cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Anthropogenic Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDescribe the treament of the anthropogenic carbon pool", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grand slam protocol\" \n# \"residence time\" \n# \"decay time\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the carbon scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Carbon Cycle --&gt; Vegetation\nTODO\n20.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "20.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.3. 
Forest Stand Dynamics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of forest stand dyanmics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\nTODO\n21.1. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen depencence, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\nTODO\n22.1. Maintainance Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for maintainence respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.2. Growth Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for growth respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\nTODO\n23.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the allocation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.2. Allocation Bins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify distinct carbon bins used in allocation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"leaves + stems + roots\" \n# \"leaves + stems + roots (leafy + woody)\" \n# \"leaves + fine roots + coarse roots + stems\" \n# \"whole plant (no distinction)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.3. Allocation Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the fractions of allocation are calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"function of vegetation type\" \n# \"function of plant allometry\" \n# \"explicitly calculated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\nTODO\n24.1. 
Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the phenology scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\nTODO\n25.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the mortality scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Carbon Cycle --&gt; Litter\nTODO\n26.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Carbon Cycle --&gt; Soil\nTODO\n27.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.carbon_cycle.soil.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Carbon Cycle --&gt; Permafrost Carbon\nTODO\n28.1. Is Permafrost Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs permafrost included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.2. Emitted Greenhouse Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the GHGs emitted", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.4. Impact On Soil Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the impact of permafrost on soil properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Nitrogen Cycle\nLand surface nitrogen cycle\n29.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the nitrogen cycle in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the notrogen cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of nitrogen cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "29.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the nitrogen scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30. River Routing\nLand surface river routing\n30.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of river routing in the land surface", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the river routing, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river routing scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Grid Inherited From Land Surface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the grid inherited from land surface?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.5. Grid Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of grid, if not inherited from land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.6. Number Of Reservoirs\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of reservoirs", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.number_of_reservoirs') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.7. Water Re Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTODO", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.water_re_evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"flood plains\" \n# \"irrigation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.8. Coupled To Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs river routing coupled to the atmosphere model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.9. Coupled To Land\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the coupling between land and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_land') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.10. Quantities Exchanged With Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.11. Basin Flow Direction Map\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of basin flow direction map is being used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.basin_flow_direction_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"adapted for other periods\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.12. Flooding\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the representation of flooding, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.flooding') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.13. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the river routing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. River Routing --&gt; Oceanic Discharge\nTODO\n31.1. Discharge Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify how rivers are discharged to the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"direct (large rivers)\" \n# \"diffuse\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Quantities Transported\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nQuantities that are exchanged from river-routing to the ocean model component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32. Lakes\nLand surface lakes\n32.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lakes in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.2. Coupling With Rivers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre lakes coupled to the river routing model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.coupling_with_rivers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of lake scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.lakes.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "32.4. Quantities Exchanged With Rivers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf coupling with rivers, which quantities are exchanged between the lakes and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.5. Vertical Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vertical grid of lakes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.vertical_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.6. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the lake scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33. Lakes --&gt; Method\nTODO\n33.1. Ice Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs lake ice included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.ice_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.2. Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of lake albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.3. Dynamics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich dynamics of lakes are treated? horizontal, vertical, etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No lake dynamics\" \n# \"vertical\" \n# \"horizontal\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.4. Dynamic Lake Extent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a dynamic lake extent scheme included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.5. Endorheic Basins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasins not flowing to ocean included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.endorheic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "34. Lakes --&gt; Wetlands\nTODO\n34.1. 
Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of wetlands, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.wetlands.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/nuist/cmip6/models/sandbox-2/landice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Landice\nMIP Era: CMIP6\nInstitute: NUIST\nSource ID: SANDBOX-2\nTopic: Landice\nSub-Topics: Glaciers, Ice. \nProperties: 30 (21 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:34\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nuist', 'sandbox-2', 'landice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Grid\n4. Glaciers\n5. Ice\n6. Ice --&gt; Mass Balance\n7. Ice --&gt; Mass Balance --&gt; Basal\n8. Ice --&gt; Mass Balance --&gt; Frontal\n9. Ice --&gt; Dynamics \n1. Key Properties\nLand ice key properties\n1.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of land surface model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of land surface model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Ice Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify how ice albedo is modelled", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.ice_albedo') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"function of ice age\" \n# \"function of ice density\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Atmospheric Coupling Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich variables are passed between the atmosphere and ice (e.g. orography, ice mass)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Oceanic Coupling Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich variables are passed between the ocean and ice", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich variables are prognostically calculated in the ice model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.landice.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice velocity\" \n# \"ice thickness\" \n# \"ice temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of land ice code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Grid\nLand ice grid\n3.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the grid in the land ice scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. Adaptive Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs an adaptive grid being used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.3. Base Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe base resolution (in metres), before any adaptation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.base_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Resolution Limit\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf an adaptive grid is being used, what is the limit of the resolution (in metres)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.resolution_limit') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Projection\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe projection of the land ice grid (e.g. albers_equal_area)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.projection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Glaciers\nLand ice glaciers\n4.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of glaciers in the land ice scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.landice.glaciers.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of glaciers, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Dynamic Areal Extent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDoes the model include a dynamic glacial extent?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5. Ice\nIce sheet and ice shelf\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the ice sheet and ice shelf in the land ice scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Grounding Line Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.grounding_line_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grounding line prescribed\" \n# \"flux prescribed (Schoof)\" \n# \"fixed grid size\" \n# \"moving grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5.3. Ice Sheet\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre ice sheets simulated?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.ice_sheet') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5.4. Ice Shelf\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre ice shelves simulated?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.ice_shelf') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Ice --&gt; Mass Balance\nDescription of the surface mass balance treatment\n6.1. Surface Mass Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Ice --&gt; Mass Balance --&gt; Basal\nDescription of basal melting\n7.1. Bedrock\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of basal melting over bedrock", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Ocean\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of basal melting over the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Ice --&gt; Mass Balance --&gt; Frontal\nDescription of claving/melting from the ice shelf front\n8.1. Calving\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of calving from the front of the ice shelf", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Melting\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of melting from the front of the ice shelf", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Ice --&gt; Dynamics\n**\n9.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description if ice sheet and ice shelf dynamics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Approximation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nApproximation type used in modelling ice dynamics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.approximation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SIA\" \n# \"SAA\" \n# \"full stokes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Adaptive Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there an adaptive time scheme for the ice scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9.4. Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
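The cells above only stamp property identifiers and leave every value as a TODO. As a purely illustrative sketch of how those cells are meant to be completed, the block below fills in a handful of properties using the `DOC.set_id` / `DOC.set_value` pattern shown in the templates; the chosen values (no adaptive grid, a 5000 m base resolution, the SIA approximation, an 1800 s timestep) are invented for the example and do not describe any real CMIP6 land-ice model.

```python
# Hypothetical answers for a few of the TODO cells above; replace the values
# with the ones that describe your model. Enumerations take one of the listed
# "Valid Choices" strings, booleans and numbers are passed unquoted.

DOC.set_id('cmip6.landice.grid.adaptive_grid')
DOC.set_value(False)            # BOOLEAN, cardinality 1.1

DOC.set_id('cmip6.landice.grid.base_resolution')
DOC.set_value(5000.0)           # FLOAT, metres (illustrative)

DOC.set_id('cmip6.landice.ice.dynamics.approximation')
DOC.set_value("SIA")            # ENUM, one of the valid choices

DOC.set_id('cmip6.landice.ice.dynamics.timestep')
DOC.set_value(1800)             # INTEGER, seconds (illustrative)
```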
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/vertex-ai-samples
notebooks/official/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb
apache-2.0
[ "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "<table align=\"left\">\n\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/official/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb\">\n <img src=\"https://lh3.googleusercontent.com/UiNooY4LUgW_oTvpsNhPpQzsstV5W8F7rYgxgGBD85cWJoLmrOzhVs_ksK_vgx40SHs7jCqkTkCk=e14-rj-sc0xffffff-h130-w32\" alt=\"Vertex AI logo\">\n Open in Vertex AI Workbench\n </a>\n </td> \n</table>\n\nVertex AI: Track parameters and metrics for custom training jobs\nOverview\nThis notebook demonstrates how to track metrics and parameters for Vertex AI custom training jobs, and how to perform detailed analysis using this data.\nDataset\nThis example uses the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone\nObjective\nIn this notebook, you will learn how to use Vertex AI SDK for Python to:\n* Track training parameters and prediction metrics for a custom training job.\n* Extract and perform analysis for all parameters and metrics within an Experiment.\n\nCosts\nThis tutorial uses billable components of Google Cloud:\n\nVertex AI\nCloud Storage\n\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nSet up your local development environment\nIf you are using Colab or Vertex AI Workbench, your environment already meets\nall the requirements to run this notebook. You can skip this step.\nOtherwise, make sure your environment meets this notebook's requirements.\nYou need the following:\n\nThe Google Cloud SDK\nGit\nPython 3\nvirtualenv\nJupyter notebook running in a virtual environment with Python 3\n\nThe Google Cloud guide to Setting up a Python development\nenvironment and the Jupyter\ninstallation guide provide detailed instructions\nfor meeting these requirements. The following steps provide a condensed set of\ninstructions:\n\n\nInstall and initialize the Cloud SDK.\n\n\nInstall Python 3.\n\n\nInstall\n virtualenv\n and create a virtual environment that uses Python 3. 
Activate the virtual environment.\n\n\nTo install Jupyter, run pip install jupyter on the\ncommand-line in a terminal shell.\n\n\nTo launch Jupyter, run jupyter notebook on the command-line in a terminal shell.\n\n\nOpen this notebook in the Jupyter Notebook Dashboard.\n\n\nInstall additional packages\nInstall additional package dependencies not installed in your notebook environment.", "import os\n\n# The Google Cloud Notebook product has specific requirements\nIS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists(\"/opt/deeplearning/metadata/env_version\")\n\n# Google Cloud Notebook requires dependencies to be installed with '--user'\nUSER_FLAG = \"\"\nif IS_GOOGLE_CLOUD_NOTEBOOK:\n USER_FLAG = \"--user\"\n\n! pip3 install -U tensorflow $USER_FLAG\n! python3 -m pip install {USER_FLAG} google-cloud-aiplatform --upgrade\n! pip3 install scikit-learn {USER_FLAG}\n", "Restart the kernel\nAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.", "# Automatically restart kernel after installs\nimport os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)", "Before you begin\nSelect a GPU runtime\nMake sure you're running this notebook in a GPU runtime if you have that option. In Colab, select \"Runtime --> Change runtime type > GPU\"\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\n\n\nSelect or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n\nMake sure that billing is enabled for your project.\n\n\nEnable the Vertex AI API and Compute Engine API.\n\n\nIf you are running this notebook locally, you will need to install the Cloud SDK.\n\n\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n\nNote: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.\nSet your project ID\nIf you don't know your project ID, you may be able to get your project ID using gcloud.", "import os\n\nPROJECT_ID = \"\"\n\n# Get your Google Cloud project ID from gcloud\nif not os.getenv(\"IS_TESTING\"):\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID: \", PROJECT_ID)", "Otherwise, set your project ID here.", "if PROJECT_ID == \"\" or PROJECT_ID is None:\n PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}", "Set gcloud config to your project ID.", "!gcloud config set project $PROJECT_ID", "Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.", "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "Authenticate your Google Cloud account\nIf you are using Vertex AI Workbench, your environment is already\nauthenticated. 
Skip this step.\nIf you are using Colab, run the cell below and follow the instructions\nwhen prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\n\n\nIn the Cloud Console, go to the Create service account key\n page.\n\n\nClick Create service account.\n\n\nIn the Service account name field, enter a name, and\n click Create.\n\n\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex AI\"\ninto the filter box, and select\n Vertex AI Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\n\n\nClick Create. A JSON file that contains your key downloads to your\nlocal environment.\n\n\nEnter the path to your service account key as the\nGOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "import os\nimport sys\n\n# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# If on Google Cloud Notebooks, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "Create a Cloud Storage bucket\nThe following steps are required, regardless of your notebook environment.\nWhen you submit a training job using the Cloud SDK, you upload a Python package\ncontaining your training code to a Cloud Storage bucket. Vertex AI runs\nthe code from this package. In this tutorial, Vertex AI also saves the\ntrained model that results from your job in the same bucket. Using this model artifact, you can then\ncreate Vertex AI model and endpoint resources in order to serve\nonline predictions.\nSet the name of your Cloud Storage bucket below. It must be unique across all\nCloud Storage buckets.\nYou may also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Make sure to choose a region where Vertex AI services are\navailable. You may\nnot use a Multi-Regional Storage bucket for training with Vertex AI.", "BUCKET_URI = \"gs://[your-bucket-name]\" # @param {type:\"string\"}\nREGION = \"[your-region]\" # @param {type:\"string\"}\n\nif BUCKET_URI == \"\" or BUCKET_URI is None or BUCKET_URI == \"gs://[your-bucket-name]\":\n BUCKET_URI = \"gs://\" + PROJECT_ID + \"-aip-\" + TIMESTAMP\n\nif REGION == \"[your-region]\":\n REGION = \"us-central1\"", "Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.", "! gsutil mb -l $REGION $BUCKET_URI", "Finally, validate access to your Cloud Storage bucket by examining its contents:", "! 
gsutil ls -al $BUCKET_URI", "Import libraries and define constants\nImport required libraries.", "import pandas as pd\nfrom google.cloud import aiplatform\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom tensorflow.python.keras.utils import data_utils", "Initialize Vertex AI and set an experiment\nDefine experiment name.", "EXPERIMENT_NAME = \"\" # @param {type:\"string\"}", "If EXEPERIMENT_NAME is not set, set a default one below:", "if EXPERIMENT_NAME == \"\" or EXPERIMENT_NAME is None:\n EXPERIMENT_NAME = \"my-experiment-\" + TIMESTAMP", "Initialize the client for Vertex AI.", "aiplatform.init(\n project=PROJECT_ID,\n location=REGION,\n staging_bucket=BUCKET_URI,\n experiment=EXPERIMENT_NAME,\n)", "Tracking parameters and metrics in Vertex AI custom training jobs\nThis example uses the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone", "!wget https://storage.googleapis.com/download.tensorflow.org/data/abalone_train.csv\n!gsutil cp abalone_train.csv {BUCKET_URI}/data/\n\ngcs_csv_path = f\"{BUCKET_URI}/data/abalone_train.csv\"", "Create a managed tabular dataset from a CSV\nA Managed dataset can be used to create an AutoML model or a custom model.", "ds = aiplatform.TabularDataset.create(display_name=\"abalone\", gcs_source=[gcs_csv_path])\n\nds.resource_name", "Write the training script\nRun the following cell to create the training script that is used in the sample custom training job.", "%%writefile training_script.py\n\nimport pandas as pd\nimport argparse\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', dest='epochs',\n default=10, type=int,\n help='Number of epochs.')\nparser.add_argument('--num_units', dest='num_units',\n default=64, type=int,\n help='Number of unit for first layer.')\nargs = parser.parse_args()\n# uncomment and bump up replica_count for distributed training\n# strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n# tf.distribute.experimental_set_strategy(strategy)\n\ncol_names = [\"Length\", \"Diameter\", \"Height\", \"Whole weight\", \"Shucked weight\", \"Viscera weight\", \"Shell weight\", \"Age\"]\ntarget = \"Age\"\n\ndef aip_data_to_dataframe(wild_card_path):\n return pd.concat([pd.read_csv(fp.numpy().decode(), names=col_names)\n for fp in tf.data.Dataset.list_files([wild_card_path])])\n\ndef get_features_and_labels(df):\n return df.drop(target, axis=1).values, df[target].values\n\ndef data_prep(wild_card_path):\n return get_features_and_labels(aip_data_to_dataframe(wild_card_path))\n\n\nmodel = tf.keras.Sequential([layers.Dense(args.num_units), layers.Dense(1)])\nmodel.compile(loss='mse', optimizer='adam')\n\nmodel.fit(*data_prep(os.environ[\"AIP_TRAINING_DATA_URI\"]),\n epochs=args.epochs ,\n validation_data=data_prep(os.environ[\"AIP_VALIDATION_DATA_URI\"]))\nprint(model.evaluate(*data_prep(os.environ[\"AIP_TEST_DATA_URI\"])))\n\n# save as Vertex AI Managed model\ntf.saved_model.save(model, os.environ[\"AIP_MODEL_DIR\"])", "Launch a custom training job and track its trainig parameters on Vertex AI ML Metadata", "job = aiplatform.CustomTrainingJob(\n display_name=\"train-abalone-dist-1-replica\",\n script_path=\"training_script.py\",\n container_uri=\"us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-8:latest\",\n requirements=[\"gcsfs==0.7.1\"],\n 
model_serving_container_image_uri=\"us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-8:latest\",\n)", "Start a new experiment run to track training parameters and start the training job. Note that this operation will take around 10 mins.", "aiplatform.start_run(\"custom-training-run-1\") # Change this to your desired run name\nparameters = {\"epochs\": 10, \"num_units\": 64}\naiplatform.log_params(parameters)\n\nmodel = job.run(\n ds,\n replica_count=1,\n model_display_name=\"abalone-model\",\n args=[f\"--epochs={parameters['epochs']}\", f\"--num_units={parameters['num_units']}\"],\n)", "Deploy Model and calculate prediction metrics\nDeploy model to Google Cloud. This operation will take 10-20 mins.", "endpoint = model.deploy(machine_type=\"n1-standard-4\")", "Once model is deployed, perform online prediction using the abalone_test dataset and calculate prediction metrics.\nPrepare the prediction dataset.", "def read_data(uri):\n dataset_path = data_utils.get_file(\"abalone_test.data\", uri)\n col_names = [\n \"Length\",\n \"Diameter\",\n \"Height\",\n \"Whole weight\",\n \"Shucked weight\",\n \"Viscera weight\",\n \"Shell weight\",\n \"Age\",\n ]\n dataset = pd.read_csv(\n dataset_path,\n names=col_names,\n na_values=\"?\",\n comment=\"\\t\",\n sep=\",\",\n skipinitialspace=True,\n )\n return dataset\n\n\ndef get_features_and_labels(df):\n target = \"Age\"\n return df.drop(target, axis=1).values, df[target].values\n\n\ntest_dataset, test_labels = get_features_and_labels(\n read_data(\n \"https://storage.googleapis.com/download.tensorflow.org/data/abalone_test.csv\"\n )\n)", "Perform online prediction.", "prediction = endpoint.predict(test_dataset.tolist())\nprediction", "Calculate and track prediction evaluation metrics.", "mse = mean_squared_error(test_labels, prediction.predictions)\nmae = mean_absolute_error(test_labels, prediction.predictions)\n\naiplatform.log_metrics({\"mse\": mse, \"mae\": mae})", "Extract all parameters and metrics created during this experiment.", "aiplatform.get_experiment_df()", "View data in the Cloud Console\nParameters and metrics can also be viewed in the Cloud Console.", "print(\"Vertex AI Experiments:\")\nprint(\n f\"https://console.cloud.google.com/ai/platform/experiments/experiments?folder=&organizationId=&project={PROJECT_ID}\"\n)", "Cleaning up\nTo clean up all Google Cloud resources used in this project, you can delete the Google Cloud\nproject you used for the tutorial.\nOtherwise, you can delete the individual resources you created in this tutorial:\nTraining Job\nModel\nCloud Storage Bucket\n\nVertex AI Dataset\nTraining Job\nModel\nEndpoint\nCloud Storage Bucket", "# Warning: Setting this to true will delete everything in your bucket\ndelete_bucket = False\n\n# Delete dataset\nds.delete()\n\n# Delete the training job\njob.delete()\n\n# Undeploy model from endpoint\nendpoint.undeploy_all()\n\n# Delete the endpoint\nendpoint.delete()\n\n# Delete the model\nmodel.delete()\n\n\nif delete_bucket or os.getenv(\"IS_TESTING\"):\n ! gsutil -m rm -r $BUCKET_URI" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dnxbjyj/python-basic
libs/ConfigParser/handout.ipynb
mit
[ "用ConfigParser模块读写conf配置文件\nConfigParser是Python内置的一个读取配置文件的模块,用它来读取和修改配置文件非常方便,本文介绍一下它的基本用法。\n数据准备\n假设当前目录下有一个名为sys.conf的配置文件,其内容如下:\n```bash\n[db]\ndb_host=127.0.0.1\ndb_port=22\ndb_user=root\ndb_pass=root123\n[concurrent]\nthread = 10\nprocessor = 20\n```\n注:配置文件中,各个配置项其实是用等号'='隔开的键值对,这个等号两边如果有空白符,在处理的时候都会被自动去掉。但是key之前不能存在空白符,否则会报错。\n配置文件介绍\n配置文件即conf文件,其文件结构多为键值对的文件结构,比如上面的sys.conf文件。\nconf文件有2个层次结构,[]中的文本是section的名称,下面的键值对列表是item,代表每个配置项的键和值。\n初始化ConfigParser实例", "import ConfigParser\n\ncf = ConfigParser.ConfigParser()\ncf.read('./sys.conf')", "读取所有的section列表\nsection即[]中的内容。", "s = cf.sections()\nprint '【Output】'\nprint s", "读取指定section下options key列表\noptions即某个section下的每个键值对的key.", "opt = cf.options('concurrent')\nprint '【Output】'\nprint opt", "获取指定section下的键值对字典列表", "items = cf.items('concurrent')\nprint '【Output】'\nprint items", "按照指定数据类型读取配置值\ncf对象有get()、getint()、getboolean()、getfloat()四种方法来读取不同数据类型的配置项的值。", "db_host = cf.get('db','db_host')\ndb_port = cf.getint('db','db_port')\nthread = cf.getint('concurrent','thread')\n\nprint '【Output】'\nprint db_host,db_port,thread", "修改某个配置项的值\n比如要修改一下数据库的密码,可以这样修改:", "cf.set('db','db_pass','newpass')\n# 修改完了要写入才能生效\nwith open('sys.conf','w') as f:\n cf.write(f)", "添加一个section", "cf.add_section('log')\ncf.set('log','name','mylog.log')\ncf.set('log','num',100)\ncf.set('log','size',10.55)\ncf.set('log','auto_save',True)\ncf.set('log','info','%(bar)s is %(baz)s!')\n\n# 同样的,要写入才能生效\nwith open('sys.conf','w') as f:\n cf.write(f)", "执行上面代码后,sys.conf文件多了一个section,内容如下:\nbash\n[log]\nname = mylog.log\nnum = 100\nsize = 10.55\nauto_save = True\ninfo = %(bar)s is %(baz)s!\n移除某个section", "cf.remove_section('log')\n\n# 同样的,要写入才能生效\nwith open('sys.conf','w') as f:\n cf.write(f)", "移除某个option", "cf.remove_option('db','db_pass')\n\n# 同样的,要写入才能生效\nwith open('sys.conf','w') as f:\n cf.write(f)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google/floq-client
samples/notebooks/Floq_Client_Colab_Tutorial.ipynb
apache-2.0
[ "Copyright 2021 Floq authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");", "#@title Copyright 2021 Floq authors, All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Setup", "!pip install floq_client --quiet\n\n# Imports\nimport numpy as np\nimport sympy\n\nimport cirq\nimport floq.client", "Floq simulation", "nrows = 10\nncols = 2\nqubits = cirq.GridQubit.rect(nrows, ncols) # 20 qubits\nparameters = sympy.symbols([f'a{idx}' for idx in range(nrows * ncols)])\ncircuit = cirq.Circuit(cirq.HPowGate(exponent=p).on(q) for p, q in zip(parameters, qubits))", "New observable compatible with Floq\nFloq accepts observables in the type of cirq.ops.linear_combinations.PauliSum only", "observables = []\nfor i in range(nrows):\n for j in range(ncols):\n if i < nrows - 1:\n observables.append(cirq.Z(qubits[i*ncols + j]) * cirq.Z(qubits[(i + 1)*ncols + j]))\n # Z[i * ncols + j] * Z[(i + 1) * ncols + j]\n if j < ncols - 1:\n observables.append(cirq.Z(qubits[i*ncols + j]) * cirq.Z(qubits[i*ncols + j+1]))\n # Z[i * ncols + j] * Z[i * ncols + (j + 1)]\nlen(observables)\n\nimport copy\n\ndef sum_pauli_strings(obs):\n m = copy.deepcopy(obs[0])\n for o in obs[1:]:\n m += o\n return m\n\ndef split_observables(obs):\n# hack: split observables into many buckets with at most 26 terms\n obs_buckets = [obs[s:s+25] for s in range(0, len(obs), 25)]\n measure = []\n for obs in obs_buckets:\n measure.append(sum_pauli_strings(obs))\n return measure\n\nmeasure = split_observables(observables)\n\n[len(m) for m in measure]\n\n# These two results should have the same number of Pauli string terms\nassert sum_pauli_strings(observables) == sum_pauli_strings(measure)", "Padding qubits\nBecause Floq's minimum number of qubits is 26, we need to pad it. This will be changed in the future.", "def pad_circuit(circ, qubits):\n return circ + cirq.Circuit([cirq.I(q) for q in qubits])\n\ndef get_pad_qubits(circ):\n num = len(circ.all_qubits())\n return [cirq.GridQubit(num, pad) for pad in range(26 - num)]\n\npad_qubits = get_pad_qubits(circuit)\npadded_circuit = pad_circuit(circuit, pad_qubits)\n\npadded_circuit\n\nvalues = np.random.random(len(parameters))\nresolver = {s: v for s, v in zip(parameters, values)}\nprint(resolver)", "Using Floq simulator\nBefore going further, please FORK THIS COLAB NOTEBOOK, and DO NOT SHARE YOUR API KEY WITH OTHERS PLEASE\nCreate & start a Floq instance", "# Please specify your API_KEY\nAPI_KEY = \"\" #@param {type:\"string\"}\n\n!floq-client \"$API_KEY\" worker start\n\nclient = floq.client.CirqClient(API_KEY)", "Expectation values from the circuit and measurements", "energy = client.simulator.simulate_expectation_values(padded_circuit, measure, resolver)\n\n# energy shows expectation values on each Pauli sum in measure.\nenergy\n\n# Here is the total energy\nsum(energy)", "Samples from the circuit", "niter = 100\nsamples = client.simulator.run(padded_circuit, resolver, niter)\n\nsamples", "Stop the Floq instance", "!floq-client \"$API_KEY\" worker stop" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
m2dsupsdlclass/lectures-labs
labs/09_triplet_loss/triplet_loss_totally_looks_like.ipynb
mit
[ "Triplet Loss on Totally Looks Like dataset\nThis notebook is inspired from this Keras tutorial by Hazem Essam and Santiago L. Valdarrama.\nThe goal is to showcase the use of siamese networks and triplet loss to do representation learning using a CNN. It will also showcase data generators and data augmentation techniques.\nDataset\nThe dataset considered is the Totally Looks Like dataset, consisting of pairs of web curated similar looking images:\nImage pair 1 | Image pair 2\n:-------------------------:|:-------------------------:\n | \nThe goal is to extract generic human perceptual representation through a CNN. The next cell downloads the dataset and unzips it (run it asap, it will download a few hundead megabytes).", "import os\nimport os.path as op\nfrom urllib.request import urlretrieve\nfrom pathlib import Path\n\nURL = \"https://github.com/m2dsupsdlclass/lectures-labs/releases/download/totallylookslike/dataset_totally.zip\"\nFILENAME = \"dataset_totally.zip\"\n\nif not op.exists(FILENAME):\n print('Downloading %s to %s...' % (URL, FILENAME))\n urlretrieve(URL, FILENAME)\n\nimport zipfile\nif not op.exists(\"anchors\"):\n print('Extracting image files...')\n with zipfile.ZipFile(FILENAME, 'r') as zip_ref:\n zip_ref.extractall('.')\n\nhome_dir = Path(Path.home())\nanchor_images_path = Path(\"./anchors\")\npositive_images_path = Path(\"./positives\")", "We will use mostly TensorFlow functions to open and process images:", "def open_image(filename, target_shape = (256, 256)):\n \"\"\" Load the specified file as a JPEG image, preprocess it and\n resize it to the target shape.\n \"\"\"\n image_string = tf.io.read_file(filename)\n image = tf.image.decode_jpeg(image_string, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.resize(image, target_shape)\n return image\n\nimport tensorflow as tf\n\n# Careful to sort images folders so that the anchor and positive images correspond.\nanchor_images = sorted([str(anchor_images_path / f) for f in os.listdir(anchor_images_path)])\npositive_images = sorted([str(positive_images_path / f) for f in os.listdir(positive_images_path)])\n\nanchor_count = len(anchor_images)\npositive_count = len(positive_images)\n\nprint(f\"number of anchors: {anchor_count}, positive: {positive_count}\")\n\nanchor_dataset_files = tf.data.Dataset.from_tensor_slices(anchor_images)\nanchor_dataset = anchor_dataset_files.map(open_image)\npositive_dataset_files = tf.data.Dataset.from_tensor_slices(positive_images)\npositive_dataset = positive_dataset_files.map(open_image)\n\nimport matplotlib.pyplot as plt \n\ndef visualize(img_list):\n \"\"\"Visualize a list of images\"\"\"\n def show(ax, image):\n ax.imshow(image)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n fig = plt.figure(figsize=(6, 18))\n \n num_imgs = len(img_list)\n \n axs = fig.subplots(1, num_imgs)\n for i in range(num_imgs):\n show(axs[i], img_list[i])\n\n# display the first element of our dataset\nanc = next(iter(anchor_dataset))\npos = next(iter(positive_dataset))\nvisualize([anc, pos])\n\nfrom tensorflow.keras import layers\n\n# data augmentations\ndata_augmentation = tf.keras.Sequential([\n layers.RandomFlip(\"horizontal\"),\n # layers.RandomRotation(0.15), # you may add random rotations\n layers.RandomCrop(224, 224)\n])", "To generate the list of negative images, let's randomize the list of available images (anchors and positives) and concatenate them together.", "import numpy as np \n\nrng = 
np.random.RandomState(seed=42)\nrng.shuffle(anchor_images)\nrng.shuffle(positive_images)\n\nnegative_images = anchor_images + positive_images\nnp.random.RandomState(seed=32).shuffle(negative_images)\n\nnegative_dataset_files = tf.data.Dataset.from_tensor_slices(negative_images)\nnegative_dataset_files = negative_dataset_files.shuffle(buffer_size=4096)\n\n# Build final triplet dataset\ndataset = tf.data.Dataset.zip((anchor_dataset_files, positive_dataset_files, negative_dataset_files))\ndataset = dataset.shuffle(buffer_size=1024)\n\n# preprocess function\ndef preprocess_triplets(anchor, positive, negative):\n return (\n data_augmentation(open_image(anchor)),\n data_augmentation(open_image(positive)),\n data_augmentation(open_image(negative)),\n )\n\n# The map function is lazy, it is not evaluated on the spot, \n# but each time a batch is sampled.\ndataset = dataset.map(preprocess_triplets)\n\n# Let's now split our dataset in train and validation.\ntrain_dataset = dataset.take(round(anchor_count * 0.8))\nval_dataset = dataset.skip(round(anchor_count * 0.8))\n\n# define the batch size\ntrain_dataset = train_dataset.batch(32, drop_remainder=False)\ntrain_dataset = train_dataset.prefetch(8)\n\nval_dataset = val_dataset.batch(32, drop_remainder=False)\nval_dataset = val_dataset.prefetch(8)", "We can visualize a triplet and display its shape:", "anc_batch, pos_batch, neg_batch = next(train_dataset.take(1).as_numpy_iterator())\nprint(anc_batch.shape, pos_batch.shape, neg_batch.shape)\n\nidx = np.random.randint(0, 32)\nvisualize([anc_batch[idx], pos_batch[idx], neg_batch[idx]])", "Exercise\nBuild the embedding network, starting from a resnet and adding a few layers. The output should have a dimension $d= 128$ or $d=256$. Edit the following code, and you may use the next cell to test your code.\nBonus: Try to freeze the weights of the ResNet.", "from tensorflow.keras import Model, layers\nfrom tensorflow.keras import optimizers, losses, metrics, applications\nfrom tensorflow.keras.applications import resnet\n\ninput_img = layers.Input((224,224,3))\n\noutput = input_img # change that line and edit this code!\n\nembedding = Model(input_img, output, name=\"Embedding\")\n\noutput = embedding(np.random.randn(1,224,224,3))\noutput.shape", "Run the following can be run to get the same architecture as we have:", "from tensorflow.keras import Model, layers\nfrom tensorflow.keras import optimizers, losses, metrics, applications\nfrom tensorflow.keras.applications import resnet\n\ninput_img = layers.Input((224,224,3))\n\nbase_cnn = resnet.ResNet50(weights=\"imagenet\", input_shape=(224,224,3), include_top=False)\nresnet_output = base_cnn(input_img)\n\nflatten = layers.Flatten()(resnet_output)\ndense1 = layers.Dense(512, activation=\"relu\")(flatten)\n# The batch normalization layer enables to normalize the activations\n# over the batch\ndense1 = layers.BatchNormalization()(dense1)\ndense2 = layers.Dense(256, activation=\"relu\")(dense1)\ndense2 = layers.BatchNormalization()(dense2)\noutput = layers.Dense(256)(dense2)\n\nembedding = Model(input_img, output, name=\"Embedding\")\n\ntrainable = False\nfor layer in base_cnn.layers:\n if layer.name == \"conv5_block1_out\":\n trainable = True\n layer.trainable = trainable\n\ndef preprocess(x):\n \"\"\" we'll need to preprocess the input before passing them\n to the resnet for better results. 
This is the same preprocessing\n that was used during the training of ResNet on ImageNet.\n \"\"\"\n return resnet.preprocess_input(x * 255.)", "Exercise\nOur goal is now to build the positive and negative distances from 3 inputs images: the anchor, the positive, and the negative one $‖f(A) - f(P)‖²$ $‖f(A) - f(N)‖²$. You may define a specific Layer using the Keras subclassing API, or any other method.\nYou will need to run the Embedding model previously defined, don't forget to apply the preprocessing function defined above!", "anchor_input = layers.Input(name=\"anchor\", shape=(224, 224, 3))\npositive_input = layers.Input(name=\"positive\", shape=(224, 224, 3))\nnegative_input = layers.Input(name=\"negative\", shape=(224, 224, 3))\n\ndistances = [anchor_input, positive_input] # TODO: Change this code to actually compute the distances\n\nsiamese_network = Model(\n inputs=[anchor_input, positive_input, negative_input], outputs=distances\n)", "Solution: run the following cell to get the exact same method as we have.", "class DistanceLayer(layers.Layer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def call(self, anchor, positive, negative):\n ap_distance = tf.reduce_sum(tf.square(anchor - positive), -1)\n an_distance = tf.reduce_sum(tf.square(anchor - negative), -1)\n return (ap_distance, an_distance)\n\n\nanchor_input = layers.Input(name=\"anchor\", shape=(224, 224, 3))\npositive_input = layers.Input(name=\"positive\", shape=(224, 224, 3))\nnegative_input = layers.Input(name=\"negative\", shape=(224, 224, 3))\n\ndistances = DistanceLayer()(\n embedding(preprocess(anchor_input)),\n embedding(preprocess(positive_input)),\n embedding(preprocess(negative_input)),\n)\n\nsiamese_network = Model(\n inputs=[anchor_input, positive_input, negative_input], outputs=distances\n)", "The final triplet model\nOnce we are able to produce the distances, we may wrap it into a new Keras Model which includes the computation of the loss. The following implementation uses a subclassing of the Model class, redefining a few functions used internally during model.fit: call, train_step, test_step", "class TripletModel(Model):\n \"\"\"The Final Keras Model with a custom training and testing loops.\n\n Computes the triplet loss using the three embeddings produced by the\n Siamese Network.\n\n The triplet loss is defined as:\n L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0)\n \"\"\"\n\n def __init__(self, siamese_network, margin=0.5):\n super(TripletModel, self).__init__()\n self.siamese_network = siamese_network\n self.margin = margin\n self.loss_tracker = metrics.Mean(name=\"loss\")\n\n def call(self, inputs):\n return self.siamese_network(inputs)\n\n def train_step(self, data):\n # GradientTape is a context manager that records every operation that\n # you do inside. 
We are using it here to compute the loss so we can get\n # the gradients and apply them using the optimizer specified in\n # `compile()`.\n with tf.GradientTape() as tape:\n loss = self._compute_loss(data)\n\n # Storing the gradients of the loss function with respect to the\n # weights/parameters.\n gradients = tape.gradient(loss, self.siamese_network.trainable_weights)\n\n # Applying the gradients on the model using the specified optimizer\n self.optimizer.apply_gradients(\n zip(gradients, self.siamese_network.trainable_weights)\n )\n\n # Let's update and return the training loss metric.\n self.loss_tracker.update_state(loss)\n return {\"loss\": self.loss_tracker.result()}\n\n def test_step(self, data):\n loss = self._compute_loss(data)\n self.loss_tracker.update_state(loss)\n return {\"loss\": self.loss_tracker.result()}\n\n def _compute_loss(self, data):\n # The output of the network is a tuple containing the distances\n # between the anchor and the positive example, and the anchor and\n # the negative example.\n ap_distance, an_distance = self.siamese_network(data)\n\n loss = ap_distance - an_distance\n loss = tf.maximum(loss + self.margin, 0.0)\n return loss\n\n @property\n def metrics(self):\n # We need to list our metrics here so the `reset_states()` can be\n # called automatically.\n return [self.loss_tracker]\n\n\nsiamese_model = TripletModel(siamese_network)\nsiamese_model.compile(optimizer=optimizers.Adam(0.0001))\nsiamese_model.fit(train_dataset, epochs=10, validation_data=val_dataset)\n\nembedding.save('best_model.h5')\n\n# uncomment to get a pretrained model\nurl_pretrained = \"https://github.com/m2dsupsdlclass/lectures-labs/releases/download/totallylookslike/best_model.h5\"\nurlretrieve(url_pretrained, \"best_model.h5\")\n\nloaded_model = tf.keras.models.load_model('best_model.h5')", "Find most similar images in test dataset\nThe negative_images list was built by concatenating all possible images, both anchors and positive. We can reuse these to form a bank of possible images to query from.\nWe will first compute all embeddings of these images. To do so, we build a tf.Dataset and apply the few functions: open_img and preprocess.", "from functools import partial\n\nopen_img = partial(open_image, target_shape=(224,224))\nall_img_files = tf.data.Dataset.from_tensor_slices(negative_images)\ndataset = all_img_files.map(open_img).map(preprocess).take(1024).batch(32, drop_remainder=False).prefetch(8)\nall_embeddings = loaded_model.predict(dataset)\n\nall_embeddings.shape", "We can build a most_similar function which takes an image path as input and return the topn most similar images through the embedding representation. It would be possible to use another metric, such as the cosine similarity here.", "random_img = np.random.choice(negative_images)\n\ndef most_similar(img, topn=5):\n img_batch = tf.expand_dims(open_image(img, target_shape=(224, 224)), 0)\n new_emb = loaded_model.predict(preprocess(img_batch))\n dists = tf.sqrt(tf.reduce_sum((all_embeddings - new_emb)**2, -1)).numpy()\n idxs = np.argsort(dists)[:topn]\n return [(negative_images[idx], dists[idx]) for idx in idxs]\n\nprint(random_img)\nmost_similar(random_img)\n\nrandom_img = np.random.choice(negative_images)\nvisualize([open_image(im) for im, _ in most_similar(random_img)])", "Note that this is not a rigorous evaluation, as we are using the images from the training set for both the query and the possible images. 
You may try with a completely different picture!\nGoing further\nIn order to improve the training efficiency, hard negative mining would be most relevant in that case." ]
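As suggested above, cosine similarity is a natural alternative metric for the retrieval step. A small variant of `most_similar` based on normalized dot products is sketched below; it reuses `all_embeddings`, `negative_images`, `loaded_model`, `open_image`, `preprocess` and `visualize` from the cells above.

```python
def most_similar_cosine(img, topn=5):
    """Return the topn (path, similarity) pairs under cosine similarity."""
    img_batch = tf.expand_dims(open_image(img, target_shape=(224, 224)), 0)
    new_emb = loaded_model.predict(preprocess(img_batch))
    # Normalizing both sides turns the dot product into a cosine similarity.
    emb_norm = all_embeddings / np.linalg.norm(all_embeddings, axis=1, keepdims=True)
    new_norm = new_emb / np.linalg.norm(new_emb, axis=1, keepdims=True)
    sims = (emb_norm @ new_norm.T).ravel()
    idxs = np.argsort(-sims)[:topn]          # highest similarity first
    return [(negative_images[i], float(sims[i])) for i in idxs]

random_img = np.random.choice(negative_images)
visualize([open_image(im) for im, _ in most_similar_cosine(random_img)])
```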
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.22/_downloads/243172b1ef6a2d804d3245b8c0a927ef/plot_60_maxwell_filtering_sss.ipynb
bsd-3-clause
[ "%matplotlib inline", "Signal-space separation (SSS) and Maxwell filtering\nThis tutorial covers reducing environmental noise and compensating for head\nmovement with SSS and Maxwell filtering.\n :depth: 2\nAs usual we'll start by importing the modules we need, loading some\nexample data &lt;sample-dataset&gt;, and cropping it to save on memory:", "import os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport mne\nfrom mne.preprocessing import find_bad_channels_maxwell\n\nsample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)\nraw.crop(tmax=60)", "Background on SSS and Maxwell filtering\nSignal-space separation (SSS) :footcite:TauluKajola2005,TauluSimola2006\nis a technique based on the physics\nof electromagnetic fields. SSS separates the measured signal into components\nattributable to sources inside the measurement volume of the sensor array\n(the internal components), and components attributable to sources outside\nthe measurement volume (the external components). The internal and external\ncomponents are linearly independent, so it is possible to simply discard the\nexternal components to reduce environmental noise. Maxwell filtering is a\nrelated procedure that omits the higher-order components of the internal\nsubspace, which are dominated by sensor noise. Typically, Maxwell filtering\nand SSS are performed together (in MNE-Python they are implemented together\nin a single function).\nLike SSP &lt;tut-artifact-ssp&gt;, SSS is a form of projection. Whereas SSP\nempirically determines a noise subspace based on data (empty-room recordings,\nEOG or ECG activity, etc) and projects the measurements onto a subspace\northogonal to the noise, SSS mathematically constructs the external and\ninternal subspaces from spherical harmonics_ and reconstructs the sensor\nsignals using only the internal subspace (i.e., does an oblique projection).\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>Maxwell filtering was originally developed for Elekta Neuromag® systems,\n and should be considered *experimental* for non-Neuromag data. 
See the\n Notes section of the :func:`~mne.preprocessing.maxwell_filter` docstring\n for details.</p></div>\n\nThe MNE-Python implementation of SSS / Maxwell filtering currently provides\nthe following features:\n\nBasic bad channel detection\n (:func:~mne.preprocessing.find_bad_channels_maxwell)\nBad channel reconstruction\nCross-talk cancellation\nFine calibration correction\ntSSS\nCoordinate frame translation\nRegularization of internal components using information theory\nRaw movement compensation (using head positions estimated by MaxFilter)\ncHPI subtraction (see :func:mne.chpi.filter_chpi)\nHandling of 3D (in addition to 1D) fine calibration files\nEpoch-based movement compensation as described in\n :footcite:TauluKajola2005 through :func:mne.epochs.average_movements\nExperimental processing of data from (un-compensated) non-Elekta\n systems\n\nUsing SSS and Maxwell filtering in MNE-Python\nFor optimal use of SSS with data from Elekta Neuromag® systems, you should\nprovide the path to the fine calibration file (which encodes site-specific\ninformation about sensor orientation and calibration) as well as a crosstalk\ncompensation file (which reduces interference between Elekta's co-located\nmagnetometer and paired gradiometer sensor units).", "fine_cal_file = os.path.join(sample_data_folder, 'SSS', 'sss_cal_mgh.dat')\ncrosstalk_file = os.path.join(sample_data_folder, 'SSS', 'ct_sparse_mgh.fif')", "Before we perform SSS we'll look for bad channels — MEG 2443 is quite\nnoisy.\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>It is critical to mark bad channels in ``raw.info['bads']`` *before*\n calling :func:`~mne.preprocessing.maxwell_filter` in order to prevent\n bad channel noise from spreading.</p></div>\n\nLet's see if we can automatically detect it.", "raw.info['bads'] = []\nraw_check = raw.copy()\nauto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels_maxwell(\n raw_check, cross_talk=crosstalk_file, calibration=fine_cal_file,\n return_scores=True, verbose=True)\nprint(auto_noisy_chs) # we should find them!\nprint(auto_flat_chs) # none for this dataset", "<div class=\"alert alert-info\"><h4>Note</h4><p>`~mne.preprocessing.find_bad_channels_maxwell` needs to operate on\n a signal without line noise or cHPI signals. By default, it simply\n applies a low-pass filter with a cutoff frequency of 40 Hz to the\n data, which should remove these artifacts. You may also specify a\n different cutoff by passing the ``h_freq`` keyword argument. If you\n set ``h_freq=None``, no filtering will be applied. This can be\n useful if your data has already been preconditioned, for example\n using :func:`mne.chpi.filter_chpi`,\n :func:`mne.io.Raw.notch_filter`, or :meth:`mne.io.Raw.filter`.</p></div>\n\nNow we can update the list of bad channels in the dataset.", "bads = raw.info['bads'] + auto_noisy_chs + auto_flat_chs\nraw.info['bads'] = bads", "We called ~mne.preprocessing.find_bad_channels_maxwell with the optional\nkeyword argument return_scores=True, causing the function to return a\ndictionary of all data related to the scoring used to classify channels as\nnoisy or flat. 
This information can be used to produce diagnostic figures.\nIn the following, we will generate such visualizations for\nthe automated detection of noisy gradiometer channels.", "# Only select the data forgradiometer channels.\nch_type = 'grad'\nch_subset = auto_scores['ch_types'] == ch_type\nch_names = auto_scores['ch_names'][ch_subset]\nscores = auto_scores['scores_noisy'][ch_subset]\nlimits = auto_scores['limits_noisy'][ch_subset]\nbins = auto_scores['bins'] # The the windows that were evaluated.\n# We will label each segment by its start and stop time, with up to 3\n# digits before and 3 digits after the decimal place (1 ms precision).\nbin_labels = [f'{start:3.3f} – {stop:3.3f}'\n for start, stop in bins]\n\n# We store the data in a Pandas DataFrame. The seaborn heatmap function\n# we will call below will then be able to automatically assign the correct\n# labels to all axes.\ndata_to_plot = pd.DataFrame(data=scores,\n columns=pd.Index(bin_labels, name='Time (s)'),\n index=pd.Index(ch_names, name='Channel'))\n\n# First, plot the \"raw\" scores.\nfig, ax = plt.subplots(1, 2, figsize=(12, 8))\nfig.suptitle(f'Automated noisy channel detection: {ch_type}',\n fontsize=16, fontweight='bold')\nsns.heatmap(data=data_to_plot, cmap='Reds', cbar_kws=dict(label='Score'),\n ax=ax[0])\n[ax[0].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray')\n for x in range(1, len(bins))]\nax[0].set_title('All Scores', fontweight='bold')\n\n# Now, adjust the color range to highlight segments that exceeded the limit.\nsns.heatmap(data=data_to_plot,\n vmin=np.nanmin(limits), # bads in input data have NaN limits\n cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[1])\n[ax[1].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray')\n for x in range(1, len(bins))]\nax[1].set_title('Scores > Limit', fontweight='bold')\n\n# The figure title should not overlap with the subplots.\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])", "<div class=\"alert alert-info\"><h4>Note</h4><p>You can use the very same code as above to produce figures for\n *flat* channel detection. Simply replace the word \"noisy\" with\n \"flat\", and replace ``vmin=np.nanmin(limits)`` with\n ``vmax=np.nanmax(limits)``.</p></div>\n\nYou can see the un-altered scores for each channel and time segment in the\nleft subplots, and thresholded scores – those which exceeded a certain limit\nof noisiness – in the right subplots. While the right subplot is entirely\nwhite for the magnetometers, we can see a horizontal line extending all the\nway from left to right for the gradiometers. This line corresponds to channel\nMEG 2443, which was reported as auto-detected noisy channel in the step\nabove. But we can also see another channel exceeding the limits, apparently\nin a more transient fashion. It was therefore not detected as bad, because\nthe number of segments in which it exceeded the limits was less than 5,\nwhich MNE-Python uses by default.\n<div class=\"alert alert-info\"><h4>Note</h4><p>You can request a different number of segments that must be\n found to be problematic before\n `~mne.preprocessing.find_bad_channels_maxwell` reports them as bad.\n To do this, pass the keyword argument ``min_count`` to the\n function.</p></div>\n\nObviously, this algorithm is not perfect. Specifically, on closer inspection\nof the raw data after looking at the diagnostic plots above, it becomes clear\nthat the channel exceeding the \"noise\" limits in some segments without\nqualifying as \"bad\", in fact contains some flux jumps. 
There were just not\nenough flux jumps in the recording for our automated procedure to report\nthe channel as bad. So it can still be useful to manually inspect and mark\nbad channels. The channel in question is MEG 2313. Let's mark it as bad:", "raw.info['bads'] += ['MEG 2313'] # from manual inspection", "After that, performing SSS and Maxwell filtering is done with a\nsingle call to :func:~mne.preprocessing.maxwell_filter, with the crosstalk\nand fine calibration filenames provided (if available):", "raw_sss = mne.preprocessing.maxwell_filter(\n raw, cross_talk=crosstalk_file, calibration=fine_cal_file, verbose=True)", "To see the effect, we can plot the data before and after SSS / Maxwell\nfiltering.", "raw.pick(['meg']).plot(duration=2, butterfly=True)\nraw_sss.pick(['meg']).plot(duration=2, butterfly=True)", "Notice that channels marked as \"bad\" have been effectively repaired by SSS,\neliminating the need to perform interpolation &lt;tut-bad-channels&gt;.\nThe heartbeat artifact has also been substantially reduced.\nThe :func:~mne.preprocessing.maxwell_filter function has parameters\nint_order and ext_order for setting the order of the spherical\nharmonic expansion of the interior and exterior components; the default\nvalues are appropriate for most use cases. Additional parameters include\ncoord_frame and origin for controlling the coordinate frame (\"head\"\nor \"meg\") and the origin of the sphere; the defaults are appropriate for most\nstudies that include digitization of the scalp surface / electrodes. See the\ndocumentation of :func:~mne.preprocessing.maxwell_filter for details.\nSpatiotemporal SSS (tSSS)\nAn assumption of SSS is that the measurement volume (the spherical shell\nwhere the sensors are physically located) is free of electromagnetic sources.\nThe thickness of this source-free measurement shell should be 4-8 cm for SSS\nto perform optimally. In practice, there may be sources falling within that\nmeasurement volume; these can often be mitigated by using Spatiotemporal\nSignal Space Separation (tSSS) :footcite:TauluSimola2006.\ntSSS works by looking for temporal\ncorrelation between components of the internal and external subspaces, and\nprojecting out any components that are common to the internal and external\nsubspaces. The projection is done in an analogous way to\nSSP &lt;tut-artifact-ssp&gt;, except that the noise vector is computed\nacross time points instead of across sensors.\nTo use tSSS in MNE-Python, pass a time (in seconds) to the parameter\nst_duration of :func:~mne.preprocessing.maxwell_filter. This will\ndetermine the \"chunk duration\" over which to compute the temporal projection.\nThe chunk duration effectively acts as a high-pass filter with a cutoff\nfrequency of $\\frac{1}{\\mathtt{st_duration}}~\\mathrm{Hz}$; this\neffective high-pass has an important consequence:\n\nIn general, larger values of st_duration are better (provided that your\n computer has sufficient memory) because larger values of st_duration\n will have a smaller effect on the signal.\n\nIf the chunk duration does not evenly divide your data length, the final\n(shorter) chunk will be added to the prior chunk before filtering, leading\nto slightly different effective filtering for the combined chunk (the\neffective cutoff frequency differing at most by a factor of 2). 
If you need\nto ensure identical processing of all analyzed chunks, either:\n\n\nchoose a chunk duration that evenly divides your data length (only\n recommended if analyzing a single subject or run), or\n\n\ninclude at least 2 * st_duration of post-experiment recording time at\n the end of the :class:~mne.io.Raw object, so that the data you intend to\n further analyze is guaranteed not to be in the final or penultimate chunks.\n\n\nAdditional parameters affecting tSSS include st_correlation (to set the\ncorrelation value above which correlated internal and external components\nwill be projected out) and st_only (to apply only the temporal projection\nwithout also performing SSS and Maxwell filtering). See the docstring of\n:func:~mne.preprocessing.maxwell_filter for details.\nMovement compensation\nIf you have information about subject head position relative to the sensors\n(i.e., continuous head position indicator coils, or :term:cHPI &lt;HPI&gt;), SSS\ncan take that into account when projecting sensor data onto the internal\nsubspace. Head position data can be computed using\n:func:mne.chpi.compute_chpi_locs and :func:mne.chpi.compute_head_pos,\nor loaded with the:func:mne.chpi.read_head_pos function. The\nexample data &lt;sample-dataset&gt; doesn't include cHPI, so here we'll\nload a :file:.pos file used for testing, just to demonstrate:", "head_pos_file = os.path.join(mne.datasets.testing.data_path(), 'SSS',\n 'test_move_anon_raw.pos')\nhead_pos = mne.chpi.read_head_pos(head_pos_file)\nmne.viz.plot_head_positions(head_pos, mode='traces')", "The cHPI data file could also be passed as the head_pos parameter of\n:func:~mne.preprocessing.maxwell_filter. Not only would this account for\nmovement within a given recording session, but also would effectively\nnormalize head position across different measurement sessions and subjects.\nSee here &lt;example-movement-comp&gt; for an extended example of applying\nmovement compensation during Maxwell filtering / SSS. Another option is to\napply movement compensation when averaging epochs into an\n:class:~mne.Evoked instance, using the :func:mne.epochs.average_movements\nfunction.\nEach of these approaches requires time-varying estimates of head position,\nwhich is obtained from MaxFilter using the -headpos and -hp\narguments (see the MaxFilter manual for details).\nCaveats to using SSS / Maxwell filtering\n\n\nThere are patents related to the Maxwell filtering algorithm, which may\n legally preclude using it in commercial applications. More details are\n provided in the documentation of\n :func:~mne.preprocessing.maxwell_filter.\n\n\nSSS works best when both magnetometers and gradiometers are present, and\n is most effective when gradiometers are planar (due to the need for very\n accurate sensor geometry and fine calibration information). Thus its\n performance is dependent on the MEG system used to collect the data.\n\n\nReferences\n.. footbibliography::\n.. LINKS" ]
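Since tSSS is only described in prose above, here is a minimal sketch of what the call looks like in practice. It reuses `raw`, `crosstalk_file` and `fine_cal_file` from earlier cells; the 10 s chunk duration is just an illustrative choice that happens to divide the 60 s crop evenly, and `st_correlation=0.98` restates the default.

```python
# Spatiotemporal SSS (tSSS): same call as before, plus a chunk duration.
raw_tsss = mne.preprocessing.maxwell_filter(
    raw, cross_talk=crosstalk_file, calibration=fine_cal_file,
    st_duration=10., st_correlation=0.98, verbose=True)
raw_tsss.pick(['meg']).plot(duration=2, butterfly=True)
```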
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
atulsingh0/MachineLearning
BMLSwPython/01_GettingStarted_withPython.ipynb
gpl-3.0
[ "# import\nimport numpy as np\nimport scipy as sp\nimport timeit\nimport matplotlib.pyplot as plt\n\n%matplotlib inline", "Comparing the time", "start = timeit.timeit()\n\nX = range(1000)\n\npySum = sum([n*n for n in X])\n\nend = timeit.timeit()\n\nprint(\"Total time taken: \", end-start)", "Learning Scipy", "# reading the web data \n\ndata = sp.genfromtxt(\"data/web_traffic.tsv\", delimiter=\"\\t\")\nprint(data[:3])\nprint(len(data))", "Preprocessing and Cleaning the data", "X = data[:, 0]\ny = data[:, 1]\n\n# checking for nan values\nprint(sum(np.isnan(X)))\nprint(sum(np.isnan(y)))", "Filtering the nan data", "X = X[~np.isnan(y)]\ny = y[~np.isnan(y)]\n\n# checking for nan values\nprint(sum(np.isnan(X)))\nprint(sum(np.isnan(y)))\n\nfig, ax = plt.subplots(figsize=(8,6))\n\nax.plot(X, y, '.b')\nax.margins(0.2)\nplt.xticks([w*24*7 for w in range(0, 6)], [\"week %d\" %w for w in range(0, 6)])\nax.set_xlabel(\"Week\")\nax.set_ylabel(\"Hits / Week\")\nax.set_title(\"Web Traffic over weeks\")", "Choosing the right model and learning algorithm", "# creating a error calc fuction\ndef error(f, x, y):\n return np.sum((f(x) - y)**2)", "Linear 1-d model", "# sp's polyfit func do the same\nfp1, residuals, rank, sv, rcond = sp.polyfit(X, y, 1, full=True)\n\nprint(fp1)\nprint(residuals)\n\n# generating the one order function\nf1 = sp.poly1d(fp1)\n\n# checking error\nprint(\"Error : \",error(f1, X, y))\n\nx1 = np.array([-100, np.max(X)+100])\ny1 = f1(x1)\n\nax.plot(x1, y1, c='g', linewidth=2)\nax.legend([\"data\", \"d = %i\" % f1.order], loc='best')\nfig", "$$ f(x) = 2.59619213 * x + 989.02487106 $$ \n Polynomial 2-d", "# sp's polyfit func do the same\nfp2 = sp.polyfit(X, y, 2)\n\nprint(fp2)\n\n# generating the 2 order function\nf2= sp.poly1d(fp2)\n\n# checking error\nprint(\"Error : \",error(f2, X, y))\n\nx1= np.linspace(-100, np.max(X)+100, 2000)\ny2= f2(x1)\n\nax.plot(x1, y2, c='r', linewidth=2)\nax.legend([\"data\", \"d = %i\" % f1.order, \"d = %i\" % f2.order], loc='best')\nfig", "$$ f(x) = 0.0105322215 * x^2 - 5.26545650 * x + 1974.6082 $$\nWhat if we want to regress two response output instead of one, As we can see in the graph that there is a steep change in data between week 3 and 4, so let's draw two reponses line, one for the data between week0 and week3.5 and second for week3.5 to week5", "# we are going to divide the data on time so\ndiv = 3.5*7*24\n\nX1 = X[X<=div]\nY1 = y[X<=div]\n\nX2 = X[X>div]\nY2 = y[X>div]\n\n\n# now plotting the both data\n\nfa = sp.poly1d(sp.polyfit(X1, Y1, 1))\nfb = sp.poly1d(sp.polyfit(X2, Y2, 1))\n\nfa_error = error(fa, X1, Y1)\nfb_error = error(fb, X2, Y2)\nprint(\"Error inflection = %f\" % (fa_error + fb_error))\n\nx1 = np.linspace(-100, X1[-1]+100, 1000)\nx2 = np.linspace(X1[-10], X2[-1]+100, 1000)\n\nya = fa(x1)\nyb = fb(x2)\n\nax.plot(x1, ya, c='#800000', linewidth=2) # brown\nax.plot(x2, yb, c='#FFA500', linewidth=2) # orange\nax.grid(True)\n\n\nfig", "Suppose we choose that function with degree 2 is best fit for our data and want to predict that if everything will go same then when we will hit the 100000 count ??\n$$ 0 = f(x) - 100000 = 0.0105322215 * x^2 - 5.26545650 * x + 1974.6082 - 100000 $$ \nSciPy's optimize module has the function \nfsolve that achieves this, when providing an initial starting position with parameter \nx0. As every entry in our input data file corresponds to one hour, and we have 743 of \nthem, we set the starting position to some value after that. 
Let fbt2 be the winning \npolynomial of degree 2.", "print(f2)\n\nprint(f2 - 100000)\n\n# import \nfrom scipy.optimize import fsolve\n\nreached_max = fsolve(f2-100000, x0=800)/(7*24)\nprint(\"100,000 hits/hour expected at week %f\" % reached_max[0])" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
uwkejia/Clean-Energy-Outlook
examples/Demo.ipynb
mit
[ "Examples\nImporting libraries", "from ceo import data_cleaning\nfrom ceo import missing_data\nfrom ceo import svr_prediction\nfrom ceo import ridge_prediction", "datacleaning\n\nThe datacleaning module is used to clean and organize the data into 51 CSV files corresponding to the 50 states of the US and the District of Columbia. \nThe wrapping function clean_all_data takes all the data sets as input and sorts the data in to CSV files of the states.\nThe CSVs are stored in the Cleaned Data directory which is under the Data directory.", "data_cleaning.clean_all_data()", "missing_data\n\nThe missing_data module is used to estimate the missing data of the GDP (from 1960 - 1962) and determine the values of the predictors (from 2016-2020).\nThe wrapping function predict_all takes the CSV files of the states as input and stores the predicted missing values in the same CSV files.\nThe CSVs generated replace the previous CSV files in the Cleaned Data directory which is under the Data directory.", "missing_data.predict_all()", "ridge_prediction\n\nThe ridge_prediction module is used to predict the future values of energies like wind energy, solar energy, hydro energy and nuclear energy from 2016-2020 using ridge regression.\nThe wrapping function ridge_predict_all takes the CSV files of the states as input and stores the future values of the energies in another CSV file under Ridge Regression folder under the Predicted Data directory.", "ridge_prediction.ridge_predict_all()", "svr_prediction\n\nThe svr_prediction module is used to predict the future values of energies like wind energy, solar energy, hydro energy and nuclear energy from 2016-2020 using Support Vector Regression\nThe wrapping function SVR_predict_all takes the CSV files of the states as input and stores the future values of the energies in another CSV file under SVR folder under the Predicted Data directory.", "svr_prediction.SVR_predict_all()", "plots\nVisualizations is done using Tableau software. The Tableau workbook for the predicted data is included in the repository. The Tableau dashboard created for this data is illustrated below:", "%%HTML\n\n<div class='tableauPlaceholder' id='viz1489609724011' style='position: relative'><noscript><a href='#'><img alt='Clean Energy Production in the contiguous United States(in million kWh) ' src='https:&#47;&#47;public.tableau.com&#47;static&#47;images&#47;PB&#47;PB87S38NW&#47;1_rss.png' style='border: none' /></a></noscript><object class='tableauViz' style='display:none;'><param name='host_url' value='https%3A%2F%2Fpublic.tableau.com%2F' /> <param name='path' value='shared&#47;PB87S38NW' /> <param name='toolbar' value='yes' /><param name='static_image' value='https:&#47;&#47;public.tableau.com&#47;static&#47;images&#47;PB&#47;PB87S38NW&#47;1.png' /> <param name='animate_transition' value='yes' /><param name='display_static_image' value='yes' /><param name='display_spinner' value='yes' /><param name='display_overlay' value='yes' /><param name='display_count' value='yes' /></object></div> <script type='text/javascript'> var divElement = document.getElementById('viz1489609724011'); var vizElement = divElement.getElementsByTagName('object')[0]; vizElement.style.width='1004px';vizElement.style.height='869px'; var scriptElement = document.createElement('script'); scriptElement.src = 'https://public.tableau.com/javascripts/api/viz_v1.js'; vizElement.parentNode.insertBefore(scriptElement, vizElement); </script>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.23/_downloads/c7633c38a703b9d0a626a5a4fa161026/psf_ctf_label_leakage.ipynb
bsd-3-clause
[ "%matplotlib inline", "Visualize source leakage among labels using a circular graph\nThis example computes all-to-all pairwise leakage among 68 regions in\nsource space based on MNE inverse solutions and a FreeSurfer cortical\nparcellation. Label-to-label leakage is estimated as the correlation among the\nlabels' point-spread functions (PSFs). It is visualized using a circular graph\nwhich is ordered based on the locations of the regions in the axial plane.", "# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>\n# Martin Luessi <mluessi@nmr.mgh.harvard.edu>\n# Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.minimum_norm import (read_inverse_operator,\n make_inverse_resolution_matrix,\n get_point_spread)\n\nfrom mne.viz import circular_layout, plot_connectivity_circle\n\nprint(__doc__)", "Load forward solution and inverse operator\nWe need a matching forward solution and inverse operator to compute\nresolution matrices for different methods.", "data_path = sample.data_path()\nsubjects_dir = data_path + '/subjects'\nfname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'\nfname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif'\nforward = mne.read_forward_solution(fname_fwd)\n# Convert forward solution to fixed source orientations\nmne.convert_forward_solution(\n forward, surf_ori=True, force_fixed=True, copy=False)\ninverse_operator = read_inverse_operator(fname_inv)\n\n# Compute resolution matrices for MNE\nrm_mne = make_inverse_resolution_matrix(forward, inverse_operator,\n method='MNE', lambda2=1. / 3.**2)\nsrc = inverse_operator['src']\ndel forward, inverse_operator # save memory", "Read and organise labels for cortical parcellation\nGet labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi", "labels = mne.read_labels_from_annot('sample', parc='aparc',\n subjects_dir=subjects_dir)\nn_labels = len(labels)\nlabel_colors = [label.color for label in labels]\n# First, we reorder the labels based on their location in the left hemi\nlabel_names = [label.name for label in labels]\nlh_labels = [name for name in label_names if name.endswith('lh')]\n\n# Get the y-location of the label\nlabel_ypos = list()\nfor name in lh_labels:\n idx = label_names.index(name)\n ypos = np.mean(labels[idx].pos[:, 1])\n label_ypos.append(ypos)\n\n# Reorder the labels based on their location\nlh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]\n\n# For the right hemi\nrh_labels = [label[:-2] + 'rh' for label in lh_labels]", "Compute point-spread function summaries (PCA) for all labels\nWe summarise the PSFs per label by their first five principal components, and\nuse the first component to evaluate label-to-label leakage below.", "# Compute first PCA component across PSFs within labels.\n# Note the differences in explained variance, probably due to different\n# spatial extents of labels.\nn_comp = 5\nstcs_psf_mne, pca_vars_mne = get_point_spread(\n rm_mne, src, labels, mode='pca', n_comp=n_comp, norm=None,\n return_pca_vars=True)\nn_verts = rm_mne.shape[0]\ndel rm_mne", "We can show the explained variances of principal components per label. 
Note\nhow they differ across labels, most likely due to their varying spatial\nextent.", "with np.printoptions(precision=1):\n for [name, var] in zip(label_names, pca_vars_mne):\n print(f'{name}: {var.sum():.1f}% {var}')", "The output shows the summed variance explained by the first five principal\ncomponents as well as the explained variances of the individual components.\nEvaluate leakage based on label-to-label PSF correlations\nNote that correlations ignore the overall amplitude of PSFs, i.e. they do\nnot show which region will potentially be the bigger \"leaker\".", "# get PSFs from Source Estimate objects into matrix\npsfs_mat = np.zeros([n_labels, n_verts])\n# Leakage matrix for MNE, get first principal component per label\nfor [i, s] in enumerate(stcs_psf_mne):\n psfs_mat[i, :] = s.data[:, 0]\n# Compute label-to-label leakage as Pearson correlation of PSFs\n# Sign of correlation is arbitrary, so take absolute values\nleakage_mne = np.abs(np.corrcoef(psfs_mat))\n\n# Save the plot order and create a circular layout\nnode_order = lh_labels[::-1] + rh_labels # mirror label order across hemis\nnode_angles = circular_layout(label_names, node_order, start_pos=90,\n group_boundaries=[0, len(label_names) / 2])\n# Plot the graph using node colors from the FreeSurfer parcellation. We only\n# show the 200 strongest connections.\nfig = plt.figure(num=None, figsize=(8, 8), facecolor='black')\nplot_connectivity_circle(leakage_mne, label_names, n_lines=200,\n node_angles=node_angles, node_colors=label_colors,\n title='MNE Leakage', fig=fig)", "Most leakage occurs for neighbouring regions, but also for deeper regions\nacross hemispheres.\nSave the figure (optional)\nMatplotlib controls figure facecolor separately for interactive display\nversus for saved figures. Thus when saving you must specify facecolor,\nelse your labels, title, etc will not be visible::\n&gt;&gt;&gt; fname_fig = data_path + '/MEG/sample/plot_label_leakage.png'\n&gt;&gt;&gt; fig.savefig(fname_fig, facecolor='black')\n\nPlot PSFs for individual labels\nLet us confirm for left and right lateral occipital lobes that there is\nindeed no leakage between them, as indicated by the correlation graph.\nWe can plot the summary PSFs for both labels to examine the spatial extent of\ntheir leakage.", "# left and right lateral occipital\nidx = [22, 23]\nstc_lh = stcs_psf_mne[idx[0]]\nstc_rh = stcs_psf_mne[idx[1]]\n\n# Maximum for scaling across plots\nmax_val = np.max([stc_lh.data, stc_rh.data])", "Point-spread function for the lateral occipital label in the left hemisphere", "brain_lh = stc_lh.plot(subjects_dir=subjects_dir, subject='sample',\n hemi='both', views='caudal',\n clim=dict(kind='value',\n pos_lims=(0, max_val / 2., max_val)))\nbrain_lh.add_text(0.1, 0.9, label_names[idx[0]], 'title', font_size=16)", "and in the right hemisphere.", "brain_rh = stc_rh.plot(subjects_dir=subjects_dir, subject='sample',\n hemi='both', views='caudal',\n clim=dict(kind='value',\n pos_lims=(0, max_val / 2., max_val)))\nbrain_rh.add_text(0.1, 0.9, label_names[idx[1]], 'title', font_size=16)", "Both summary PSFs are confined to their respective hemispheres, indicating\nthat there is indeed low leakage between these two regions." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs
site/en/guide/dtensor_overview.ipynb
apache-2.0
[ "Copyright 2019 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "DTensor Concepts\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/dtensor_overview\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/dtensor_overview.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/dtensor_overview.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/dtensor_overview.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nOverview\nThis colab introduces DTensor, an extension to TensorFlow for synchronous distributed computing.\nDTensor provides a global programming model that allows developers to compose applications that operate on Tensors globally while managing the distribution across devices internally. DTensor distributes the program and tensors according to the sharding directives through a procedure called Single program, multiple data (SPMD) expansion.\nBy decoupling the application from sharding directives, DTensor enables running the same application on a single device, multiple devices, or even multiple clients, while preserving its global semantics.\nThis guide introduces DTensor concepts for distributed computing, and how DTensor integrates with TensorFlow. To see a demo of using DTensor in model training, see Distributed training with DTensor tutorial.\nSetup\nDTensor is part of TensorFlow 2.9.0 release, and also included in the TensorFlow nightly builds since 04/09/2022.", "!pip install --quiet --upgrade --pre tensorflow", "Once installed, import tensorflow and tf.experimental.dtensor. Then configure TensorFlow to use 6 virtual CPUs.\nEven though this example uses vCPUs, DTensor works the same way on CPU, GPU or TPU devices.", "import tensorflow as tf\nfrom tensorflow.experimental import dtensor\n\nprint('TensorFlow version:', tf.__version__)\n\ndef configure_virtual_cpus(ncpu):\n phy_devices = tf.config.list_physical_devices('CPU')\n tf.config.set_logical_device_configuration(phy_devices[0], [\n tf.config.LogicalDeviceConfiguration(),\n ] * ncpu)\n\nconfigure_virtual_cpus(6)\nDEVICES = [f'CPU:{i}' for i in range(6)]\n\ntf.config.list_logical_devices('CPU')", "DTensor's model of distributed tensors\nDTensor introduces two concepts: dtensor.Mesh and dtensor.Layout. 
They are abstractions to model the sharding of tensors across topologically related devices.\n\nMesh defines the device list for computation.\nLayout defines how to shard the Tensor dimension on a Mesh.\n\nMesh\nMesh represents a logical Cartisian topology of a set of devices. Each dimension of the Cartisian grid is called a Mesh dimension, and referred to with a name. Names of mesh dimension within the same Mesh must be unique.\nNames of mesh dimensions are referenced by Layout to describe the sharding behavior of a tf.Tensor along each of its axes. This is described in more detail later in the section on Layout.\nMesh can be thought of as a multi-dimensional array of devices.\nIn a 1 dimensional Mesh, all devices form a list in a single mesh dimension. The following example uses dtensor.create_mesh to create a mesh from 6 CPU devices along a mesh dimension 'x' with a size of 6 devices:\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_mesh_1d.png\" alt=\"A 1 dimensional mesh with 6 CPUs\" class=\"no-filter\">", "mesh_1d = dtensor.create_mesh([('x', 6)], devices=DEVICES)\nprint(mesh_1d)", "A Mesh can be multi dimensional as well. In the following example, 6 CPU devices form a 3x2 mesh, where the 'x' mesh dimension has a size of 3 devices, and the 'y' mesh dimension has a size of 2 devices:\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_mesh_2d.png\" alt=\"A 2 dimensional mesh with 6 CPUs\"\n class=\"no-filter\">", "mesh_2d = dtensor.create_mesh([('x', 3), ('y', 2)], devices=DEVICES)\nprint(mesh_2d)", "Layout\nLayout specifies how a tensor is distributed, or sharded, on a Mesh.\nNote: In order to avoid confusions between Mesh and Layout, the term dimension is always associated with Mesh, and the term axis with Tensor and Layout in this guide.\nThe rank of Layout should be the same as the rank of the Tensor where the Layout is applied. For each of the Tensor's axes the Layout may specify a mesh dimension to shard the tensor across, or specify the axis as \"unsharded\".\nThe tensor is replicated across any mesh dimensions that it is not sharded across.\nThe rank of a Layout and the number of dimensions of a Mesh do not need to match. 
The unsharded axes of a Layout do not need to be associated to a mesh dimension, and unsharded mesh dimensions do not need to be associated with a layout axis.\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_components_diag.png\" alt=\"Diagram of dtensor components.\"\n class=\"no-filter\">\nLet's analyze a few examples of Layout for the Mesh's created in the previous section.\nOn a 1-dimensional mesh such as [(\"x\", 6)] (mesh_1d in the previous section), Layout([\"unsharded\", \"unsharded\"], mesh_1d) is a layout for a rank-2 tensor replicated across 6 devices.\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_layout_replicated.png\" alt=\"A tensor replicated across a rank-1 mesh\" class=\"no-filter\">", "layout = dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh_1d)", "Using the same tensor and mesh the layout Layout(['unsharded', 'x']) would shard the second axis of the tensor across the 6 devices.\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_layout_rank1.png\" alt=\"A tensor sharded across a rank-1 mesh\" class=\"no-filter\">", "layout = dtensor.Layout([dtensor.UNSHARDED, 'x'], mesh_1d)", "Given a 2-dimensional 3x2 mesh such as [(\"x\", 3), (\"y\", 2)], (mesh_2d from the previous section), Layout([\"y\", \"x\"], mesh_2d) is a layout for a rank-2 Tensor whose first axis is sharded across across mesh dimension \"y\", and whose second axis is sharded across mesh dimension \"x\".\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_layout_rank2.png\" alt=\"A tensorr with it's first axis sharded across mesh dimension 'y' and it's second axis sharded across mesh dimension 'x'\" class=\"no-filter\">", "layout = dtensor.Layout(['y', 'x'], mesh_2d)", "For the same mesh_2d, the layout Layout([\"x\", dtensor.UNSHARDED], mesh_2d) is a layout for a rank-2 Tensor that is replicated across \"y\", and whose first axis is sharded on mesh dimension x.\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_layout_hybrid.png\" alt=\"A tensor replicated across mesh-dimension y, with it's first axis sharded across mesh dimension 'x'\" class=\"no-filter\">", "layout = dtensor.Layout([\"x\", dtensor.UNSHARDED], mesh_2d)", "Single-Client and Multi-Client Applications\nDTensor supports both single-client and multi-client applications. The colab Python kernel is an example of a single client DTensor application, where there is a single Python process.\nIn a multi-client DTensor application, multiple Python processes collectively perform as a coherent application. The Cartisian grid of a Mesh in a multi-client DTensor application can span across devices regardless of whether they are attached locally to the current client or attached remotely to another client. The set of all devices used by a Mesh are called the global device list.\nThe creation of a Mesh in a multi-client DTensor application is a collective operation where the global device list is identicial for all of the participating clients, and the creation of the Mesh serves as a global barrier.\nDuring Mesh creation, each client provides its local device list together with the expected global device list. DTensor validates that both lists are consistent. Please refer to the API documentation for dtensor.create_mesh and dtensor.create_distributed_mesh\n for more information on multi-client mesh creation and the global device list.\nSingle-client can be thought of as a special case of multi-client, with 1 client. 
In a single-client application, the global device list is identical to the local device list.\nDTensor as a sharded tensor\nNow let's start coding with DTensor. The helper function, dtensor_from_array, demonstrates creating DTensors from something that looks like a tf.Tensor. The function performs 2 steps:\n - Replicates the tensor to every device on the mesh.\n - Shards the copy according to the layout requested in its arguments.", "def dtensor_from_array(arr, layout, shape=None, dtype=None):\n \"\"\"Convert a DTensor from something that looks like an array or Tensor.\n\n This function is convenient for quick doodling DTensors from a known,\n unsharded data object in a single-client environment. This is not the\n most efficient way of creating a DTensor, but it will do for this\n tutorial.\n \"\"\"\n if shape is not None or dtype is not None:\n arr = tf.constant(arr, shape=shape, dtype=dtype)\n\n # replicate the input to the mesh\n a = dtensor.copy_to_mesh(arr,\n layout=dtensor.Layout.replicated(layout.mesh, rank=layout.rank))\n # shard the copy to the desirable layout\n return dtensor.relayout(a, layout=layout)", "Anatomy of a DTensor\nA DTensor is a tf.Tensor object, but augumented with the Layout annotation that defines its sharding behavior. A DTensor consists of the following:\n\nGlobal tensor meta-data, including the global shape and dtype of the tensor.\nA Layout, which defines the Mesh the Tensor belongs to, and how the Tensor is sharded onto the Mesh.\nA list of component tensors, one item per local device in the Mesh.\n\nWith dtensor_from_array, you can create your first DTensor, my_first_dtensor, and examine its contents.", "mesh = dtensor.create_mesh([(\"x\", 6)], devices=DEVICES)\nlayout = dtensor.Layout([dtensor.UNSHARDED], mesh)\n\nmy_first_dtensor = dtensor_from_array([0, 1], layout)\n\n# Examine the dtensor content\nprint(my_first_dtensor)\nprint(\"global shape:\", my_first_dtensor.shape)\nprint(\"dtype:\", my_first_dtensor.dtype)", "Layout and fetch_layout\nThe layout of a DTensor is not a regular attribute of tf.Tensor. Instead, DTensor provides a function, dtensor.fetch_layout to access the layout of a DTensor.", "print(dtensor.fetch_layout(my_first_dtensor))\nassert layout == dtensor.fetch_layout(my_first_dtensor)", "Component tensors, pack and unpack\nA DTensor consists of a list of component tensors. The component tensor for a device in the Mesh is the Tensor object representing the piece of the global DTensor that is stored on this device.\nA DTensor can be unpacked into component tensors through dtensor.unpack. You can make use of dtensor.unpack to inspect the components of the DTensor, and confirm they are on all devices of the Mesh.\nNote that the positions of component tensors in the global view may overlap each other. For example, in the case of a fully replicated layout, all components are identical replicas of the global tensor.", "for component_tensor in dtensor.unpack(my_first_dtensor):\n print(\"Device:\", component_tensor.device, \",\", component_tensor)", "As shown, my_first_dtensor is a tensor of [0, 1] replicated to all 6 devices.\nThe inverse operation of dtensor.unpack is dtensor.pack. Component tensors can be packed back into a DTensor.\nThe components must have the same rank and dtype, which will be the rank and dtype of the returned DTensor. 
However there is no strict requirement on the device placement of component tensors as inputs of dtensor.unpack: the function will automatically copy the component tensors to their respective corresponding devices.", "packed_dtensor = dtensor.pack(\n [[0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1]],\n layout=layout\n)\nprint(packed_dtensor)", "Sharding a DTensor to a Mesh\nSo far you've worked with the my_first_dtensor, which is a rank-1 DTensor fully replicated across a dim-1 Mesh.\nNext create and inspect DTensors that are sharded across a dim-2 Mesh. The next example does this with a 3x2 Mesh on 6 CPU devices, where size of mesh dimension 'x' is 3 devices, and size of mesh dimension'y' is 2 devices.", "mesh = dtensor.create_mesh([(\"x\", 3), (\"y\", 2)], devices=DEVICES)", "Fully sharded rank-2 Tensor on a dim-2 Mesh\nCreate a 3x2 rank-2 DTensor, sharding its first axis along the 'x' mesh dimension, and its second axis along the 'y' mesh dimension.\n\nBecause the tensor shape equals to the mesh dimension along all of the sharded axes, each device receives a single element of the DTensor.\nThe rank of the component tensor is always the same as the rank of the global shape. DTensor adopts this convention as a simple way to preserve information for locating the relation between a component tensor and the global DTensor.", "fully_sharded_dtensor = dtensor_from_array(\n tf.reshape(tf.range(6), (3, 2)),\n layout=dtensor.Layout([\"x\", \"y\"], mesh))\n\nfor raw_component in dtensor.unpack(fully_sharded_dtensor):\n print(\"Device:\", raw_component.device, \",\", raw_component)", "Fully replicated rank-2 Tensor on a dim-2 Mesh\nFor comparison, create a 3x2 rank-2 DTensor, fully replicated to the same dim-2 Mesh.\n\nBecause the DTensor is fully replicated, each device receives a full replica of the 3x2 DTensor.\nThe rank of the component tensors are the same as the rank of the global shape -- this fact is trivial, because in this case, the shape of the component tensors are the same as the global shape anyway.", "fully_replicated_dtensor = dtensor_from_array(\n tf.reshape(tf.range(6), (3, 2)),\n layout=dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh))\n# Or, layout=tensor.Layout.fully_replicated(mesh, rank=2)\n\nfor component_tensor in dtensor.unpack(fully_replicated_dtensor):\n print(\"Device:\", component_tensor.device, \",\", component_tensor)", "Hybrid rank-2 Tensor on a dim-2 Mesh\nWhat about somewhere between fully sharded and fully replicated?\nDTensor allows a Layout to be a hybrid, sharded along some axes, but replicated along others.\nFor example, you can shard the same 3x2 rank-2 DTensor in the following way:\n\n1st axis sharded along the 'x' mesh dimension.\n2nd axis replicated along the 'y' mesh dimension.\n\nTo achieve this sharding scheme, you just need to replace the sharding spec of the 2nd axis from 'y' to dtensor.UNSHARDED, to indicate your intention of replicating along the 2nd axis. The layout object will look like Layout(['x', dtensor.UNSHARDED], mesh).", "hybrid_sharded_dtensor = dtensor_from_array(\n tf.reshape(tf.range(6), (3, 2)),\n layout=dtensor.Layout(['x', dtensor.UNSHARDED], mesh))\n\nfor component_tensor in dtensor.unpack(hybrid_sharded_dtensor):\n print(\"Device:\", component_tensor.device, \",\", component_tensor)", "You can inspect the component tensors of the created DTensor and verify they are indeed sharded according to your scheme. 
It may be helpful to illustrate the situation with a chart:\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_hybrid_mesh.png\" alt=\"A 3x2 hybrid mesh with 6 CPUs\"\n class=\"no-filter\" width=75%>\nTensor.numpy() and sharded DTensor\nBe aware that calling the .numpy() method on a sharded DTensor raises an error. The rationale for erroring is to protect against unintended gathering of data from multiple computing devices to the host CPU device backing the returned numpy array.", "print(fully_replicated_dtensor.numpy())\n\ntry:\n fully_sharded_dtensor.numpy()\nexcept tf.errors.UnimplementedError:\n print(\"got an error as expected for fully_sharded_dtensor\")\n\ntry:\n hybrid_sharded_dtensor.numpy()\nexcept tf.errors.UnimplementedError:\n print(\"got an error as expected for hybrid_sharded_dtensor\")", "TensorFlow API on DTensor\nDTensor strives to be a drop-in replacement for tensor in your program. The TensorFlow Python API that consume tf.Tensor, such as the Ops library functions, tf.function, tf.GradientTape, also work with DTensor.\nTo accomplish this, for each TensorFlow Graph, DTensor produces and executes an equivalent SPMD graph in a procedure called SPMD expansion. A few critical steps in DTensor SPMD expansion are:\n\nPropagating the sharding Layout of DTensor in the TensorFlow graph\nRewriting TensorFlow Ops on the global DTensor with equivalent TensorFlow Ops on the component tensors, inserting collective and communication Ops when necessary\nLowering backend neutral TensorFlow Ops to backend specific TensorFlow Ops.\n\nThe final result is that DTensor is a drop-in replacement for Tensor.\nNote: DTensor is still an experimental API which means you will be exploring and pushing the boundaries and limits of the DTensor programming model.\nThere are 2 ways of triggering DTensor execution:\n - DTensor as operands of a Python function, e.g. tf.matmul(a, b) will run through DTensor if a, b, or both are DTensors.\n - Requesting the result of a Python function to be a DTensor, e.g. dtensor.call_with_layout(tf.ones, layout, shape=(3, 2)) will run through DTensor because we requested the output of tf.ones to be sharded according to a layout.\nDTensor as Operands\nMany TensorFlow API functions take tf.Tensor as their operands, and returns tf.Tensor as their results. For these functions, you can express intention to run a function through DTensor by passing in DTensor as operands. This section uses tf.matmul(a, b) as an example.\nFully replicated input and output\nIn this case, the DTensors are fully replicated. 
On each of the devices of the Mesh,\n - the component tensor for operand a is [[1, 2, 3], [4, 5, 6]] (2x3)\n - the component tensor for operand b is [[6, 5], [4, 3], [2, 1]] (3x2)\n - the computation consists of a single MatMul of (2x3, 3x2) -&gt; 2x2,\n - the component tensor for result c is [[20, 14], [56,41]] (2x2)\nTotal number of floating point mul operations is 6 device * 4 result * 3 mul = 72.", "mesh = dtensor.create_mesh([(\"x\", 6)], devices=DEVICES)\nlayout = dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh)\na = dtensor_from_array([[1, 2, 3], [4, 5, 6]], layout=layout)\nb = dtensor_from_array([[6, 5], [4, 3], [2, 1]], layout=layout)\n\nc = tf.matmul(a, b) # runs 6 identical matmuls in parallel on 6 devices\n\n# `c` is a DTensor replicated on all devices (same as `a` and `b`)\nprint('Sharding spec:', dtensor.fetch_layout(c).sharding_specs)\nprint(\"components:\")\nfor component_tensor in dtensor.unpack(c):\n print(component_tensor.device, component_tensor.numpy())\n", "Sharding operands along the contracted axis\nYou can reduce the amount of computation per device by sharding the operands a and b. A popular sharding scheme for tf.matmul is to shard the operands along the axis of the contraction, which means sharding a along the second axis, and b along the first axis.\nThe global matrix product sharded under this scheme can be performed efficiently, by local matmuls that runs concurrently, followed by a collective reduction to aggregate the local results. This is also the canonical way of implementing a distributed matrix dot product.\nTotal number of floating point mul operations is 6 devices * 4 result * 1 = 24, a factor of 3 reduction compared to the fully replicated case (72) above. The factor of 3 is due to the sharing along x mesh dimension with a size of 3 devices.\nThe reduction of the number of operations run sequentially is the main mechansism with which synchronuous model parallelism accelerates training.", "mesh = dtensor.create_mesh([(\"x\", 3), (\"y\", 2)], devices=DEVICES)\na_layout = dtensor.Layout([dtensor.UNSHARDED, 'x'], mesh)\na = dtensor_from_array([[1, 2, 3], [4, 5, 6]], layout=a_layout)\nb_layout = dtensor.Layout(['x', dtensor.UNSHARDED], mesh)\nb = dtensor_from_array([[6, 5], [4, 3], [2, 1]], layout=b_layout)\n\nc = tf.matmul(a, b)\n# `c` is a DTensor replicated on all devices (same as `a` and `b`)\nprint('Sharding spec:', dtensor.fetch_layout(c).sharding_specs)", "Additional Sharding\nYou can perform additional sharding on the inputs, and they are appropriately carried over to the results. For example, you can apply additional sharding of operand a along its first axis to the 'y' mesh dimension. The additional sharding will be carried over to the first axis of the result c.\nTotal number of floating point mul operations is 6 devices * 2 result * 1 = 12, an additional factor of 2 reduction compared to the case (24) above. 
The factor of 2 is due to the sharing along y mesh dimension with a size of 2 devices.", "mesh = dtensor.create_mesh([(\"x\", 3), (\"y\", 2)], devices=DEVICES)\n\na_layout = dtensor.Layout(['y', 'x'], mesh)\na = dtensor_from_array([[1, 2, 3], [4, 5, 6]], layout=a_layout)\nb_layout = dtensor.Layout(['x', dtensor.UNSHARDED], mesh)\nb = dtensor_from_array([[6, 5], [4, 3], [2, 1]], layout=b_layout)\n\nc = tf.matmul(a, b)\n# The sharding of `a` on the first axis is carried to `c'\nprint('Sharding spec:', dtensor.fetch_layout(c).sharding_specs)\nprint(\"components:\")\nfor component_tensor in dtensor.unpack(c):\n print(component_tensor.device, component_tensor.numpy())", "DTensor as Output\nWhat about Python functions that do not take operands, but returns a Tensor result that can be sharded? Examples of such functions are\n\ntf.ones, tf.zeros, tf.random.stateless_normal,\n\nFor these Python functions, DTensor provides dtensor.call_with_layout which eagelry executes a Python function with DTensor, and ensures that the returned Tensor is a DTensor with the requested Layout.", "help(dtensor.call_with_layout)", "The eagerly executed Python function usually only contain a single non-trivial TensorFlow Op.\nTo use a Python function that emits multiple TensorFlow Ops with dtensor.call_with_layout, the function should be converted to a tf.function. Calling a tf.function is a single TensorFlow Op. When the tf.function is called, DTensor can perform layout propagation when it analyzes the computing graph of the tf.function, before any of the intermediate tensors are materialized.\nAPIs that emit a single TensorFlow Op\nIf a function emits a single TensorFlow Op, you can directly apply dtensor.call_with_layout to the function.", "help(tf.ones)\n\nmesh = dtensor.create_mesh([(\"x\", 3), (\"y\", 2)], devices=DEVICES)\nones = dtensor.call_with_layout(tf.ones, dtensor.Layout(['x', 'y'], mesh), shape=(6, 4))\nprint(ones)", "APIs that emit multiple TensorFlow Ops\nIf the API emits multiple TensorFlow Ops, convert the function into a single Op through tf.function. For example tf.random.stateleess_normal", "help(tf.random.stateless_normal)\n\nones = dtensor.call_with_layout(\n tf.function(tf.random.stateless_normal),\n dtensor.Layout(['x', 'y'], mesh),\n shape=(6, 4),\n seed=(1, 1))\nprint(ones)", "Wrapping a Python function that emits a single TensorFlow Op with tf.function is allowed. The only caveat is paying the associated cost and complexity of creating a tf.function from a Python function.", "ones = dtensor.call_with_layout(\n tf.function(tf.ones),\n dtensor.Layout(['x', 'y'], mesh),\n shape=(6, 4))\nprint(ones)", "From tf.Variable to dtensor.DVariable\nIn Tensorflow, tf.Variable is the holder for a mutable Tensor value.\nWith DTensor, the corresponding variable semantics is provided by dtensor.DVariable.\nThe reason a new type DVariable was introduced for DTensor variable is because DVariables have an additional requirement that the layout cannot change from its initial value.", "mesh = dtensor.create_mesh([(\"x\", 6)], devices=DEVICES)\nlayout = dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh)\n\nv = dtensor.DVariable(\n initial_value=dtensor.call_with_layout(\n tf.function(tf.random.stateless_normal),\n layout=layout,\n shape=tf.TensorShape([64, 32]),\n seed=[1, 1],\n dtype=tf.float32))\n\nprint(v.handle)\nassert layout == dtensor.fetch_layout(v)", "Other than the requirement on matching the layout, a DVariable behaves the same as a tf.Variable. 
For example, you can add a DVariable to a DTensor,", "a = dtensor.call_with_layout(tf.ones, layout=layout, shape=(64, 32))\nb = v + a # add DVariable and DTensor\nprint(b)", "You can also assign a DTensor to a DVariable.", "v.assign(a) # assign a DTensor to a DVariable\nprint(a)", "Attempting to mutate the layout of a DVariable, by assigning a DTensor with an incompatible layout produces an error.", "# variable's layout is immutable.\nanother_mesh = dtensor.create_mesh([(\"x\", 3), (\"y\", 2)], devices=DEVICES)\nb = dtensor.call_with_layout(tf.ones,\n layout=dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], another_mesh),\n shape=(64, 32))\ntry:\n v.assign(b)\nexcept:\n print(\"exception raised\")", "What's next?\nIn this colab, you learned about DTensor, an extension to TensorFlow for distributed computing. To try out these concepts in a tutorial, see Distributed training with DTensor." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jbpoline/cnv_analysis
CNV_dangerosite.ipynb
artistic-2.0
[ "%pylab inline\n\nimport numpy as np\nimport scipy.stats as sst\nimport matplotlib.pyplot as plt\nimport os\nimport os.path as osp\nfrom __future__ import print_function\nfrom __future__ import division\nimport six\nimport cnv_util as util\nfrom datetime import datetime\nreload(util)", "Reading TSV files", "CWD = osp.join(osp.expanduser('~'), 'documents','grants_projects','roberto_projects', \\\n 'guillaume_huguet_CNV','File_OK')\nfilename = 'Imagen_QC_CIA_MMAP_V2_Annotation.tsv'\nfullfname = osp.join(CWD, filename)\n\narr = np.loadtxt(fullfname, dtype='str', comments=None, delimiter='\\Tab', \n converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0)\n\nEXPECTED_LINES = 19542\nexpected_nb_values = EXPECTED_LINES - 1 \nassert arr.shape[0] == EXPECTED_LINES\nline0 = arr[0].split('\\t')\nprint(line0)\n\ndanger = 'Pvalue_MMAP_V2_sans_intron_and_Intergenic'\nscore = 'SCORE'\ni_danger = line0.index(danger)\ni_score = line0.index(score)\nprint(i_danger, i_score)\n\n# check that all lines have the same number of tab separated elements \nlarr = np.asarray([len(arr[i].split('\\t')) for i in range(arr.shape[0])])\nassert not (larr - larr[0]).any() # all element have the same value \n\ndangers = np.asarray([line.split('\\t')[i_danger] for line in arr[1:]])\nscores = np.asarray([line.split('\\t')[i_score] for line in arr[1:]])\n# print(np.unique(scores))\n\nassert len(dangers) == expected_nb_values\nassert len(scores) == expected_nb_values\n\n", "transforming the \"Pvalue_MMAP_V2_...\" into danger score\nTesting the function danger_score", "assert util._test_danger_score_1()\nassert util._test_danger_score()", "QUESTION pour Guillaume: \na quoi correspondent les '' dans la colonne \"Pvalue_MMAP_V2_sans_intron_and_Intergenic\" (danger)?\nAnsewer: cnv for which we have no dangerosity information", "\"\"\"\ndanger_not_empty = dangers != ''\ndanger_scores = dangers[danger_not_empty]\ndanger_scores = np.asarray([util.danger_score(pstr, util.pH1_with_apriori) \n for pstr in danger_scores])\n\"\"\";", "To be or not to be a CNV: p value from the 'SCORE' column", "reload(util)\n#get the scores\nscores = np.asarray([line.split('\\t')[i_score] for line in arr[1:]])\nassert len(scores) == expected_nb_values\nprint(len(np.unique(scores)))\n#tmp_score = np.asarray([util.str2floats(s, comma2point=True, sep=' ')[0] for s in scores])\nassert scores.shape[0] == EXPECTED_LINES - 1 \n\n\n# h = plt.hist(tmp[tmp > sst.scoreatpercentile(tmp, 99)], bins=100)\n# h = plt.hist(tmp[tmp < 50], bins=100)\n\n\"\"\"\nprint(\"# CNV with score == 0.: \", (tmp==0.).sum())\nprint(\"# CNV with score >=15 < 17.5 : \", np.logical_and(tmp >= 15., tmp < 17.5).sum())\ntmp.max()\n\"\"\";", "Replace the zero score by the maximum score: cf Guillaume's procedure", "scoresf = np.asarray([util.str2floats(s, comma2point=True, sep=' ')[0] \n for s in scores])\nprint(scoresf.max(), scoresf.min(),(scoresf==0).sum())\n#clean_score = util.process_scores(scores)\n#h = plt.hist(clean_score[clean_score < 60], bins=100)\n#h = plt.hist(scoresf[scoresf < 60], bins=100)\nh = plt.hist(scoresf, bins=100, range=(0,150))\n", "Transforms the scores into P(cnv is real)", "# Creating a function from score to proba from Guillaume's values\np_cnv = util._build_dict_prob_cnv()\n#print(p_cnv.keys())\n#print(p_cnv.values())\n\n#### Definition with a piecewise linear function\n#score2prob = util.create_score2prob_lin_piecewise(p_cnv)\n#scores = np.arange(15,50,1)\n#probs = [score2prob(sc) for sc in scores]\n#plt.plot(scores, probs)\n\n#### Definition with a 
corrected regression line\nscore2prob = util.create_score2prob_lin(p_cnv)\n#x = np.arange(0,50,1)\n#plt.plot(x, [score2prob(_) for _ in x], '-', p_cnv.keys(), p_cnv.values(), '+')\n\np_scores = [score2prob(sc) for sc in clean_score]\nassert len(p_scores) == EXPECTED_LINES -1 ", "Finally, putting things together", "# re-loading \nreload(util)\nCWD = osp.join(osp.expanduser('~'), 'documents','grants_projects','roberto_projects', \\\n 'guillaume_huguet_CNV','File_OK')\nfilename = 'Imagen_QC_CIA_MMAP_V2_Annotation.tsv'\nfullfname = osp.join(CWD, filename)\n\n# in numpy array\narr = np.loadtxt(fullfname, dtype='str', comments=None, delimiter='\\Tab', \n converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0)\n\nline0 = arr[0].split('\\t')\n\ni_DANGER = line0.index('Pvalue_MMAP_V2_sans_intron_and_Intergenic')\ni_SCORE = line0.index('SCORE')\ni_START = line0.index('START')\ni_STOP = line0.index('STOP')\ni_5pGENE = line0.index(\"5'gene\")\ni_3pGENE = line0.index(\"3'gene\")\ni_5pDIST = line0.index(\"5'dist(kb)\")\ni_3pDIST = line0.index(\"3'dist(kb)\")\n#i_LOC = line0.index('Location')\n\nscores = np.asarray([line.split('\\t')[i_SCORE] for line in arr[1:]])\nclean_score = util.process_scores(scores)\nmax_score = clean_score.max()\n\nprint(line0)\n\n#names_from = ['START', 'STOP', \"5'gene\", \"3'gene\", \"5'dist(kb)\", \"3'dist(kb)\"]\n\n#---------- ligne uniques:\nnames_from = ['IID_projet', 'IID_genotype', \"CHR de Merge_CIA_610_660_QC\", 'START', 'STOP'] \ncnv_names = util.make_uiid(arr, names_from)\nprint(\"with names from: \", names_from)\nprint(\"we have {} unique elements out of {} rows in the tsv\".format(\n len(np.unique(cnv_names)), len(cnv_names)))\n\n#---------- CNV uniques ? \nnames_from = [\"CHR de Merge_CIA_610_660_QC\", 'START', 'STOP'] \ncnv_names = util.make_uiid(arr, names_from)\nprint(\"with names from: \", names_from)\nprint(\"we have {} unique elements out of {} rows in the tsv\".format(\n len(np.unique(cnv_names)), len(cnv_names)))\n\n#---------- sujets uniques ? 
\nnames_from = ['IID_projet'] # , 'IID_genotype'] \ncnv_names = util.make_uiid(arr, names_from)\nprint(\"with names from: \", names_from)\nprint(\"we have {} unique elements out of {} rows in the tsv\".format(\n len(np.unique(cnv_names)), len(cnv_names)))\n\ndangers = np.asarray([line.split('\\t')[i_DANGER] for line in arr[1:]])\nscores = np.asarray([line.split('\\t')[i_SCORE] for line in arr[1:]])\n\n#danger_not_empty = dangers != ''\n#print(danger_not_empty.sum())\n#print(len(np.unique(cnv_name)))\n#print(cnv_name[:10])", "Create a dict of the cnv", "from collections import OrderedDict\ncnv = OrderedDict()\nnames_from = [\"CHR de Merge_CIA_610_660_QC\", 'START', 'STOP'] \n #, \"5'gene\", \"3'gene\", \"5'dist(kb)\", \"3'dist(kb)\"]\nblank_dgr = 0\n\nfor line in arr[1:]:\n lline = line.split('\\t')\n dgr = lline[i_DANGER]\n scr = lline[i_SCORE]\n cnv_iid = util.make_uiid(line, names_from, arr[0])\n \n if dgr != '':\n add_cnv = (util.danger_score(lline[i_DANGER], util.pH1_with_apriori),\n score2prob(util.process_one_score(lline[i_SCORE], max_score)))\n if cnv_iid in cnv.keys():\n cnv[cnv_iid].append(add_cnv)\n else:\n cnv[cnv_iid] = [add_cnv]\n else:\n blank_dgr += 1\n\n\nprint(len(cnv), (blank_dgr))\nprint([k for k in cnv.keys()[:5]])\nprint([k for k in cnv.values()[:5]])\n\nfor k in cnv.keys()[3340:3350]:\n print(k,': ',cnv[k])", "Create a dictionary of the subjects -", "cnv = OrderedDict({})\n#names_from = ['START', 'STOP', \"5'gene\", \"3'gene\", \"5'dist(kb)\", \"3'dist(kb)\"]\nnames_from = ['IID_projet']\n\nfor line in arr[1:]:\n lline = line.split('\\t')\n dgr = lline[i_DANGER]\n scr = lline[i_SCORE]\n sub_iid = util.make_uiid(line, names_from, arr[0])\n \n if dgr != '':\n add_cnv = (util.danger_score(lline[i_DANGER], util.pH1_with_apriori),\n score2prob(util.process_one_score(lline[i_SCORE], max_score)))\n if sub_iid in cnv.keys():\n cnv[sub_iid].append(add_cnv)\n else:\n cnv[sub_iid] = [add_cnv]", "Histogram of the number of cnv used to compute dangerosity", "print(len(cnv))\nnbcnv = [len(cnv[sb]) for sb in cnv]\nhist = plt.hist(nbcnv, bins=50)\nprint(np.max(np.asarray(nbcnv)))\n\n# definition of dangerosity from a list of cnv\ndef dangerosity(listofcnvs):\n \"\"\"\n inputs: list tuples (danger_score, proba_cnv)\n returns: a dangerosity score \n \"\"\"\n last = -1 #slicing the last\n tmp = [np.asarray(t) for t in zip(*listofcnvs)]\n return tmp[0].dot(tmp[1])\n\n# or: return np.asarray([dgr*prob for (dgr,prob) in listofcnvs]).cumsum()[last]\n", "Testing dangerosity", "for k in range(1,30, 30):\n print(cnv[cnv.keys()[k]], ' yields ', dangerosity(cnv[cnv.keys()[k]]))\n \ntest_dangerosity_input = [[(1., .5), (1., .5), (1., .5), (1., .5)],\n [(2., 1.)],\n [(10000., 0.)]]\ntest_dangerosity_output = [2., 2., 0]\n\n#print( [dangerosity(icnv) for icnv in test_dangerosity_input]) # == test_dangerosity_output\nassert( [dangerosity(icnv) for icnv in test_dangerosity_input] == test_dangerosity_output)", "Printing out results", "dtime = datetime.now().strftime(\"%y-%m-%d_h%H-%M\")\noutfile = dtime+'dangerosity_cnv.txt'\nfulloutfile = osp.join(CWD, outfile)\n\nwith open(fulloutfile, 'w') as outf:\n for sub in cnv:\n outf.write(\"\\t\".join([sub, str(dangerosity(cnv[sub]))]) + \"\\n\")" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
wcmckee/signinlca
pggNumAdd.ipynb
mit
[ "# IPython log file\n\n#get_ipython().magic(u'logstart')\nimport random\nranumlis = []\nranlow = 0\nranhigh = 9\n\nfor ranez in range(10):\n randmun = random.randint(ranlow, ranhigh)\n ranumlis.append(randmun)\n\n ranlow = (ranlow + 10)\n ranhigh = (ranhigh + 10)\n\nprint ranumlis\n\nsavlis = open('/home/wcmckee/pgg/roll.json', 'w') \nsavlis.write(str(ranumlis))\nsavlis.close()\n", "Testing of playing pyguessgame. \nGenerates random numbers and plays a game.\nCreate two random lists of numbers 0/9,10/19,20/29 etc to 100.\nCompare the two lists. If win mark, if lose mark.\nDebian", "#for ronum in ranumlis:\n# print ronum\n\nrandict = dict()\n\nothgues = []\nothlow = 0\nothhigh = 9\n\n\nfor ranez in range(10):\n randxz = random.randint(othlow, othhigh)\n othgues.append(randxz)\n\n othlow = (othlow + 10)\n othhigh = (othhigh + 10)\n\n#print othgues\n\ntenlis = ['zero', 'ten', 'twenty', 'thirty', 'fourty',\n 'fifty', 'sixty', 'seventy', 'eighty', \n 'ninety']\n\n#for telis in tenlis:\n# for diez in dieci:\n# print telis\n\n#randict", "Makes dict with keys pointing to the 10s numbers. \nThe value needs the list of random numbers updated.\nCurrently it just adds the number one.\nHow to add the random number list?", "for ronum in ranumlis:\n #print ronum\n if ronum in othgues:\n print (str(ronum) + ' You Win!')\n else:\n print (str(ronum) + ' You Lose!')\n \n\n#dieci = dict()\n\n#for ranz in range(10):\n #print str(ranz) + str(1)#\n# dieci.update({str(ranz) + str(1): str(ranz)})\n# for numz in range(10):\n #print str(ranz) + str(numz)\n# print numz\n#print zetoo\n\n#for diez in dieci:\n# print diez\n\n#for sinum in ranumlis:\n# print str(sinum) + (str('\\n'))\n #if str(sinum) in othhigh:\n # print 'Win'\n \n\n#import os\n\n#os.system('sudo adduser joemanz --disabled-login --quiet -D')\n\n#uslis = os.listdir('/home/wcmckee/signinlca/usernames/')\n#print ('User List: ')\n#for usl in uslis:\n# print usl\n# os.system('sudo adduser ' + usl + ' ' + '--disabled-login --quiet')\n \n# os.system('sudo mv /home/wcmckee/signinlca/usernames/' + usl + ' ' + '/home/' + usl + ' ') \n\n\n#print dieci" ]
[ "code", "markdown", "code", "markdown", "code" ]
harishkrao/DSE200x
Week-7-MachineLearning/Weather Data Classification using Decision Trees.ipynb
mit
[ "<p style=\"font-family: Arial; font-size:2.75em;color:purple; font-style:bold\">\n\nClassification of Weather Data <br><br>\nusing scikit-learn\n<br><br>\n</p>\n\n<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\nDaily Weather Data Analysis</p>\n\nIn this notebook, we will use scikit-learn to perform a decision tree based classification of weather data.\n<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nImporting the Necessary Libraries<br></p>", "import pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nCreating a Pandas DataFrame from a CSV file<br></p>", "data = pd.read_csv('./weather/daily_weather.csv')", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\">Daily Weather Data Description</p>\n<br>\nThe file daily_weather.csv is a comma-separated file that contains weather data. This data comes from a weather station located in San Diego, California. The weather station is equipped with sensors that capture weather-related measurements such as air temperature, air pressure, and relative humidity. Data was collected for a period of three years, from September 2011 to September 2014, to ensure that sufficient data for different seasons and weather conditions is captured.<br><br>\nLet's now check all the columns in the data.", "data.columns", "<br>Each row in daily_weather.csv captures weather data for a separate day. <br><br>\nSensor measurements from the weather station were captured at one-minute intervals. These measurements were then processed to generate values to describe daily weather. Since this dataset was created to classify low-humidity days vs. non-low-humidity days (that is, days with normal or high humidity), the variables included are weather measurements in the morning, with one measurement, namely relatively humidity, in the afternoon. 
The idea is to use the morning weather values to predict whether the day will be low-humidity or not based on the afternoon measurement of relative humidity.\nEach row, or sample, consists of the following variables:\n\nnumber: unique number for each row\nair_pressure_9am: air pressure averaged over a period from 8:55am to 9:04am (Unit: hectopascals)\nair_temp_9am: air temperature averaged over a period from 8:55am to 9:04am (Unit: degrees Fahrenheit)\nair_wind_direction_9am: wind direction averaged over a period from 8:55am to 9:04am (Unit: degrees, with 0 means coming from the North, and increasing clockwise)\nair_wind_speed_9am: wind speed averaged over a period from 8:55am to 9:04am (Unit: miles per hour)\n max_wind_direction_9am: wind gust direction averaged over a period from 8:55am to 9:10am (Unit: degrees, with 0 being North and increasing clockwise)\nmax_wind_speed_9am: wind gust speed averaged over a period from 8:55am to 9:04am (Unit: miles per hour)\nrain_accumulation_9am: amount of rain accumulated in the 24 hours prior to 9am (Unit: millimeters)\nrain_duration_9am: amount of time rain was recorded in the 24 hours prior to 9am (Unit: seconds)\nrelative_humidity_9am: relative humidity averaged over a period from 8:55am to 9:04am (Unit: percent)\nrelative_humidity_3pm: relative humidity averaged over a period from 2:55pm to 3:04pm (Unit: percent )", "data\n\ndata[data.isnull().any(axis=1)]", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nData Cleaning Steps<br><br></p>\n\nWe will not need to number for each row so we can clean it.", "del data['number']", "Now let's drop null values using the pandas dropna function.", "before_rows = data.shape[0]\nprint(before_rows)\n\ndata = data.dropna()\n\nafter_rows = data.shape[0]\nprint(after_rows)", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nHow many rows dropped due to cleaning?<br><br></p>", "before_rows - after_rows", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\">\nConvert to a Classification Task <br><br></p>\nBinarize the relative_humidity_3pm to 0 or 1.<br>", "clean_data = data.copy()\nclean_data['high_humidity_label'] = (clean_data['relative_humidity_3pm'] > 24.99)*1\nprint(clean_data['high_humidity_label'])", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nTarget is stored in 'y'.\n<br><br></p>", "y=clean_data[['high_humidity_label']].copy()\n#y\n\nclean_data['relative_humidity_3pm'].head()\n\ny.head()", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nUse 9am Sensor Signals as Features to Predict Humidity at 3pm\n<br><br></p>", "morning_features = ['air_pressure_9am','air_temp_9am','avg_wind_direction_9am','avg_wind_speed_9am',\n 'max_wind_direction_9am','max_wind_speed_9am','rain_accumulation_9am',\n 'rain_duration_9am']\n\nX = clean_data[morning_features].copy()\n\nX.columns\n\ny.columns", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nPerform Test and Train split\n\n<br><br></p>\n\nREMINDER: Training Phase\nIn the training phase, the learning algorithm uses the training data to adjust the model’s parameters to minimize errors. At the end of the training phase, you get the trained model.\n<img src=\"TrainingVSTesting.jpg\" align=\"middle\" style=\"width:550px;height:360px;\"/>\n<BR>\nIn the testing phase, the trained model is applied to test data. 
Test data is separate from the training data, and is previously unseen by the model. The model is then evaluated on how it performs on the test data. The goal in building a classifier model is to have the model perform well on training as well as test data.", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=324)\n\n#type(X_train)\n#type(X_test)\n#type(y_train)\n#type(y_test)\n#X_train.head()\n#y_train.describe()", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nFit on Train Set\n<br><br></p>", "humidity_classifier = DecisionTreeClassifier(max_leaf_nodes=10, random_state=0)\nhumidity_classifier.fit(X_train, y_train)\n\ntype(humidity_classifier)", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nPredict on Test Set \n\n<br><br></p>", "predictions = humidity_classifier.predict(X_test)\n\npredictions[:10]\n\ny_test['high_humidity_label'][:10]", "<p style=\"font-family: Arial; font-size:1.75em;color:purple; font-style:bold\"><br>\n\nMeasure Accuracy of the Classifier\n<br><br></p>", "accuracy_score(y_true = y_test, y_pred = predictions)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gaufung/Data_Analytics_Learning_Note
python-statatics-tutorial/advance-theme/Singleton.ipynb
mit
[ "Python 单例模式\n1 _new_ 方法\n\n_new_(cls, *args, **kwargs) 创建对象时调用,返回当前对象的一个实例;注意:这里的第一个参数是cls即class本身 \n_init_(self, *args, **kwargs) 创建完对象后调用,对当前对象的实例的一些初始化,无返回值,即在调用__new__之后,根据返回的实例初始化;注意,这里的第一个参数是self即对象本身", "class Singleton(object): \n def __new__(cls, *args, **kwargs): \n if not hasattr(cls, '_instance'): \n cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs) \n return cls._instance\nclass MyClass(object):\n pass\n\nsingle1 = Singleton()\nsingle2 = Singleton()\nmyclass1 = MyClass()\nmyclass2 = MyClass()\nprint id(single1) == id(single2)\nprint id(myclass1) == id(myclass2)", "2 使用类(class)装饰器", "from functools import wraps\ndef singleton(cls): \n instances = {} \n @wraps(cls) \n def wrapper(*args, **kwargs): \n if cls not in instances: \n instances[cls] = cls(*args, **kwargs) \n return instances[cls] \n return wrapper\n@singleton \nclass MyClass(object): \n pass\n\nmyclass1 = MyClass()\nmyclass2 = MyClass()\nprint id(myclass1) == id(myclass2)", "3 使用GetInstance方法,非线程安全", "class MySingleton(object): \n @classmethod\n def getInstance(cls): \n if not hasattr(cls, '_instance'): \n cls._instance = cls() \n return cls._instance\nmysingleton1 = MySingleton.getInstance()\nmysingleton2 = MySingleton.getInstance()\nprint id(mysingleton1) == id(mysingleton2)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
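The third variant above is explicitly flagged as not thread-safe: two threads can pass the hasattr check at the same time and each create its own instance. A minimal sketch of a thread-safe version, assuming only the standard library (the class name and lock attribute below are illustrative, not part of the original notes), adds a class-level lock with a double check:

```python
import threading

class ThreadSafeSingleton(object):
    _lock = threading.Lock()

    @classmethod
    def getInstance(cls):
        if not hasattr(cls, '_instance'):          # cheap unlocked check
            with cls._lock:
                if not hasattr(cls, '_instance'):  # re-check while holding the lock
                    cls._instance = cls()
        return cls._instance

a = ThreadSafeSingleton.getInstance()
b = ThreadSafeSingleton.getInstance()
print(a is b)  # True
```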
JackDi/phys202-2015-work
assignments/assignment09/IntegrationEx02.ipynb
mit
[ "Integration Exercise 2\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy import integrate", "Indefinite integrals\nHere is a table of definite integrals. Many of these integrals has a number of parameters $a$, $b$, etc.\nFind five of these integrals and perform the following steps:\n\nTypeset the integral using LateX in a Markdown cell.\nDefine an integrand function that computes the value of the integrand.\nDefine an integral_approx funciton that uses scipy.integrate.quad to peform the integral.\nDefine an integral_exact function that computes the exact value of the integral.\nCall and print the return value of integral_approx and integral_exact for one set of parameters.\n\nHere is an example to show what your solutions should look like:\nExample\nHere is the integral I am performing:\n$$ I_1 = \\int_0^\\infty \\frac{dx}{x^2 + a^2} = \\frac{\\pi}{2a} $$", "def integrand(x, a):\n return 1.0/(x**2 + a**2)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, np.inf, args=(a,))\n return I\n\ndef integral_exact(a):\n return 0.5*np.pi/a\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\n\nassert True # leave this cell to grade the above integral", "Integral 1\n\\begin{equation}\n\\int_{0}^{a}{\\sqrt{a^2 - x^2}} dx=\\frac{\\pi a^2}{4}\n\\end{equation}", "# YOUR CODE HERE\ndef integrand(x, a):\n return (np.sqrt(a**2 - x**2))\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, a, args=(a,))\n return I\n\ndef integral_exact(a):\n return (0.25*np.pi*a**2)\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\n\nassert True # leave this cell to grade the above integral", "Integral 2\n\\begin{equation}\n\\int_{0}^{\\infty} e^{-ax^2} dx =\\frac{1}{2}\\sqrt{\\frac{\\pi}{a}}\n\\end{equation}", "# YOUR CODE HERE\ndef integrand(x, a):\n return np.exp(-a*x**2)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, np.inf, args=(a,))\n return I\n\ndef integral_exact(a):\n return 0.5*np.sqrt(np.pi/a)\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\nassert True # leave this cell to grade the above integral", "Integral 3\n\\begin{equation}\n\\int_{0}^{\\infty} \\frac{x}{e^x-1} dx =\\frac{\\pi^2}{6}\n\\end{equation}", "# YOUR CODE HERE\ndef integrand(x, a):\n return x/(np.exp(x)-1)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, np.inf, args=(a,))\n return I\n\ndef integral_exact(a):\n return (1/6.0)*np.pi**2\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\nassert True # leave this cell to grade the above integral", "Integral 4\n\\begin{equation}\n\\int_{0}^{\\infty} \\frac{x}{e^x+1} dx =\\frac{\\pi^2}{12}\n\\end{equation}", "# YOUR CODE HERE\ndef integrand(x, a):\n return x/(np.exp(x)+1)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, np.inf, args=(a,))\n return I\n\ndef integral_exact(a):\n return (1/12.0)*np.pi**2\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\nassert True # leave this 
cell to grade the above integral", "Integral 5\n\\begin{equation}\n\\int_{0}^{1} \\frac{ln x}{1-x} dx =-\\frac{\\pi^2}{6}\n\\end{equation}", "# YOUR CODE HERE\ndef integrand(x, a):\n return np.log(x)/(1-x)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, 1, args=(a,))\n return I\n\ndef integral_exact(a):\n return (-1.0/6.0)*np.pi**2\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\nassert True # leave this cell to grade the above integral" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
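Each integration exercise above follows the same pattern: an integral_approx built on scipy.integrate.quad is compared against a closed form. As a hedged sketch of how to also use the error estimate that quad returns (the value of a below is arbitrary), the absolute difference from the exact Gaussian integral should sit within the reported bound:

```python
import numpy as np
from scipy import integrate

a = 2.0
# quad returns the estimate and an estimate of the absolute error
numeric, err_bound = integrate.quad(lambda x: np.exp(-a * x**2), 0, np.inf)
exact = 0.5 * np.sqrt(np.pi / a)

print("numeric:", numeric)
print("exact  :", exact)
print("abs error vs reported bound:", abs(numeric - exact), err_bound)
```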
serpilliere/miasm
doc/ir/lift.ipynb
gpl-2.0
[ "Prerequisite: the reader is encouraged to read the documentation of expression and locationdb before this part.\nMiasm Intermediate representation\nThe intermediate representation of Miasm allows to represent the side effects of instructions in a control flow graph. To summarise, here is the correspondence between native world and its intermediate representation:\n- an assembly control flow graph (AsmCFG) is represented in intermediate representation by an \"Intermediate representation control flow graph\": IRCfg\n- an AsmCFG in composed of basic blocks. In intermediate representation, the IRCfg is composed of Intermediate representation blocks: IRBlocks\n- a native basic block is a sequence of instructions. In intermediate representation, the IRBlock if a sequence of AssignBlocks\n- an AssignBlock is composed of parallel assignments of expressions. \"Parallel\" mean that those assignments are executed exactly the same time (different from successive)\nNote this does not imply that an instruction translates to an AssignBlock. The translation of a native instruction can generate multiple AssignBlocks and even multiple IRBlocks. \nExamples\nLet's take some examples of translated instructions. First of all, we will create an helper to generate intermediate representation from assembly code. Skip this code, it's not important for the rest of the documentation", "from miasm.analysis.machine import Machine\nfrom miasm.arch.x86.arch import mn_x86\nfrom miasm.core import parse_asm, asmblock\nfrom miasm.arch.x86.lifter_model_call import LifterModelCall_x86_32\nfrom miasm.core.locationdb import LocationDB\nfrom miasm.loader.strpatchwork import StrPatchwork\nfrom miasm.analysis.binary import Container\nfrom miasm.ir.ir import IRCFG, AssignBlock\nfrom miasm.expression.expression import *\nimport logging\n\n# Quiet warnings\nasmblock.log_asmblock.setLevel(logging.ERROR)\n\n\ndef gen_x86_asmcfg(asm):\n # First, asm code\n machine = Machine(\"x86_32\")\n\n # Add dummy label \"end\" at code's end\n code = asm + \"\\nend:\\n\"\n loc_db = LocationDB()\n # The main will be at address 0\n loc_db.set_location_offset(loc_db.get_or_create_name_location(\"main\"), 0x0)\n\n asmcfg = parse_asm.parse_txt(\n mn_x86, 32, code,\n loc_db\n )\n virt = StrPatchwork()\n # Assemble shellcode\n patches = asmblock.asm_resolve_final(\n machine.mn,\n asmcfg,\n )\n # Put shelcode in a string\n for offset, raw in patches.items():\n virt[offset] = raw\n data = bytes(virt)\n cont = Container.fallback_container(\n data,\n vm=None, addr=0,\n loc_db=loc_db,\n )\n dis_engine = machine.dis_engine\n # Disassemble back the shellcode\n # Now, basic blocks are at known position, determined by\n # the assembled version\n mdis = dis_engine(cont.bin_stream, loc_db=cont.loc_db)\n asmcfg = mdis.dis_multiblock(0)\n return asmcfg\n\ndef lift_x86_asm(asm, model_call=False, lifter_custom=None):\n asmcfg = gen_x86_asmcfg(asm)\n machine = Machine(\"x86_32\")\n # Get a lifter\n if model_call and lifter_custom is None:\n lifter = LifterModelCall_x86_32(asmcfg.loc_db)\n elif lifter_custom is not None:\n lifter = lifter_custom(asmcfg.loc_db)\n else:\n lifter = machine.lifter(asmcfg.loc_db)\n\n # Translate to IR\n ircfg = lifter.new_ircfg_from_asmcfg(asmcfg)\n return ircfg\n\ndef graph_ir_x86(asm, model_call=False, lifter_custom=None):\n ircfg = lift_x86_asm(asm, model_call, lifter_custom)\n return ircfg.graphviz()\n\n\n# Let's generate the AsmCFG\nasmcfg = gen_x86_asmcfg(\"\"\"\nmain:\n MOV EAX, EBX\n\"\"\")\nasmcfg.graphviz()\n\n# And graph the 
corresponding IRCFG\ngraph_ir_x86(\"\"\"\nmain:\n MOV EAX, EBX\n\"\"\")", "Lets analyze this graph:\n- the first ir basic block has the name set to main\n- it is composed of 2 AssignBlocks\n- the first AssignBlock contains only one assignment, EAX = EBX\n- the second one is IRDst = loc_key_1\nThe IRDst is a special register which represent a kind of program counter in intermediate representation. Each IRBlock has one and only one assignment to IRDst. The position of the IRDst assignment is not always in the last AssignBlock of the IRBlock. In our case, the shellcode stops after the MOV EAX, EBX, so the next location to execution is unknown: end. This label has been artificially added by the script.\nLet's take another instruction.", "graph_ir_x86(\"\"\"\nmain:\n ADD EAX, 3\n\"\"\")", "In this graph, we can note that each instruction side effect is represented.\nNote that in the equation:\nzf = FLAG_EQ_CMP(EAX, -0x3)\nThe detailed version of the expression:\nExprId('zf', 1) = ExprOp('FLAG_EQ_CMP', ExprId('EAX', 32), ExprInt(-0x3, 32))\nThe operator FLAG_EQ_CMP is a kind of high level representation. But you can customize the lifter in order to get the real equation of the zf. This will be presented in a documentation dedicated to modification of the intermediate representation control flow graph.\nExprId('zf', 1) = ExprCond(ExprId('EAX', 32) - ExprInt(-0x3, 32), ExprInt(0, 1), ExprInt(1, 1))\nwhich is, in a simplified form:\nzf = (EAX - 3) ? (0, 1)", "graph_ir_x86(\"\"\"\nmain:\n XCHG EAX, EBX\n\"\"\")", "This one is interesting, as it demonstrate perfectly the parallel execution of multiple assignments. In you are puzzled by this notation, imagine this describes equations, which expresses destination variables of an output state depending on an input state. The equations can be rewritten:\nEAX_out = EBX_in\nEBX_out = EAX_in\nAnd this matches the xchg semantic. After the execution, those variables are committed, which means that EAX takes the value of EAX_out, and EBX takes the value of EBX_out\nSome arbitrary choices have been done in order to try to match as best as possible. For example lets take the instruction:\nCMOVZ EAX, EBX\nThis conditional move is done if the zero flag is activated. So we may want to translate it as:\nEAX = zf ? EBX : EAX\nWhich can be read: if zf is 1, EAX is set to EBX else EAX is set to EAX, which is equivalent to no modifications.\nThis representation seems good at first, as the semantic of the conditional move seems ok. But let's question the system on the equation EAX = zf ? EBX, EAX:\n- which register is written ? EAX is always written\n- which register is read ? zf, EBX, EAX are read\nIF we ask the same question on the instruction CMOVZ EAX, EBX, the answers are a bit different:\n- which register is written ? EAX is written only if the zf is 1\n- which register is read ? zf is always read, EBX may be read is zf is 1\nThe conclusion is the representation we gave doesn't represent properly the instruction. 
Here is what Miasm will gave as intermediate representation for it:", "# Here is a push\ngraph_ir_x86(\"\"\"\nmain:\n PUSH EAX\n\"\"\")\n\ngraph_ir_x86(\"\"\"\nmain:\n CMOVZ EAX, EBX\n\"\"\")", "Here are some remarks we can do on this version:\n- one x86 instruction has generated multiple IRBlocks\n- the first IRBlock only reads the zf (we don't take the locations into account here)\n- EAX is assigned only in the case of zf equals to 1\n- EBX is read only in the case of zf equals to 1\nWe can dispute on the fact that in this form, it's harder to get what is read and what is written. But one argument is: If cmovz doesn't exist (for example in older cpus) what may be the code to do this ?", "graph_ir_x86(\"\"\"\nmain:\n JZ end\n MOV EAX, EBX\nend:\n\"\"\")", "The conclusion is that in intermediate representation, the cmovz is exactly as difficult as analyzing the code using jz/mov\nSo an important point is that in intermediate representation, one instruction can generate multiple IRBlocks. Here are some interesting examples:", "graph_ir_x86(\"\"\"\nmain:\n MOVSB\n\"\"\")", "And now, the version using a repeat prefix:", "graph_ir_x86(\"\"\"\nmain:\n REP MOVSB\n\"\"\")", "In the very same way as cmovz, if the rep movsb instruction didn't exist, we would use a more complex code.\nThe translation of some instructions are tricky:", "graph_ir_x86(\"\"\"\nmain:\n SHR EAX, 1\n\"\"\")", "For the moment, nothing special. EAX is updated correctly, and the flags are updated according to the result (note those side effects are in parallel here). But look at the next one:", "graph_ir_x86(\"\"\"\nmain:\n SHR EAX, CL\n\"\"\")", "In this case, if CL is zero, the destination is shifted by a zero amount. The instruction behaves (in 32 bit mode) as a nop, and the flags are not assigned. We could have done the same trick as in the cmovz, but this representation matches more accurately the instruction semantic.\nHere is another one:", "graph_ir_x86(\"\"\"\nmain:\n DIV ECX\n\"\"\")", "This instruction may generate an exception in case of the divisor is zero. The intermediate representation generates a test in which it evaluate the divisor value and assigns a special register exception_flags to a constant. This constant represents the division by zero.\nNote this is arbitrary. We could have done the choice to not explicit the possible division by zero, and keep in mind that the umod and udiv operator may generate exceptions. This may change in a future version of Miasm. Indeed, each memory access may generate a exception, and Miasm doesn't explicit them in the intermediate representation: this may be misleading and very hard to analyze in a post pass. This is why we may accept to implicitly raise exception in both those operators rather than generating such a code.\nThe same choice has been done in other instructions:", "graph_ir_x86(\"\"\"\nmain:\n INT 0x3\n\"\"\")", "Memory accesses by default explicit segmentation:", "graph_ir_x86(\"\"\"\nmain:\n MOV EAX, DWORD PTR FS:[EBX]\n\"\"\")", "The pointer of the memory uses the special operator segm, which takes two arguments:\n- the value of the segment used the memory access\n- the base address\nNote that if you work in a flat segmentation model, you can add a post translation pass which will simplify ExprOp(\"segm\", A, B) into B. This will ease code analysis.\nNote: If you read carefully the documentation on expressions, you know that the word ExprOp is n-ary and that all of its arguments must have the same size. The operator segm is one of the exceptions. 
The register FS has a size of 16 bit (as a segment selector register) and EBX has a size of 32. In this case, the size of ExprOp(\"segm\", FS, EBX) has the size of EBX\nIntermediate representation translation\nIn this part, we will explain some manipulations which can be done during the native code lifting. Let's take the example of a call to a subfunction:", "asmcfg = gen_x86_asmcfg(\"\"\"\nmain:\n CALL 0x11223344\n MOV EBX, EAX\n\"\"\")\nasmcfg.graphviz()\n\ngraph_ir_x86(\"\"\"\nmain:\n CALL 0x11223344\n MOV EBX, EAX\n\"\"\")", "What did happened here ?\n- the call instruction has 2 side effects: stacking the return address and jumping to the subfunction address\n- here, the subfunction address is 0x1122334455, and the return address is located at offset 0x5, which is represented here by loc_5\nThe question is: why are there unlinked nodes in the graph? The answer is that the graph only analyzes destinations of the IRBlocks, which means the value of IRDst. So in the main, Miasm knowns that the next IRBlock is located at loc_11223344. But as we didn't disassemble code at this address, we don't have its intermediate representation.\nBut the disassembler engine knowns (this behavior can be customized) that a call returns back to the instruction just next to the call. So the basic block at end has been disassembled and translated. If we analyze IRDst only, there are no links between them.\nThis raw way of translating is interesting to see low level moves of stack and return address, but it makes code analysis a bit hard. What we may want is to consider subcalls like an unknown operator, with arguments and side effects. This may model the call to a subfunction.\nThis is the difference in Miasm between translating using lifter (raw translation) and lifter_model_call (ilifter + call modelization) which models subfunction calls. By default, Miasm uses a basic model which is wrong in most cases. But this model can (and must ?) be replaced by the user behavior.\nYou can observe the difference in the examples:\nexample/disasm/dis_binary_lift.py\nand\nexample/disasm/dis_binary_lifter_model_call.py", "graph_ir_x86(\"\"\"\nmain:\n MOV EBX, 0x1234\n CALL 0x11223344\n MOV ECX, EAX\n RET\n\"\"\", True)", "What happened here?\nThe translation of the call is replaced by two side effects which occur in parallel:\n- EAX is set to the result of the operator call_func_ret which has two arguments: loc_11223344 and ESP\n- ESP is set to the result of the operator call_func_stack which has two arguments: loc_11223344 and ESP\nThe first one is there to model the assignment in 'classic' x86 code of the return value. The second one is there to model a possible change of the stack pointer depending on the function called, that the old stack pointer.\nEverything here can be subclassed in order to customize the translation behavior.\nSubfunction call custom modeling\nThe code responsible of the modelisation of function calls is located in the LifterModelCall class (the lifter with call modeling) in miasm/ir/analysis.py:\n```python\n...\n def call_effects(self, addr, instr):\n \"\"\"Default modelisation of a function call to @addr. 
This may be used to:\n * insert dependencies to arguments (stack base, registers, ...)\n * add some side effects (stack clean, return value, ...)\n\n Return a couple:\n * list of assignments to add to the current irblock\n * list of additional irblocks\n\n @addr: (Expr) address of the called function\n @instr: native instruction which is responsible of the call\n \"\"\"\n\n call_assignblk = AssignBlock(\n [\n ExprAssign(self.ret_reg, ExprOp('call_func_ret', addr, self.sp)),\n ExprAssign(self.sp, ExprOp('call_func_stack', addr, self.sp))\n ],\n instr\n )\n return [call_assignblk], []\n\n```\nSome architectures subclass it to include some architecture dependent stuffs, for example in miasm/arch/x86/lifter_model_call.py in which we use a default calling convention linked to arguments passed through registers:\n```python\n...\n def call_effects(self, ad, instr):\n call_assignblk = AssignBlock(\n [\n ExprAssign(\n self.ret_reg,\n ExprOp(\n 'call_func_ret',\n ad,\n self.sp,\n self.arch.regs.RCX,\n self.arch.regs.RDX,\n self.arch.regs.R8,\n self.arch.regs.R9,\n )\n ),\n ExprAssign(self.sp, ExprOp('call_func_stack', ad, self.sp)),\n ],\n instr\n )\n return [call_assignblk], []\n```\nThis is the generic code used in x86_64 to model function calls. But you can finely model functions. For example, suppose you are analysing code on x86_32 with stdcall convention. Suppose you know the callee clean its stack arguments. Supppose as well you know for each function how many arguments it has. You can then customize the model to match the callee and compute the correct stack modification, as well as getting the arguments from stack:", "# Construct a custom lifter\nclass LifterFixCallStack(LifterModelCall_x86_32):\n def call_effects(self, addr, instr):\n if addr.is_loc():\n if self.loc_db.get_location_offset(addr.loc_key) == 0x11223344:\n # Suppose the function at 0x11223344 has 3 arguments\n args_count = 3\n else:\n # It's a function we didn't analyze\n raise RuntimeError(\"Unknown function parameters\")\n else:\n # It's a dynamic call !\n raise RuntimeError(\"Dynamic destination ?\")\n # Arguments are taken from stack\n args = []\n for i in range(args_count):\n args.append(ExprMem(self.sp + ExprInt(i * 4, 32), 32))\n # Generate the model\n call_assignblk = AssignBlock(\n [\n ExprAssign(self.ret_reg, ExprOp('call_func_ret', addr, *args)),\n ExprAssign(self.sp, self.sp + ExprInt(args_count * 4, self.sp.size))\n ],\n instr\n )\n return [call_assignblk], []\n\ngraph_ir_x86(\"\"\"\nmain:\n MOV EBX, 0x1234\n PUSH 3\n PUSH 2\n PUSH 1\n CALL 0x11223344\n MOV ECX, EAX\n RET\n\"\"\", lifter_custom=LifterFixCallStack)", "In the new graph, it's now easy to see that EAX depends on a custom operator call_func_ret with arguments:\n- loc_11223344\n- @32[ESP + 0x0]\n- @32[ESP + 0x4]\n- @32[ESP + 0x8]\nThe stack pointer is updated: it is increased by 0xC bytes, which corresponds to its arguments size (we didn't model the extra 4 bytes pushed on the stack for the return address, so no need to take them into account using our arbitrary model)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
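The lifting walkthrough above manipulates Miasm expressions (ExprId, ExprInt, ExprOp) throughout. As a small, hedged illustration of that expression layer on its own, assuming the same Miasm version the notebook imports, the snippet below builds (EAX + 3) - 3 and lets the stock simplification engine reduce it; the exact printed form of the unsimplified expression may vary between versions:

```python
from miasm.expression.expression import ExprId, ExprInt
from miasm.expression.simplifications import expr_simp

eax = ExprId('EAX', 32)
expr = (eax + ExprInt(3, 32)) - ExprInt(3, 32)

print(expr)             # unsimplified expression tree
print(expr_simp(expr))  # constant folding should reduce it back to EAX
```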
bdestombe/flopy-1
examples/Notebooks/flopy3_ZoneBudget_example.ipynb
bsd-3-clause
[ "FloPy\nZoneBudget Example\nThis notebook demonstrates how to use the ZoneBudget class to extract budget information from the cell by cell budget file using an array of zones.\nFirst set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer. If you want to run this notebook, you have to set the path to your own flopy path.", "%matplotlib inline\nfrom __future__ import print_function\nimport os\nimport sys\nimport platform\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport flopy\n\nprint(sys.version)\nprint('numpy version: {}'.format(np.__version__))\nprint('matplotlib version: {}'.format(mpl.__version__))\nprint('pandas version: {}'.format(pd.__version__))\nprint('flopy version: {}'.format(flopy.__version__))\n\n# Set path to example datafiles\nloadpth = os.path.join('..', 'data', 'zonbud_examples')\ncbc_f = os.path.join(loadpth, 'freyberg_mlt', 'freyberg.gitcbc')", "Read File Containing Zones\nUsing the read_zbarray utility, we can import zonebudget-style array files.", "from flopy.utils import read_zbarray\n\nzone_file = os.path.join(loadpth, 'zonef_mlt')\nzon = read_zbarray(zone_file)\nnlay, nrow, ncol = zon.shape\n\nfig = plt.figure(figsize=(10, 4))\n\nfor lay in range(nlay):\n ax = fig.add_subplot(1, nlay, lay+1)\n im = ax.pcolormesh(zon[lay, :, :])\n cbar = plt.colorbar(im)\n plt.gca().set_aspect('equal')\n \nplt.show()\nnp.unique(zon)", "Extract Budget Information from ZoneBudget Object\nAt the core of the ZoneBudget object is a numpy structured array. The class provides some wrapper functions to help us interogate the array and save it to disk.", "# Create a ZoneBudget object and get the budget record array\nzb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))\nzb.get_budget()\n\n# Get a list of the unique budget record names\nzb.get_record_names()\n\n# Look at a subset of fluxes\nnames = ['RECHARGE_IN', 'ZONE_1_IN', 'ZONE_3_IN']\nzb.get_budget(names=names)\n\n# Look at fluxes in from zone 2\nnames = ['RECHARGE_IN', 'ZONE_1_IN', 'ZONE_3_IN']\nzones = ['ZONE_2']\nzb.get_budget(names=names, zones=zones)\n\n# Look at all of the mass-balance records\nnames = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']\nzb.get_budget(names=names)", "Convert Units\nThe ZoneBudget class supports the use of mathematical operators and returns a new copy of the object.", "cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))\ncfd = cmd / 35.3147\ninyr = (cfd / (250 * 250)) * 365 * 12\n\ncmdbud = cmd.get_budget()\ncfdbud = cfd.get_budget()\ninyrbud = inyr.get_budget()\n\nnames = ['RECHARGE_IN']\nrowidx = np.in1d(cmdbud['name'], names)\ncolidx = 'ZONE_1'\n\nprint('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))\nprint('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))\nprint('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))\n\ncmd is cfd", "Alias Names\nA dictionary of {zone: \"alias\"} pairs can be passed to replace the typical \"ZONE_X\" fieldnames of the ZoneBudget structured array with more descriptive names.", "aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}\nzb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)\nzb.get_budget()", "Return the Budgets as a Pandas DataFrame\nSet kstpkper and totim keyword args to None (or omit) to return all times.\nThe get_dataframes() method will return a DataFrame multi-indexed on totim and name.", "zon = np.ones((nlay, nrow, ncol), np.int)\nzon[1, :, :] = 2\nzon[2, :, :] = 
3\n\naliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}\nzb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=None, aliases=aliases)\ndf = zb.get_dataframes()\nprint(df.head())\nprint(df.tail())", "Slice the multi-index dataframe to retrieve a subset of the budget", "dateidx1 = 1092.\ndateidx2 = 1097.\nnames = ['RECHARGE_IN', 'WELLS_OUT']\nzones = ['SURF', 'CONF']\ndf.loc[(slice(dateidx1, dateidx2), names), :][zones]", "Look at pumpage (WELLS_OUT) as a percentage of recharge (RECHARGE_IN)", "dateidx1 = 1092.\ndateidx2 = 1097.\nzones = ['SURF']\n\n# Pull out the individual records of interest\nrech = df.loc[(slice(dateidx1, dateidx2), ['RECHARGE_IN']), :][zones]\npump = df.loc[(slice(dateidx1, dateidx2), ['WELLS_OUT']), :][zones]\n\n# Remove the \"record\" field from the index so we can \n# take the difference of the two DataFrames\nrech = rech.reset_index()\nrech = rech.set_index(['totim'])\nrech = rech[zones]\npump = pump.reset_index()\npump = pump.set_index(['totim'])\npump = pump[zones] * -1\n\n# Compute pumping as a percentage of recharge\npump_as_pct = (pump / rech) * 100.\npump_as_pct\n\n# Use \"slice(None)\" to return all records\ndf.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]\n\n# Or all times\ndf.loc[(slice(None), names), :][zones]", "Pass start_datetime and timeunit keyword arguments to return a dataframe with a datetime multi-index", "df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D')\ndateidx1 = pd.Timestamp('1972-12-01')\ndateidx2 = pd.Timestamp('1972-12-06')\nnames = ['RECHARGE_IN', 'WELLS_OUT']\nzones = ['SURF', 'CONF']\ndf.loc[(slice(dateidx1, dateidx2), names), :][zones]", "Pass index_key to indicate which fields to use in the multi-index (defualt is \"totim\"; valid keys are \"totim\" and \"kstpkper\")", "df = zb.get_dataframes(index_key='kstpkper')\ndf.head()", "Write Budget Output to CSV\nWe can write the resulting recarray to a csv file with the .to_csv() method of the ZoneBudget object.", "zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])\nzb.to_csv(os.path.join(loadpth, 'zonbud.csv'))\n\n# Read the file in to see the contents\nfname = os.path.join(loadpth, 'zonbud.csv')\ntry:\n import pandas as pd\n print(pd.read_csv(fname).to_string(index=False))\nexcept:\n with open(fname, 'r') as f:\n for line in f.readlines():\n print('\\t'.join(line.split(',')))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
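The budget analysis above leans heavily on pandas multi-index slicing such as df.loc[(slice(dateidx1, dateidx2), names), :][zones]. A hedged, self-contained sketch of that idiom with a tiny synthetic frame (the numbers are invented, not ZoneBudget output) makes the (slice, list) tuple easier to read:

```python
import pandas as pd

# Synthetic frame indexed like the ZoneBudget dataframe: (totim, record name)
idx = pd.MultiIndex.from_product(
    [[1092.0, 1097.0], ['RECHARGE_IN', 'WELLS_OUT']],
    names=['totim', 'name'])
df = pd.DataFrame({'SURF': [10.0, 2.0, 11.0, 3.0],
                   'CONF': [5.0, 1.0, 6.0, 2.0]}, index=idx)

# slice(...) selects a range on the first index level, the list picks records,
# and the trailing [['SURF']] keeps only the zones of interest
subset = df.loc[(slice(1092.0, 1097.0), ['RECHARGE_IN']), :][['SURF']]
print(subset)
```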
tensorflow/docs-l10n
site/ko/probability/examples/Bayesian_Gaussian_Mixture_Model.ipynb
apache-2.0
[ "Copyright 2018 The TensorFlow Probability Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");", "#@title Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "베이지안 가우시안 혼합 모델 및 해밀턴 MCMC\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/probability/examples/Bayesian_Gaussian_Mixture_Model\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org에서 보기</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/probability/examples/Bayesian_Gaussian_Mixture_Model.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab에서 실행하기</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/probability/examples/Bayesian_Gaussian_Mixture_Model.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub에서 보기</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/probability/examples/Bayesian_Gaussian_Mixture_Model.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">노트북 다운로드하기</a></td>\n</table>\n\n이 colab에서는 TensorFlow Probability 기본 형식만 사용하여 베이지안 가우시안 혼합 모델(BGMM)의 사후 확률에서 샘플링을 탐색합니다.\n모델\n각 차원 $D$의 $k\\in{1,\\ldots,K}$ 혼합 구성 요소에 대해 다음의 베이지안 가우시안 혼합 모델로 $i\\in{1,\\ldots,N}$ iid 샘플을 모델링하려고 합니다.\n$$\\begin{align} \\theta &\\sim \\text{Dirichlet}(\\text{concentration}=\\alpha_0)\\ \\mu_k &\\sim \\text{Normal}(\\text{loc}=\\mu_{0k}, \\text{scale}=I_D)\\ T_k &\\sim \\text{Wishart}(\\text{df}=5, \\text{scale}=I_D)\\ Z_i &\\sim \\text{Categorical}(\\text{probs}=\\theta)\\ Y_i &\\sim \\text{Normal}(\\text{loc}=\\mu_{z_i}, \\text{scale}=T_{z_i}^{-1/2})\\ \\end{align}$$\nscale 인수는 모두 cholesky 의미 체계를 가지고 있습니다. 이 규칙은 TF 분포의 규칙이기 때문에 사용됩니다(계산적으로 유리하므로 TF 분포에서 부분적으로 이 규칙을 사용합니다).\n목표는 다음의 사후 확률에서 샘플을 생성하는 것입니다.\n$$p\\left(\\theta, {\\mu_k, T_k}{k=1}^K \\Big| {y_i}{i=1}^N, \\alpha_0, {\\mu_{ok}}_{k=1}^K\\right)$$\n${Z_i}_{i=1}^N$는 존재하지 않는다는 점에 유의하세요. $N$로 조정되지 않는 확률 변수에만 관심을 둡니다(또한, 운 좋게도 $Z_i$를 무시하는 TF 분포가 있습니다).\n계산적으로 다루기 힘든 정규화 항으로 인해 이 분포에서 직접 샘플링하는 것은 불가능합니다.\n메트로폴리스-헤이스팅스 알고리즘은 다루기 힘든 정규화된 분포에서 샘플링하는 기술입니다.\nTensorFlow Probability는 메트로폴리스-헤이스팅스 기반의 여러 옵션을 포함하여 많은 MCMC 옵션을 제공합니다. 이 노트북에서는 해밀턴 몬테카를로(tfp.mcmc.HamiltonianMonteCarlo)를 사용합니다. 해밀턴 몬테카를로(HMC)는 신속하게 수렴하고 (좌표 방식이 아닌) 상태 공간을 공동으로 샘플링하며, TF의 장점 중 하나인 자동 미분을 활용하므로 종종 좋은 선택입니다. 
즉, BGMM 사후 확률에서의 샘플링은 실제로 Gibb의 샘플링과 같은 다른 접근 방식을 사용하면 더 잘 수행될 수 있습니다.", "%matplotlib inline\n\n\nimport functools\n\nimport matplotlib.pyplot as plt; plt.style.use('ggplot')\nimport numpy as np\nimport seaborn as sns; sns.set_context('notebook')\n\nimport tensorflow.compat.v2 as tf\ntf.enable_v2_behavior()\nimport tensorflow_probability as tfp\n\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nif len(physical_devices) &gt; 0:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)", "실제로 모델을 빌드하기 전에 새로운 유형의 분포를 정의해야 합니다. 위의 모델 사양에서 역공분산 행렬, 즉 정밀 행렬로 MVN을 매개변수화하고 있음이 분명합니다. 이를 TF에서 달성하려면, Bijector를 롤 아웃해야 합니다. 이 Bijector는 순방향 변환을 사용합니다.\n\nY = tf.linalg.triangular_solve((tf.linalg.matrix_transpose(chol_precision_tril), X, adjoint=True) + loc.\n\n그리고 log_prob 계산은 그 반대입니다. 즉, 이 계산은 다음과 같습니다.\n\nX = tf.linalg.matmul(chol_precision_tril, X - loc, adjoint_a=True).\n\nHMC에 필요한 것은 log_prob 뿐이므로, (tfd.MultivariateNormalTriL의 경우처럼) tf.linalg.triangular_solve를 호출하지 않습니다. 이는 tf.linalg.matmul이 일반적으로 더 나은 캐시 위치로 인해 더 빠르기 때문에 유리합니다.", "class MVNCholPrecisionTriL(tfd.TransformedDistribution):\n \"\"\"MVN from loc and (Cholesky) precision matrix.\"\"\"\n\n def __init__(self, loc, chol_precision_tril, name=None):\n super(MVNCholPrecisionTriL, self).__init__(\n distribution=tfd.Independent(tfd.Normal(tf.zeros_like(loc),\n scale=tf.ones_like(loc)),\n reinterpreted_batch_ndims=1),\n bijector=tfb.Chain([\n tfb.Affine(shift=loc),\n tfb.Invert(tfb.Affine(scale_tril=chol_precision_tril,\n adjoint=True)),\n ]),\n name=name)", "tfd.Independent 분포는 한 분포의 독립적인 그리기를 통계적으로 독립된 좌표가 있는 다변량 분포로 바꿉니다. log_prob 계산 측면에서, 이 '메타 분포'는 이벤트 차원에 대한 단순 합계로 나타납니다.\n또한 scale 행렬의 adjoint ('transpose')를 사용했습니다. 그 이유는 정밀도가 역공분산이면, 즉 $P=C^{-1}$이고 $C=AA^\\top$이면, $P=BB^{\\top}$이고 여기서 $B=A^{-\\top}$입니다.\n이 분포는 다소 까다로우므로 MVNCholPrecisionTriL이 예상되는 대로 동작하는지 빠르게 확인하겠습니다.", "def compute_sample_stats(d, seed=42, n=int(1e6)):\n x = d.sample(n, seed=seed)\n sample_mean = tf.reduce_mean(x, axis=0, keepdims=True)\n s = x - sample_mean\n sample_cov = tf.linalg.matmul(s, s, adjoint_a=True) / tf.cast(n, s.dtype)\n sample_scale = tf.linalg.cholesky(sample_cov)\n sample_mean = sample_mean[0]\n return [\n sample_mean,\n sample_cov,\n sample_scale,\n ]\n\ndtype = np.float32\ntrue_loc = np.array([1., -1.], dtype=dtype)\ntrue_chol_precision = np.array([[1., 0.],\n [2., 8.]],\n dtype=dtype)\ntrue_precision = np.matmul(true_chol_precision, true_chol_precision.T)\ntrue_cov = np.linalg.inv(true_precision)\n\nd = MVNCholPrecisionTriL(\n loc=true_loc,\n chol_precision_tril=true_chol_precision)\n\n[sample_mean, sample_cov, sample_scale] = [\n t.numpy() for t in compute_sample_stats(d)]\n\nprint('true mean:', true_loc)\nprint('sample mean:', sample_mean)\nprint('true cov:\\n', true_cov)\nprint('sample cov:\\n', sample_cov)", "샘플 평균과 공분산이 실제 평균과 공분산에 가까우므로 분포가 올바르게 구현된 것처럼 보입니다. 이제 MVNCholPrecisionTriL tfp.distributions.JointDistributionNamed로 BGMM 모델을 지정합니다. 
관찰 모델의 경우, tfd.MixtureSameFamily를 사용하여 ${Z_i}_{i=1}^N$ 그리기를 자동으로 통합합니다.", "dtype = np.float64\ndims = 2\ncomponents = 3\nnum_samples = 1000\n\nbgmm = tfd.JointDistributionNamed(dict(\n mix_probs=tfd.Dirichlet(\n concentration=np.ones(components, dtype) / 10.),\n loc=tfd.Independent(\n tfd.Normal(\n loc=np.stack([\n -np.ones(dims, dtype),\n np.zeros(dims, dtype),\n np.ones(dims, dtype),\n ]),\n scale=tf.ones([components, dims], dtype)),\n reinterpreted_batch_ndims=2),\n precision=tfd.Independent(\n tfd.WishartTriL(\n df=5,\n scale_tril=np.stack([np.eye(dims, dtype=dtype)]*components),\n input_output_cholesky=True),\n reinterpreted_batch_ndims=1),\n s=lambda mix_probs, loc, precision: tfd.Sample(tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=mix_probs),\n components_distribution=MVNCholPrecisionTriL(\n loc=loc,\n chol_precision_tril=precision)),\n sample_shape=num_samples)\n))\n\ndef joint_log_prob(observations, mix_probs, loc, chol_precision):\n \"\"\"BGMM with priors: loc=Normal, precision=Inverse-Wishart, mix=Dirichlet.\n\n Args:\n observations: `[n, d]`-shaped `Tensor` representing Bayesian Gaussian\n Mixture model draws. Each sample is a length-`d` vector.\n mix_probs: `[K]`-shaped `Tensor` representing random draw from\n `Dirichlet` prior.\n loc: `[K, d]`-shaped `Tensor` representing the location parameter of the\n `K` components.\n chol_precision: `[K, d, d]`-shaped `Tensor` representing `K` lower\n triangular `cholesky(Precision)` matrices, each being sampled from\n a Wishart distribution.\n\n Returns:\n log_prob: `Tensor` representing joint log-density over all inputs.\n \"\"\"\n return bgmm.log_prob(\n mix_probs=mix_probs, loc=loc, precision=chol_precision, s=observations)", "'훈련' 데이터를 생성합니다.\n다음 데모에서는 무작위의 데이터를 샘플링합니다.", "true_loc = np.array([[-2., -2],\n [0, 0],\n [2, 2]], dtype)\nrandom = np.random.RandomState(seed=43)\n\ntrue_hidden_component = random.randint(0, components, num_samples)\nobservations = (true_loc[true_hidden_component] +\n random.randn(num_samples, dims).astype(dtype))", "HMC를 사용한 베이지안 추론\n이제 TFD를 사용하여 모델을 지정하고 일부 관찰 데이터를 얻었으므로 HMC를 실행하는 데 필요한 모든 부분을 확보했습니다.\nHMC를 실행하려면 부분 적용을 사용하여 샘플링하고 싶지 않은 항목을 '고정'합니다. 이 경우에는 observations만 고정하면 됩니다(하이퍼 매개변수는 이미 사전 확률 분포에 적용되었으며 joint_log_prob 함수 서명의 일부가 아닙니다).", "unnormalized_posterior_log_prob = functools.partial(joint_log_prob, observations)\n\ninitial_state = [\n tf.fill([components],\n value=np.array(1. / components, dtype),\n name='mix_probs'),\n tf.constant(np.array([[-2., -2],\n [0, 0],\n [2, 2]], dtype),\n name='loc'),\n tf.linalg.eye(dims, batch_shape=[components], dtype=dtype, name='chol_precision'),\n]", "제약 조건이 없는 표현\n해밀턴 몬테카를로(HMC)는 인수와 관련하여 대상 로그 확률 함수를 미분할 수 있어야 합니다. 또한 HMC는 상태 공간에 제약 조건이 없는 경우 훨씬 더 높은 통계 효율성을 나타낼 수 있습니다.\n즉, BGMM 사후 확률에서 샘플링할 때 두 가지 주요 문제를 해결해야 합니다.\n\n$\\theta$는 이산 확률 벡터를 나타냅니다. 즉, $\\sum_{k=1}^K \\theta_k = 1$ 및 $\\theta_k>0$와 같아야 합니다.\n$T_k$는 역공분산 행렬을 나타냅니다. 즉, $T_k \\succ 0$가 되어야 합니다. 이는 양정치가 됩니다.\n\n위의 요구 사항을 해결하려면 다음을 수행해야 합니다.\n\n제약 조건이 있는 변수를 제약 조건이 없는 공간으로 변환합니다.\n제약 조건이 없는 공간에서 MCMC를 실행합니다.\n제약 조건이 없는 변수를 제약 조건이 있는 공간으로 다시 변환합니다.\n\nMVNCholPrecisionTriL과 마찬가지로, 제약 조건이 없는 공간으로 확률 변수를 변환하려면 Bijector를 사용합니다.\n\n\nDirichlet은 softmax 함수를 통해 제약 조건이 없는 공간으로 변환됩니다.\n\n\n정밀도 확률 변수는 준 양정치 행렬에 대한 분포입니다. 이들에 대한 제약 조건을 없애기 위해서는 FillTriangular 및 TransformDiagonal bijector를 사용합니다. 이들 bijector는 벡터를 하부 삼각 행렬로 변환하고 대각선이 양수인지 확인합니다. 
전자는 $d^2$ 대신 $d(d+1)/2$ float만 샘플링할 수 있으므로 유용합니다.", "unconstraining_bijectors = [\n tfb.SoftmaxCentered(),\n tfb.Identity(),\n tfb.Chain([\n tfb.TransformDiagonal(tfb.Softplus()),\n tfb.FillTriangular(),\n ])]\n\n@tf.function(autograph=False)\ndef sample():\n return tfp.mcmc.sample_chain(\n num_results=2000,\n num_burnin_steps=500,\n current_state=initial_state,\n kernel=tfp.mcmc.SimpleStepSizeAdaptation(\n tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=unnormalized_posterior_log_prob,\n step_size=0.065,\n num_leapfrog_steps=5),\n bijector=unconstraining_bijectors),\n num_adaptation_steps=400),\n trace_fn=lambda _, pkr: pkr.inner_results.inner_results.is_accepted)\n\n[mix_probs, loc, chol_precision], is_accepted = sample()", "이제 chain을 실행하고 사후 확률 분포의 평균을 출력합니다.", "acceptance_rate = tf.reduce_mean(tf.cast(is_accepted, dtype=tf.float32)).numpy()\nmean_mix_probs = tf.reduce_mean(mix_probs, axis=0).numpy()\nmean_loc = tf.reduce_mean(loc, axis=0).numpy()\nmean_chol_precision = tf.reduce_mean(chol_precision, axis=0).numpy()\nprecision = tf.linalg.matmul(chol_precision, chol_precision, transpose_b=True)\n\n\nprint('acceptance_rate:', acceptance_rate)\nprint('avg mix probs:', mean_mix_probs)\nprint('avg loc:\\n', mean_loc)\nprint('avg chol(precision):\\n', mean_chol_precision)\n\nloc_ = loc.numpy()\nax = sns.kdeplot(loc_[:,0,0], loc_[:,0,1], shade=True, shade_lowest=False)\nax = sns.kdeplot(loc_[:,1,0], loc_[:,1,1], shade=True, shade_lowest=False)\nax = sns.kdeplot(loc_[:,2,0], loc_[:,2,1], shade=True, shade_lowest=False)\nplt.title('KDE of loc draws');", "결론\n이 간단한 colab에서는 TensorFlow Probability 기본 형식을 사용하여 계층적 베이지안 혼합 모델을 빌드하는 방법을 보았습니다." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
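The MVNCholPrecisionTriL check above compares sample statistics against a covariance derived from a Cholesky factor of the precision matrix. A NumPy-only sketch of that relationship (reusing the same 2x2 example values as the notebook) shows why inverting B @ B.T recovers the covariance:

```python
import numpy as np

# Lower-triangular Cholesky factor B of the precision matrix P
chol_precision = np.array([[1., 0.],
                           [2., 8.]])
precision = chol_precision @ chol_precision.T   # P = B B^T
covariance = np.linalg.inv(precision)           # C = P^{-1}

print(covariance)
# Sanity check: P @ C is (numerically) the identity
print(np.allclose(precision @ covariance, np.eye(2)))  # True
```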
JavierVLAB/DataAnalysisScience
AutoMPG/AutoMPG.ipynb
gpl-3.0
[ "<h1>Exploration of Auto MPG</h1>", "import math\nimport numpy\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\n\ncars_names = ['mpg','cylinders','displacement','horsepower',\n 'weight','acceleration','model year','origin','car name']\n\ncars = pd.read_table(\"auto-mpg.data.txt\", delim_whitespace=True, names = cars_names)\n\n#Size of the Dataset\ncars.shape\n\ncars.columns\n\ncars.head(5)\n\ncars.info()\n\n#horsepower has to be numeric, but it has 6 rows with the value '?'\n#drop these rows and convert the column to float\n\ncars = cars[cars['horsepower'] != '?' ]\ncars['horsepower'] = cars['horsepower'].astype('float32')\n\n\nimport seaborn as sns\n\ncars = cars[['mpg', 'cylinders', 'displacement', 'horsepower', 'weight',\n 'acceleration', 'model year', 'origin']]\n\n#for i in c:\n# cars[i].hist()\n# plt.xlabel(i)\n# plt.show()\n\nsns.pairplot(cars)", "<h2>Linear Regression</h2>", "c = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight',\n 'acceleration', 'model year', 'origin']\nlimit = int(3*cars.shape[0]/4)\nX_train = cars.iloc[:limit,1:]\ny_train = cars.iloc[:limit,0]\nX_test = cars.iloc[limit:,1:]\ny_test = cars.iloc[limit:,0]\n\n\nlr = LinearRegression()\n\n#Fit the model on the training split and predict on the test split\nlr.fit(X_train, y_train)\npredictions = lr.predict(X_test)\n\n#mean_squared_error\nmse = mean_squared_error(y_test, predictions)\n\n# The coefficients\nprint('Coefficients: ', lr.coef_)\n# The mean squared error\nprint(\"Mean squared error: %.2f\" % mse)\n# Explained variance score: 1 is perfect prediction\nprint('Variance score: %.2f' % lr.score(X_test, y_test))\n\nplt.scatter(X_test[\"weight\"], y_test, c='b')\nplt.scatter(X_test[\"weight\"], predictions, c='r')\n\n\nplt.show()", "<p>The variance score is very low because we use all the features.</p>\n<p>For a better solution we apply feature selection.</p>\n<p>From the pair graph, we can see that the features 'cylinders', 'model year' and 'origin' don't show a clear correlation with the 'mpg' variable, so those are rejected.</p>", "c = ['mpg','horsepower', \"weight\",'acceleration']\nlimit = int(3*cars.shape[0]/4)\nX_train = cars[c].iloc[:limit,1:]\ny_train = cars.iloc[:limit,0]\nX_test = cars[c].iloc[limit:,1:]\ny_test = cars.iloc[limit:,0]\n\nlr2 = LinearRegression()\n\n#Fit the model on the reduced feature set\nlr2.fit(X_train, y_train)\npredictions = lr2.predict(X_test)\n\n#mean_squared_error\nmse = mean_squared_error(y_test, predictions)\n\n# The coefficients\nprint('Coefficients: ', lr2.coef_)\n# The mean squared error\nprint(\"Mean squared error: %.2f\" % mse)\n# Explained variance score: 1 is perfect prediction\nprint('Variance score: %.2f' % lr2.score(X_test, y_test))\n\nfeature = \"weight\"\n\nplt.scatter(X_test[feature], y_test, c='b')\nplt.scatter(X_test[feature], predictions, c='r')\n\nplt.ylim([10,45])\nplt.xlim([1500,4000])\nplt.ylabel('MPG')\nplt.xlabel(feature)\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
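The regression notebook above reports a mean squared error, which is not in the units of the target. As a hedged sketch (the two short arrays are invented stand-ins for y_test and the model's predictions), taking the square root gives a typical error in mpg, which is usually easier to judge:

```python
import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([18.0, 25.0, 31.0, 22.0])
y_pred = np.array([16.0, 27.0, 30.0, 21.0])

mse = mean_squared_error(y_true, y_pred)
rmse = np.sqrt(mse)   # typical prediction error, in mpg
print(mse, rmse)      # 2.5  ~1.58
```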
Merinorus/adaisawesome
Homework/01 - Pandas and Data Wrangling/temp/Data Wrangling with Pandas.ipynb
gpl-3.0
[ "Table of Contents\n<p><div class=\"lev1\"><a href=\"#Data-Wrangling-with-Pandas\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Data Wrangling with Pandas</a></div><div class=\"lev2\"><a href=\"#Date/Time-data-handling\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Date/Time data handling</a></div><div class=\"lev2\"><a href=\"#Merging-and-joining-DataFrame-objects\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Merging and joining DataFrame objects</a></div><div class=\"lev2\"><a href=\"#Concatenation\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>Concatenation</a></div><div class=\"lev2\"><a href=\"#Exercise-1\"><span class=\"toc-item-num\">1.4&nbsp;&nbsp;</span>Exercise 1</a></div><div class=\"lev2\"><a href=\"#Reshaping-DataFrame-objects\"><span class=\"toc-item-num\">1.5&nbsp;&nbsp;</span>Reshaping DataFrame objects</a></div><div class=\"lev2\"><a href=\"#Pivoting\"><span class=\"toc-item-num\">1.6&nbsp;&nbsp;</span>Pivoting</a></div><div class=\"lev2\"><a href=\"#Data-transformation\"><span class=\"toc-item-num\">1.7&nbsp;&nbsp;</span>Data transformation</a></div><div class=\"lev3\"><a href=\"#Dealing-with-duplicates\"><span class=\"toc-item-num\">1.7.1&nbsp;&nbsp;</span>Dealing with duplicates</a></div><div class=\"lev3\"><a href=\"#Value-replacement\"><span class=\"toc-item-num\">1.7.2&nbsp;&nbsp;</span>Value replacement</a></div><div class=\"lev3\"><a href=\"#Inidcator-variables\"><span class=\"toc-item-num\">1.7.3&nbsp;&nbsp;</span>Inidcator variables</a></div><div class=\"lev2\"><a href=\"#Categorical-Data\"><span class=\"toc-item-num\">1.8&nbsp;&nbsp;</span>Categorical Data</a></div><div class=\"lev3\"><a href=\"#Discretization\"><span class=\"toc-item-num\">1.8.1&nbsp;&nbsp;</span>Discretization</a></div><div class=\"lev3\"><a href=\"#Permutation-and-sampling\"><span class=\"toc-item-num\">1.8.2&nbsp;&nbsp;</span>Permutation and sampling</a></div><div class=\"lev2\"><a href=\"#Data-aggregation-and-GroupBy-operations\"><span class=\"toc-item-num\">1.9&nbsp;&nbsp;</span>Data aggregation and GroupBy operations</a></div><div class=\"lev3\"><a href=\"#Apply\"><span class=\"toc-item-num\">1.9.1&nbsp;&nbsp;</span>Apply</a></div><div class=\"lev2\"><a href=\"#Exercise-2\"><span class=\"toc-item-num\">1.10&nbsp;&nbsp;</span>Exercise 2</a></div><div class=\"lev2\"><a href=\"#References\"><span class=\"toc-item-num\">1.11&nbsp;&nbsp;</span>References</a></div>\n\n# Data Wrangling with Pandas\n\nNow that we have been exposed to the basic functionality of Pandas, lets explore some more advanced features that will be useful when addressing more complex data management tasks.\n\nAs most statisticians/data analysts will admit, often the lion's share of the time spent implementing an analysis is devoted to preparing the data itself, rather than to coding or running a particular model that uses the data. This is where Pandas and Python's standard library are beneficial, providing high-level, flexible, and efficient tools for manipulating your data as needed.", "%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('notebook')", "Date/Time data handling\nDate and time data are inherently problematic. There are an unequal number of days in every month, an unequal number of days in a year (due to leap years), and time zones that vary over space. 
Yet information about time is essential in many analyses, particularly in the case of time series analysis.\nThe datetime built-in library handles temporal information down to the nanosecond.", "from datetime import datetime\n\nnow = datetime.now()\nnow\n\nnow.day\n\nnow.weekday()", "In addition to datetime there are simpler objects for date and time information only, respectively.", "from datetime import date, time\n\ntime(3, 24)\n\ndate(1970, 9, 3)", "Having a custom data type for dates and times is convenient because we can perform operations on them easily. For example, we may want to calculate the difference between two times:", "my_age = now - datetime(1970, 1, 1)\nmy_age\n\nprint(type(my_age))\nmy_age.days/365", "In this section, we will manipulate data collected from ocean-going vessels on the eastern seaboard. Vessel operations are monitored using the Automatic Identification System (AIS), a safety at sea navigation technology which vessels are required to maintain and that uses transponders to transmit very high frequency (VHF) radio signals containing static information including ship name, call sign, and country of origin, as well as dynamic information unique to a particular voyage such as vessel location, heading, and speed. \nThe International Maritime Organization’s (IMO) International Convention for the Safety of Life at Sea requires functioning AIS capabilities on all vessels 300 gross tons or greater and the US Coast Guard requires AIS on nearly all vessels sailing in U.S. waters. The Coast Guard has established a national network of AIS receivers that provides coverage of nearly all U.S. waters. AIS signals are transmitted several times each minute and the network is capable of handling thousands of reports per minute and updates as often as every two seconds. Therefore, a typical voyage in our study might include the transmission of hundreds or thousands of AIS encoded signals. This provides a rich source of spatial data that includes both spatial and temporal information.\nFor our purposes, we will use summarized data that describes the transit of a given vessel through a particular administrative area. The data includes the start and end time of the transit segment, as well as information about the speed of the vessel, how far it travelled, etc.", "segments = pd.read_csv(\"Data/AIS/transit_segments.csv\")\nsegments.head()", "For example, we might be interested in the distribution of transit lengths, so we can plot them as a histogram:", "segments.seg_length.hist(bins=500)", "Though most of the transits appear to be short, there are a few longer distances that make the plot difficult to read. This is where a transformation is useful:", "segments.seg_length.apply(np.log).hist(bins=500)", "We can see that although there are date/time fields in the dataset, they are not in any specialized format, such as datetime.", "segments.st_time.dtype", "Our first order of business will be to convert these data to datetime. 
The strptime method parses a string representation of a date and/or time field, according to the expected format of this information.", "datetime.strptime(segments.st_time.ix[0], '%m/%d/%y %H:%M')", "The dateutil package includes a parser that attempts to detect the format of the date strings, and convert them automatically.", "from dateutil.parser import parse\n\nparse(segments.st_time.ix[0])", "We can convert all the dates in a particular column by using the apply method.", "segments.st_time.apply(lambda d: datetime.strptime(d, '%m/%d/%y %H:%M'))", "As a convenience, Pandas has a to_datetime method that will parse and convert an entire Series of formatted strings into datetime objects.", "pd.to_datetime(segments.st_time[:10])", "Pandas also has a custom NA value for missing datetime objects, NaT.", "pd.to_datetime([None])", "Also, if to_datetime() has problems parsing any particular date/time format, you can pass the spec in using the format= argument.\nThe read_* functions now have an optional parse_dates argument that try to convert any columns passed to it into datetime format upon import:", "segments = pd.read_csv(\"Data/AIS/transit_segments.csv\", parse_dates=['st_time', 'end_time'])\n\nsegments.dtypes", "Columns of the datetime type have an accessor to easily extract properties of the data type. This will return a Series, with the same row index as the DataFrame. For example:", "segments.st_time.dt.month.head()\n\nsegments.st_time.dt.hour.head()", "This can be used to easily filter rows by particular temporal attributes:", "segments[segments.st_time.dt.month==2].head()", "In addition, time zone information can be applied:", "segments.st_time.dt.tz_localize('UTC').head()\n\nsegments.st_time.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').head()", "Merging and joining DataFrame objects\nNow that we have the vessel transit information as we need it, we may want a little more information regarding the vessels themselves. In the data/AIS folder there is a second table that contains information about each of the ships that traveled the segments in the segments table.", "vessels = pd.read_csv(\"Data/AIS/vessel_information.csv\", index_col='mmsi')\nvessels.head()\n\n[v for v in vessels.type.unique() if v.find('/')==-1]\n\nvessels.type.value_counts()", "The challenge, however, is that several ships have travelled multiple segments, so there is not a one-to-one relationship between the rows of the two tables. The table of vessel information has a one-to-many relationship with the segments.\nIn Pandas, we can combine tables according to the value of one or more keys that are used to identify rows, much like an index. Using a trivial example:", "df1 = pd.DataFrame(dict(id=range(4), age=np.random.randint(18, 31, size=4)))\ndf2 = pd.DataFrame(dict(id=list(range(3))+list(range(3)), \n score=np.random.random(size=6)))\n\ndf1\n\ndf2\n\npd.merge(df1, df2)", "Notice that without any information about which column to use as a key, Pandas did the right thing and used the id column in both tables. Unless specified otherwise, merge will used any common column names as keys for merging the tables. \nNotice also that id=3 from df1 was omitted from the merged table. This is because, by default, merge performs an inner join on the tables, meaning that the merged table represents an intersection of the two tables.", "pd.merge(df1, df2, how='outer')", "The outer join above yields the union of the two tables, so all rows are represented, with missing values inserted as appropriate. 
One can also perform right and left joins to include all rows of the right or left table (i.e. first or second argument to merge), but not necessarily the other.\nLooking at the two datasets that we wish to merge:", "segments.head(1)\n\nvessels.head(1)", "we see that there is a mmsi value (a vessel identifier) in each table, but it is used as an index for the vessels table. In this case, we have to specify to join on the index for this table, and on the mmsi column for the other.", "segments_merged = pd.merge(vessels, segments, left_index=True, right_on='mmsi')\n\nsegments_merged.head()", "In this case, the default inner join is suitable; we are not interested in observations from either table that do not have corresponding entries in the other. \nNotice that mmsi field that was an index on the vessels table is no longer an index on the merged table.\nHere, we used the merge function to perform the merge; we could also have used the merge method for either of the tables:", "vessels.merge(segments, left_index=True, right_on='mmsi').head()", "Occasionally, there will be fields with the same in both tables that we do not wish to use to join the tables; they may contain different information, despite having the same name. In this case, Pandas will by default append suffixes _x and _y to the columns to uniquely identify them.", "segments['type'] = 'foo'\npd.merge(vessels, segments, left_index=True, right_on='mmsi').head()", "This behavior can be overridden by specifying a suffixes argument, containing a list of the suffixes to be used for the columns of the left and right columns, respectively.\nConcatenation\nA common data manipulation is appending rows or columns to a dataset that already conform to the dimensions of the exsiting rows or colums, respectively. In NumPy, this is done either with concatenate or the convenience \"functions\" c_ and r_:", "np.concatenate([np.random.random(5), np.random.random(5)])\n\nnp.r_[np.random.random(5), np.random.random(5)]\n\nnp.c_[np.random.random(5), np.random.random(5)]", "Notice that c_ and r_ are not really functions at all, since it is performing some sort of indexing operation, rather than being called. They are actually class instances, but they are here behaving mostly like functions. Don't think about this too hard; just know that they are there.\n\nThis operation is also called binding or stacking.\nWith Pandas' indexed data structures, there are additional considerations as the overlap in index values between two data structures affects how they are concatenate.\nLets import two microbiome datasets, each consisting of counts of microorganiams from a particular patient. 
We will use the first column of each dataset as the index.", "mb1 = pd.read_excel('Data/microbiome/MID1.xls', 'Sheet 1', index_col=0, header=None)\nmb2 = pd.read_excel('Data/microbiome/MID2.xls', 'Sheet 1', index_col=0, header=None)\nmb1.shape, mb2.shape\n\nmb1.head()", "Let's give the index and columns meaningful labels:", "mb1.columns = mb2.columns = ['Count']\n\nmb1.index.name = mb2.index.name = 'Taxon'\n\nmb1.head()", "The index of these data is the unique biological classification of each organism, beginning with domain, phylum, class, and for some organisms, going all the way down to the genus level.", "mb1.index[:3]\n\nmb1.index.is_unique", "If we concatenate along axis=0 (the default), we will obtain another data frame with the the rows concatenated:", "pd.concat([mb1, mb2], axis=0).shape", "However, the index is no longer unique, due to overlap between the two DataFrames.", "pd.concat([mb1, mb2], axis=0).index.is_unique", "Concatenating along axis=1 will concatenate column-wise, but respecting the indices of the two DataFrames.", "pd.concat([mb1, mb2], axis=1).shape\n\npd.concat([mb1, mb2], axis=1).head()", "If we are only interested in taxa that are included in both DataFrames, we can specify a join=inner argument.", "pd.concat([mb1, mb2], axis=1, join='inner').head()", "If we wanted to use the second table to fill values absent from the first table, we could use combine_first.", "mb1.combine_first(mb2).head()", "We can also create a hierarchical index based on keys identifying the original tables.", "pd.concat([mb1, mb2], keys=['patient1', 'patient2']).head()\n\npd.concat([mb1, mb2], keys=['patient1', 'patient2']).index.is_unique", "Alternatively, you can pass keys to the concatenation by supplying the DataFrames (or Series) as a dict, resulting in a \"wide\" format table.", "pd.concat(dict(patient1=mb1, patient2=mb2), axis=1).head()", "If you want concat to work like numpy.concatanate, you may provide the ignore_index=True argument.\nExercise 1\nIn the data/microbiome subdirectory, there are 9 spreadsheets of microbiome data that was acquired from high-throughput RNA sequencing procedures, along with a 10th file that describes the content of each. Write code that imports each of the data spreadsheets and combines them into a single DataFrame, adding the identifying information from the metadata spreadsheet as columns in the combined DataFrame.", "# Write solution here", "Reshaping DataFrame objects\nIn the context of a single DataFrame, we are often interested in re-arranging the layout of our data. \nThis dataset is from Table 6.9 of Statistical Methods for the Analysis of Repeated Measurements by Charles S. Davis, pp. 161-163 (Springer, 2002). These data are from a multicenter, randomized controlled trial of botulinum toxin type B (BotB) in patients with cervical dystonia from nine U.S. sites.\n\nRandomized to placebo (N=36), 5000 units of BotB (N=36), 10,000 units of BotB (N=37)\nResponse variable: total score on Toronto Western Spasmodic Torticollis Rating Scale (TWSTRS), measuring severity, pain, and disability of cervical dystonia (high scores mean more impairment)\nTWSTRS measured at baseline (week 0) and weeks 2, 4, 8, 12, 16 after treatment began", "cdystonia = pd.read_csv(\"Data/cdystonia.csv\", index_col=None)\ncdystonia.head()", "This dataset includes repeated measurements of the same individuals (longitudinal data). 
It's possible to present such information in (at least) two ways: showing each repeated measurement in its own row, or in multiple columns representing multiple measurements.\nThe stack method rotates the data frame so that columns are represented in rows:", "stacked = cdystonia.stack()\nstacked", "To complement this, unstack pivots from rows back to columns.", "stacked.unstack().head()", "For this dataset, it makes sense to create a hierarchical index based on the patient and observation:", "cdystonia2 = cdystonia.set_index(['patient','obs'])\ncdystonia2.head()\n\ncdystonia2.index.is_unique", "If we want to transform this data so that repeated measurements are in columns, we can unstack the twstrs measurements according to obs.", "twstrs_wide = cdystonia2['twstrs'].unstack('obs')\ntwstrs_wide.head()\n\ncdystonia_wide = (cdystonia[['patient','site','id','treat','age','sex']]\n .drop_duplicates()\n .merge(twstrs_wide, right_index=True, left_on='patient', how='inner')\n .head())\ncdystonia_wide", "A slightly cleaner way of doing this is to set the patient-level information as an index before unstacking:", "(cdystonia.set_index(['patient','site','id','treat','age','sex','week'])['twstrs']\n .unstack('week').head())", "To convert our \"wide\" format back to long, we can use the melt function, appropriately parameterized. This function is useful for DataFrames where one\nor more columns are identifier variables (id_vars), with the remaining columns being measured variables (value_vars). The measured variables are \"unpivoted\" to\nthe row axis, leaving just two non-identifier columns, a variable and its corresponding value, which can both be renamed using optional arguments.", "pd.melt(cdystonia_wide, id_vars=['patient','site','id','treat','age','sex'], \n var_name='obs', value_name='twstrs').head()", "This illustrates the two formats for longitudinal data: long and wide formats. It's typically better to store data in long format because additional data can be included as additional rows in the database, while wide format requires that the entire database schema be altered by adding columns to every row as data are collected.\nThe preferable format for analysis depends entirely on what is planned for the data, so it is important to be able to move easily between them.\nPivoting\nThe pivot method allows a DataFrame to be transformed easily between long and wide formats in the same way as a pivot table is created in a spreadsheet. It takes three arguments: index, columns and values, corresponding to the DataFrame index (the row headers), columns and cell values, respectively.\nFor example, we may want the twstrs variable (the response variable) in wide format according to patient, as we saw with the unstacking method above:", "cdystonia.pivot(index='patient', columns='obs', values='twstrs').head()", "If we omit the values argument, we get a DataFrame with hierarchical columns, just as when we applied unstack to the hierarchically-indexed table:", "cdystonia.pivot('patient', 'obs')", "A related method, pivot_table, creates a spreadsheet-like table with a hierarchical index, and allows the values of the table to be populated using an arbitrary aggregation function.", "cdystonia.pivot_table(index=['site', 'treat'], columns='week', values='twstrs', \n aggfunc=max).head(20)", "For a simple cross-tabulation of group frequencies, the crosstab function (not a method) aggregates counts of data according to factors in rows and columns. 
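To see why pivot_table takes an aggregation function while pivot does not, here is a small hedged sketch (made-up values, not the cdystonia data): pivot raises an error when an index/column pair occurs more than once, whereas pivot_table aggregates the duplicates.\n\n```python\nimport pandas as pd\n\ntoy = pd.DataFrame({'row': ['a', 'a', 'b'],\n                    'col': ['x', 'x', 'y'],\n                    'val': [1, 2, 3]})\n\n# toy.pivot(index='row', columns='col', values='val')  # would raise: duplicate entries\ntoy.pivot_table(index='row', columns='col', values='val', aggfunc='mean')\n```\n\nReturning to crosstab: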
The factors may be hierarchical if desired.", "pd.crosstab(cdystonia.sex, cdystonia.site)", "Data transformation\nThere is a slew of additional operations for DataFrames that we would collectively refer to as \"transformations\", which include tasks such as removing duplicate values, replacing values, and grouping values.\nDealing with duplicates\nWe can easily identify and remove duplicate values from DataFrame objects. For example, say we want to remove ships from our vessels dataset that have the same name:", "vessels.duplicated(subset='names')\n\nvessels.drop_duplicates(['names'])", "Value replacement\nFrequently, we get data columns that are encoded as strings that we wish to represent numerically for the purposes of including them in a quantitative analysis. For example, consider the treatment variable in the cervical dystonia dataset:", "cdystonia.treat.value_counts()", "A logical way to specify these numerically is to change them to integer values, perhaps using \"Placebo\" as a baseline value. If we create a dict with the original values as keys and the replacements as values, we can pass it to the map method to implement the changes.", "treatment_map = {'Placebo': 0, '5000U': 1, '10000U': 2}\n\ncdystonia['treatment'] = cdystonia.treat.map(treatment_map)\ncdystonia.treatment", "Alternately, if we simply want to replace particular values in a Series or DataFrame, we can use the replace method. \nAn example where replacement is useful is dealing with zeros in certain transformations. For example, if we try to take the log of a set of values:", "vals = pd.Series([float(i)**10 for i in range(10)])\nvals\n\nnp.log(vals)", "In such situations, we can replace the zero with a value so small that it makes no difference to the ensuing analysis. We can do this with replace.", "vals = vals.replace(0, 1e-6)\nnp.log(vals)", "We can also perform the same replacement that we used map for with replace:", "cdystonia2.treat.replace({'Placebo': 0, '5000U': 1, '10000U': 2})", "Indicator variables\nFor some statistical analyses (e.g. regression models or analyses of variance), categorical or group variables need to be converted into columns of indicators--zeros and ones--to create a so-called design matrix. The Pandas function get_dummies (indicator variables are also known as dummy variables) makes this transformation straightforward.\nLet's consider the DataFrame containing the ships corresponding to the transit segments on the eastern seaboard. The type variable denotes the class of vessel; we can create a matrix of indicators for this. For simplicity, let's restrict attention to the 5 most common types of ships:", "top5 = vessels.type.isin(vessels.type.value_counts().index[:5])\ntop5.head(10)\n\nvessels5 = vessels[top5]\n\npd.get_dummies(vessels5.type).head(10)", "Categorical Data\nPandas provides a convenient dtype for representing categorical (factor) data, called category. \nFor example, the treat column in the cervical dystonia dataset represents three treatment levels in a clinical trial, and is imported by default as an object type, since its values are strings.", "cdystonia.treat.head()", "We can convert this to a category type either with the Categorical constructor, or by casting the column using astype:", "pd.Categorical(cdystonia.treat)\n\ncdystonia['treat'] = cdystonia.treat.astype('category')\n\ncdystonia.treat.describe()", "By default the Categorical type represents an unordered categorical.", "cdystonia.treat.cat.categories", "However, an ordering can be imposed. 
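A tiny hedged sketch of imposing an explicit order (invented values, not the trial data):\n\n```python\nimport pandas as pd\n\ns = pd.Series(['small', 'large', 'medium'], dtype='category')\ns = s.cat.set_categories(['small', 'medium', 'large'], ordered=True)\ns.sort_values()  # now sorts small < medium < large rather than alphabetically\n```\n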
For the treat column, the order is lexical by default, but pandas will take the order of the categories you list to be the desired order.", "cdystonia.treat.cat.categories = ['Placebo', '5000U', '10000U']\n\ncdystonia.treat.cat.as_ordered().head()", "The important difference between the category type and the object type is that category is represented by an underlying array of integers, which is then mapped to character labels.", "cdystonia.treat.cat.codes", "Notice that these are 8-bit integers, which are essentially single bytes of data, making memory usage lower.\nThere is also a performance benefit. Consider an operation such as calculating the total segment lengths for each ship in the segments table (this is also a preview of pandas' groupby operation!):", "%time segments.groupby(segments.name).seg_length.sum().sort_values(ascending=False, inplace=False).head()\n\nsegments['name'] = segments.name.astype('category')\n\n%time segments.groupby(segments.name).seg_length.sum().sort_values(ascending=False, inplace=False).head()", "Hence, we get a considerable speedup simply by using the appropriate dtype for our data.\nDiscretization\nPandas' cut function can be used to group continuous or countable data into bins. Discretization is generally a very bad idea for statistical analysis, so use this function responsibly!\nLet's say we want to bin the ages of the cervical dystonia patients into a smaller number of groups:", "cdystonia.age.describe()", "Let's transform these data into decades, beginning with individuals in their 20's and ending with those in their 80's:", "pd.cut(cdystonia.age, [20,30,40,50,60,70,80,90])[:30]", "The parenthesis indicates an open (exclusive) endpoint, which is not included in the interval, whereas the square bracket indicates a closed (inclusive) endpoint, which is included. We can switch the closure to the left side by setting the right flag to False:", "pd.cut(cdystonia.age, [20,30,40,50,60,70,80,90], right=False)[:30]", "Since the data are now ordinal, rather than numeric, we can give them labels:", "pd.cut(cdystonia.age, [20,40,60,80,90], labels=['young','middle-aged','old','really old'])[:30]", "A related function qcut uses empirical quantiles to divide the data. If, for example, we want the quartiles -- (0-25%], (25-50%], (50-75%], (75-100%] -- we can just specify 4 intervals, which by default will each contain roughly the same number of observations:", "pd.qcut(cdystonia.age, 4)[:30]", "Alternatively, one can specify custom quantiles to act as cut points:", "quantiles = pd.qcut(segments.seg_length, [0, 0.01, 0.05, 0.95, 0.99, 1])\nquantiles[:30]", "Note that you can easily combine discretization with the generation of indicator variables shown above:", "pd.get_dummies(quantiles).head(10)", "Permutation and sampling\nFor some data analysis tasks, such as simulation, we need to be able to randomly reorder our data, or draw random values from it. 
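(One practical hedged note, added for illustration: to make the shuffles and samples below repeatable from run to run, you can fix NumPy's random seed first.)\n\n```python\nimport numpy as np\n\nnp.random.seed(42)  # any fixed integer makes the subsequent random draws reproducible\n```\n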
Calling NumPy's permutation function with the length of the sequence you want to permute generates an array with a permuted sequence of integers, which can be used to re-order the sequence.", "new_order = np.random.permutation(len(segments))\nnew_order[:30]", "Using this sequence as an argument to the take method results in a reordered DataFrame:", "segments.take(new_order).head()", "Compare this ordering with the original:", "segments.head()", "For random sampling, DataFrame and Series objects have a sample method that can be used to draw samples, with or without replacement:", "vessels.sample(n=10)\n\nvessels.sample(n=10, replace=True)", "Data aggregation and GroupBy operations\nOne of the most powerful features of Pandas is its GroupBy functionality. On occasion we may want to perform operations on groups of observations within a dataset. For example:\n\naggregation, such as computing the sum or mean of each group, which involves applying a function to each group and returning the aggregated results\nslicing the DataFrame into groups and then doing something with the resulting slices (e.g. plotting)\ngroup-wise transformation, such as standardization/normalization", "cdystonia_grouped = cdystonia.groupby(cdystonia.patient)", "This grouped dataset is hard to visualize", "cdystonia_grouped", "However, the grouping is only an intermediate step; for example, we may want to iterate over each of the patient groups:", "for patient, group in cdystonia_grouped:\n    print('patient', patient)\n    print('group', group)", "A common data analysis procedure is the split-apply-combine operation, which groups subsets of data together, applies a function to each of the groups, then recombines them into a new data table.\nFor example, we may want to aggregate our data with some function.\n\n<div align=\"right\">*(figure taken from \"Python for Data Analysis\", p.251)*</div>\n\nWe can aggregate in Pandas using the aggregate (or agg, for short) method:", "cdystonia_grouped.agg(np.mean).head()", "Notice that the treat and sex variables are not included in the aggregation. Since it does not make sense to aggregate string (non-numeric) variables, these columns are simply ignored by the method.\nSome aggregation functions are so common that Pandas has a convenience method for them, such as mean:", "cdystonia_grouped.mean().head()", "The add_prefix and add_suffix methods can be used to give the columns of the resulting table labels that reflect the transformation:", "cdystonia_grouped.mean().add_suffix('_mean').head()\n\n# The median of the `twstrs` variable\ncdystonia_grouped['twstrs'].quantile(0.5)", "If we wish, we can easily aggregate according to multiple keys:", "cdystonia.groupby(['week','site']).mean().head()", "Alternately, we can transform the data, using a function of our choice with the transform method:", "normalize = lambda x: (x - x.mean())/x.std()\n\ncdystonia_grouped.transform(normalize).head()", "It is easy to do column selection within groupby operations, if we are only interested in split-apply-combine operations on a subset of columns:", "cdystonia_grouped['twstrs'].mean().head()\n\n# This gives the same result as a DataFrame\ncdystonia_grouped[['twstrs']].mean().head()", "If you simply want to divide your DataFrame into chunks for later use, it's easy to convert them into a dict so that they can be easily indexed out as needed:", "chunks = dict(list(cdystonia_grouped))\n\nchunks[4]", "By default, groupby groups by row, but we can specify the axis argument to change this. 
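The agg method will also accept several functions at once, or a mapping from column name to function; a short hedged sketch using the grouped cdystonia data defined above:\n\n```python\n# Several summaries of the response variable in a single call...\ncdystonia_grouped['twstrs'].agg(['mean', 'std', 'min', 'max']).head()\n\n# ...or different aggregations for different columns.\ncdystonia_grouped.agg({'twstrs': 'mean', 'age': 'max'}).head()\n```\n\nBack to grouping along the column axis: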
For example, we can group our columns by dtype this way:", "grouped_by_type = cdystonia.groupby(cdystonia.dtypes, axis=1)\n{g:grouped_by_type.get_group(g) for g in grouped_by_type.groups}", "It's also possible to group by one or more levels of a hierarchical index. Recall cdystonia2, which we created with a hierarchical index:", "cdystonia2.head(10)\n\ncdystonia2.groupby(level='obs', axis=0)['twstrs'].mean()", "Apply\nWe can generalize the split-apply-combine methodology by using the apply function. This allows us to invoke any function we wish on a grouped dataset and recombine the results into a DataFrame.\nThe function below takes a DataFrame and a column name, sorts by the column, and takes the n largest values of that column. We can use this with apply to return the largest values from every group in a DataFrame in a single call.", "def top(df, column, n=5):\n    return df.sort_values(by=column, ascending=False)[:n]", "To see this in action, consider the vessel transit segments dataset (which we merged with the vessel information to yield segments_merged). Say we wanted to return the 3 longest segments travelled by each ship:", "top3segments = segments_merged.groupby('mmsi').apply(top, column='seg_length', n=3)[['names', 'seg_length']]\ntop3segments.head(15)", "Notice that additional arguments for the applied function can be passed via apply after the function name. It assumes that the DataFrame is the first argument.\nRecall the microbiome data sets that we used previously for the concatenation example. Suppose that we wish to aggregate the data at a higher biological classification than genus. For example, we can identify samples down to class, which is the 3rd level of organization in each index.", "mb1.index[:3]", "Using the string methods split and join we can create an index that just uses the first three classifications: domain, phylum and class.", "class_index = mb1.index.map(lambda x: ' '.join(x.split(' ')[:3]))\n\nmb_class = mb1.copy()\nmb_class.index = class_index", "However, since there are multiple taxonomic units with the same class, our index is no longer unique:", "mb_class.head()", "We can re-establish a unique index by summing all rows with the same class, using groupby:", "mb_class.groupby(level=0).sum().head(10)", "Exercise 2\nLoad the dataset in titanic.xls. It contains data on all the passengers that travelled on the Titanic.", "from IPython.core.display import HTML\nHTML(filename='Data/titanic.html')", "Women and children first?\n\nDescribe each attribute, both with basic statistics and plots. State clearly your assumptions and discuss your findings.\nUse the groupby method to calculate the proportion of passengers that survived by sex.\nCalculate the same proportion, but by class and sex.\nCreate age categories: children (under 14 years), adolescents (14-20), adult (21-64), and senior (65+), and calculate survival proportions by age category, class and sex.", "titanic_df = pd.read_excel('Data/titanic.xls', 'titanic', index_col=None, header=0)\n\ntitanic_df\n\ntitanic_nameduplicate = titanic_df.duplicated(subset='name')\n\n#titanic_nameduplicate\n\ntitanic_df.drop_duplicates(['name'])\n\ngender_map = {'male':0, 'female':1}\n\ntitanic_df['sex'] = titanic_df.sex.map(gender_map)\n\ntitanic_df\n\ntitanic_grouped = titanic_df.groupby(titanic_df.sex)\n\ntitanic_grouped\n\nfor sex, group in titanic_grouped:\n    print('sex', sex)\n    print('group', group)\n\n# Proportion of passengers that survived, by sex\ntitanic_grouped['survived'].mean()", "References\nPython for Data Analysis Wes McKinney" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
diegocavalca/Studies
phd-thesis/benchmarkings/am207-NILM-project-master/CO.ipynb
cc0-1.0
[ "Karen Yu, Nick Vasios, Thibaut Perol\nAM207 Final Project\nEnergy Disaggregation from Non-Intrusive Load Monitoring\nDISAGGREGATION USING COMBINATORIAL OPTIMIZATION\nImporting Necessary Packages", "from __future__ import print_function, division\n\nimport numpy as np\nimport pandas as pd\nfrom os.path import join\nimport pickle\nimport copy\n\nfrom pylab import rcParams\nimport matplotlib.pyplot as plt\n%matplotlib inline\nrcParams['figure.figsize'] = (13, 6)\nimport nilmtk\nfrom nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore\nfrom nilmtk.utils import print_dict,find_nearest\nfrom nilmtk.feature_detectors import cluster\nfrom nilmtk.disaggregate import Disaggregator\nfrom nilmtk.electric import get_vampire_power\nfrom nilmtk.metrics import f1_score\n\nimport warnings\nfrom warnings import warn\nwarnings.filterwarnings(\"ignore\")\nimport seaborn as sns\n# sns.set_style(\"white\")\n\n# Fix the seed for repeatability of experiments\nSEED = 42\nnp.random.seed(SEED)", "The Heart of the Notebook: The Combinatorial Optimization class", "class CombinatorialOptimisation(Disaggregator):\n \"\"\"\n \n A Combinatorial Optimization Algorithm based on the implementation by NILMTK\n \n This class is build upon the main Dissagregator class already implemented by NILMTK\n All the methods from Dissagregator are passed in here as well since we import the class\n as shown above. We should note howeger that Dissagregator is nothing more than a general interface\n class upon which all dissagregator algortihms are build. All the methods are initialized in the \n Dissagregator class but the specific implementation is based upon the method to be implemented. \n In other words, even though we pass in Dissagregator, all methods will be redefined again to work with \n the Combinatorial Optimization algorithm as you can see below. \n\n \n Attributes\n ----------\n model : list of dicts\n Each dict has these keys:\n states : list of ints (the power (Watts) used in different states)\n training_metadata : ElecMeter or MeterGroup object used for training\n this set of states. We need this information because we\n need the appliance type (and perhaps some other metadata)\n for each model.\n\n state_combinations : 2D array\n Each column is an appliance.\n Each row is a possible combination of power demand values e.g.\n [[0, 0, 0, 0],\n [0, 0, 0, 100],\n [0, 0, 50, 0],\n [0, 0, 50, 100], ...]\n\n MIN_CHUNK_LENGTH : int\n \"\"\"\n\n def __init__(self):\n self.model = []\n self.state_combinations = None\n self.MIN_CHUNK_LENGTH = 100\n self.MODEL_NAME = 'Combinatorial Optimization'\n\n def train(self, metergroup, num_states_dict=None, **load_kwargs):\n \"\"\"\n Train using 1D CO. Places the learnt model in the `model` attribute.\n\n Parameters\n ----------\n metergroup : a nilmtk.MeterGroup object\n num_states_dict : dict\n **load_kwargs : keyword arguments passed to `meter.power_series()`\n\n Notes\n -----\n * only uses first chunk for each meter (TODO: handle all chunks).\n \"\"\"\n \n # Initializing dictionary to save the number of states\n if num_states_dict is None:\n num_states_dict = {}\n\n # The CO class is only able to train in new models. We can only train once. 
If model exists, raise an error\n if self.model:\n raise RuntimeError(\n \"This implementation of Combinatorial Optimisation\"\n \" does not support multiple calls to `train`.\")\n\n # How many meters do we have in the training set?\n num_meters = len(metergroup.meters)\n \n # If more than 20 then reduce the number of clusters to reduce the computational cost. \n if num_meters > 20:\n max_num_clusters = 2\n else:\n max_num_clusters = 3\n \n print('Now training...')\n print('Loop in all meters begins...')\n \n # We now loop in all meters passed in in the training data set\n # Every time, we load the data in the meter and we call the method\n # --> train_on_chunk. For more info about this method please see below\n for i, meter in enumerate(metergroup.submeters().meters):\n \n #print('We now train for submeter {}'.format(meter))\n \n # Load the time series for the power consumption for this meter\n power_series = meter.power_series(**load_kwargs)\n \n # Note that we do not effectively load until we use the next() method\n # We load and save into chunk. Chunk will be used in training\n chunk = power_series.next()\n \n # Get the number of total states from the dictionary\n num_total_states = num_states_dict.get(meter)\n if num_total_states is not None:\n num_on_states = num_total_states - 1\n else:\n num_on_states = None\n \n #print('i={},num_total_states={},num_on_states={}'.format(i,meter,num_total_states,num_on_states)) \n \n # The actual training happens now. We call train_on_chunk using the time series we loaded on chunk for this meter\n self.train_on_chunk(chunk, meter, max_num_clusters, num_on_states)\n\n # Check to see if there are any more chunks.\n try:\n power_series.next()\n except StopIteration:\n pass\n else:\n warn(\"The current implementation of CombinatorialOptimisation\"\n \" can only handle a single chunk. But there are multiple\"\n \" chunks available. So have only trained on the\"\n \" first chunk!\")\n\n print(\"Done training!\")\n\n def train_on_chunk(self, chunk, meter, max_num_clusters, num_on_states):\n \"\"\"\n \n Train on chunk trains the Combinatorial Optimization Model based on the time series for the power consumption\n passed in chunk. This method is based on the sklearn machine learning library and in particular the KMEANS \n algorithm. It calls the cluster function which is imported in the beginning of this notebook. Cluster, prepares \n the data in chunk so that its size is always compatible and the same and then calls the KMEANS algorithm to \n perform the clustering. Function cluster returns only the centers of the clustered data which correspond to the\n individual states for the given appliance/meter \n \"\"\"\n \n # Check if we've already trained on this meter. 
We only allow training once on each meter\n meters_in_model = [d['training_metadata'] for d in self.model]\n if meter in meters_in_model:\n raise RuntimeError(\n \"Meter {} is already in model!\"\n \" Can't train twice on the same meter!\"\n .format(meter))\n\n # Do the KMEANS clustering and return the centers\n states = cluster(chunk, max_num_clusters, num_on_states)\n print('\\t Now Clustering in Train on Chunk')\n #print('\\t {}'.format(states))\n \n # Append the clustered data to the model\n self.model.append({\n 'states': states,\n 'training_metadata': meter})\n\n def _set_state_combinations_if_necessary(self):\n \"\"\"Get centroids\"\"\"\n # If we import sklearn at the top of the file then auto doc fails.\n if (self.state_combinations is None or\n self.state_combinations.shape[1] != len(self.model)):\n \n from sklearn.utils.extmath import cartesian\n \n # Saving the centroids in centroids (appliance states)\n centroids = [model['states'] for model in self.model]\n # Function cartesian returns all possible combinations \n # than can be performed using centroids\n self.state_combinations = cartesian(centroids)\n print()\n #print('Now printing the state combinations...')\n #print(cartesian(centroids))\n\n def disaggregate(self, mains, output_datastore,\n vampire_power=None, **load_kwargs):\n '''Disaggregate mains according to the model learnt previously.\n\n Parameters\n ----------\n mains : nilmtk.ElecMeter or nilmtk.MeterGroup\n output_datastore : instance of nilmtk.DataStore subclass\n For storing power predictions from disaggregation algorithm.\n vampire_power : None or number (watts)\n If None then will automatically determine vampire power\n from data. If you do not want to use vampire power then\n set vampire_power = 0.\n sample_period : number, optional\n The desired sample period in seconds. Set to 60 by default.\n sections : TimeFrameGroup, optional\n Set to mains.good_sections() by default.\n **load_kwargs : key word arguments\n Passed to `mains.power_series(**kwargs)`\n '''\n \n # Performing default pre disaggregation checks. Checking meters etc..\n load_kwargs = self._pre_disaggregation_checks(load_kwargs)\n\n # Disaggregation defauls. 
Sample perios and sections\n load_kwargs.setdefault('sample_period', 60)\n load_kwargs.setdefault('sections', mains.good_sections())\n\n # Initializing time frames and fetching the meter for the aggregated data\n timeframes = []\n building_path = '/building{}'.format(mains.building())\n mains_data_location = building_path + '/elec/meter1'\n data_is_available = False\n \n # We now load the aggregated data for power consumption of the whole house in small chunks\n # Every iteration of the following loop we perform the CO step to disaggregate\n \n counter = 0\n print('Disaggregation now begins...')\n for chunk in mains.power_series(**load_kwargs):\n counter += 1\n # Check that chunk is sensible size\n if len(chunk) < self.MIN_CHUNK_LENGTH:\n continue\n \n print('\\t Now processing chunk {}...'.format(counter))\n\n # Record metadata\n timeframes.append(chunk.timeframe)\n measurement = chunk.name\n\n # This is where the disaggregation happens\n # Vampire Power is just the minimum of the power series in this chunk\n appliance_powers = self.disaggregate_chunk(chunk, vampire_power)\n\n # Here we save the disaggregated data for this chunk in Pandas dataframe and update the \n # HDF5 file we created.\n for i, model in enumerate(self.model):\n # Fetch the disag data for this appliance\n appliance_power = appliance_powers[i]\n if len(appliance_power) == 0:\n continue\n data_is_available = True\n \n # Just for saving.. Nothing major happening here\n cols = pd.MultiIndex.from_tuples([chunk.name])\n meter_instance = model['training_metadata'].instance()\n df = pd.DataFrame(\n appliance_power.values, index=appliance_power.index,\n columns=cols)\n key = '{}/elec/meter{}'.format(building_path, meter_instance)\n output_datastore.append(key, df)\n\n # Copy mains data to disag output\n mains_df = pd.DataFrame(chunk, columns=cols)\n output_datastore.append(key=mains_data_location, value=mains_df)\n\n if data_is_available:\n self._save_metadata_for_disaggregation(\n output_datastore=output_datastore,\n sample_period=load_kwargs['sample_period'],\n measurement=measurement,\n timeframes=timeframes,\n building=mains.building(),\n meters=[d['training_metadata'] for d in self.model]\n )\n \n print('Disaggregation Completed Successfully...!!!')\n\n def disaggregate_chunk(self, mains, vampire_power=None):\n \"\"\"In-memory disaggregation.\n\n Parameters\n ----------\n mains : pd.Series\n vampire_power : None or number (watts)\n If None then will automatically determine vampire power\n from data. If you do not want to use vampire power then\n set vampire_power = 0.\n\n Returns\n -------\n appliance_powers : pd.DataFrame where each column represents a\n disaggregated appliance. Column names are the integer index\n into `self.model` for the appliance in question.\n \"\"\"\n if not self.model:\n raise RuntimeError(\n \"The model needs to be instantiated before\"\n \" calling `disaggregate`. 
The model\"\n \" can be instantiated by running `train`.\")\n\n if len(mains) < self.MIN_CHUNK_LENGTH:\n raise RuntimeError(\"Chunk is too short.\")\n\n # sklearn produces lots of DepreciationWarnings with PyTables\n import warnings\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n # Because CombinatorialOptimisation could have been trained using\n # either train() or train_on_chunk(), we must\n # set state_combinations here.\n self._set_state_combinations_if_necessary()\n\n # Add vampire power to the model (Min of power series of the aggregated data)\n if vampire_power is None:\n vampire_power = get_vampire_power(mains)\n if vampire_power > 0:\n print()\n #print(\"Including vampire_power = {} watts to model...\".format(vampire_power))\n \n # How many combinations\n n_rows = self.state_combinations.shape[0]\n vampire_power_array = np.zeros((n_rows, 1)) + vampire_power\n state_combinations = np.hstack(\n (self.state_combinations, vampire_power_array))\n else:\n state_combinations = self.state_combinations\n\n summed_power_of_each_combination = np.sum(state_combinations, axis=1)\n # summed_power_of_each_combination is now an array where each\n # value is the total power demand for each combination of states.\n\n # Start disaggregation\n \n # The following line finds the best combination from all the possible combinations\n # Returns the index to find the best combination as well as the residual\n # Uses the Find_Nearest algorithm\n indices_of_state_combinations, residual_power = find_nearest(\n summed_power_of_each_combination, mains.values)\n\n # Now update the state for each appliance with the optimal one and return the list\n # as Dataframe\n appliance_powers_dict = {}\n for i, model in enumerate(self.model):\n #print()\n #print(\"Estimating power demand for '{}'\".format(model['training_metadata']))\n predicted_power = state_combinations[\n indices_of_state_combinations, i].flatten()\n column = pd.Series(predicted_power, index=mains.index, name=i)\n appliance_powers_dict[i] = column\n\n appliance_powers = pd.DataFrame(appliance_powers_dict)\n return appliance_powers\n\n # The current implementation of the CO does not make use of the following 2 functions.\n #\n #\n # -------------------------------------------------------------------------------------\n def import_model(self, filename):\n imported_model = pickle.load(open(filename, 'r'))\n self.model = imported_model.model\n # recreate datastores from filenames\n for pair in self.model:\n pair['training_metadata'].store = HDFDataStore(\n pair['training_metadata'].store)\n self.state_combinations = imported_model.state_combinations\n self.MIN_CHUNK_LENGTH = imported_model.MIN_CHUNK_LENGTH\n\n def export_model(self, filename):\n # Can't pickle datastore, so convert to filenames\n exported_model = copy.deepcopy(self)\n for pair in exported_model.model:\n pair['training_metadata'].store = (\n pair['training_metadata'].store.store.filename)\n pickle.dump(exported_model, open(filename, 'wb'))", "Importing and Loading the REDD dataset", "data_dir = '\\Users\\Nick\\Google Drive\\PhD\\Courses\\Semester 2\\AM207\\Project'\nwe = DataSet(join(data_dir, 'REDD.h5'))\nprint('loaded ' + str(len(we.buildings)) + ' buildings')", "We want to train the Combinatorial Optimization Algorithm using the data for 5 buildings and then test it against the last building. 
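Before setting up the data, here is a stripped-down, hedged sketch of the core optimisation step implemented in the class above: enumerate every combination of appliance states and pick the combination whose summed power is closest to the observed aggregate reading (toy numbers, not REDD data).\n\n```python\nimport numpy as np\nfrom sklearn.utils.extmath import cartesian\n\n# Hypothetical state centroids (watts) for two appliances.\nfridge_states = np.array([0., 100., 200.])\nmicrowave_states = np.array([0., 1500.])\n\ncombos = cartesian([fridge_states, microwave_states])  # all 6 state combinations\ntotals = combos.sum(axis=1)                            # total power of each combination\n\nobserved = 1580.0                                      # a made-up aggregate reading\nbest = np.argmin(np.abs(totals - observed))            # nearest summed power wins\ncombos[best]                                           # -> array([ 100., 1500.])\n```\n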
To simplify our analysis and also to enable comparison with other methods (Neural Nets, FHMM, MLE etc.) we will only try to disaggregate data associated with the fridge and the microwave. However, the REDD dataset that we are using here does not contain measurements for the fridge and microwave for all buildings. In particular, building 4 does not have measurements for the fridge. As a result, we will exclude building 4 from the dataset and we will only import the meters associated with the fridge and microwave from the other buildings. \nThe training data set will consist of meters associated with the fridge and microwave from buildings 1, 2, 3 and 6. We will then test the combinatorial optimization algorithm against the aggregated data for building 5. \nWe first plot the time window span for all buildings", "for i in xrange(1,7):\n    print('Timeframe for building {} is {}'.format(i,we.buildings[i].elec.get_timeframe())) ", "Unfortunately, due to a bug in one of the main classes of the NILMTK package, the implementation of the Combinatorial Optimization does not save the meters for the disaggregated data correctly unless the building on which we test also exists in the training set. More on this issue can be found here https://github.com/nilmtk/nilmtk/issues/194\nHowever, for us it makes no sense to use the same building for training and testing since we would like to compare this algorithm with the results from FHMM and Neural Networks. In order to circumvent this bug we do the following:\nThe main issue is that the meter for the building we would like to disaggregate must be in the training set in order to be able to disaggregate correctly. That being said, we still want to train as little as possible on the meter we want to test on since we would like to see how the algorithm performs when a completely unknown dataset is available. In order to do that we create a metergroup comprising the following:\n1) The meters for the Fridge and Microwave for all buildings but building 5, since building 5 is the building we would like to test on. Later we will see that building 4 needs to be excluded as well because there is no meter associated with the fridge for this building. \n2) The meters for the Fridge and Microwave for building 5, which is the building we would like to test on, but with the time window limited to a very small one. Doing that, we make sure that the meters are there and understood by the Combinatorial Optimization class, but at the same time, by limiting the time window to just a few hours for this building, we do not provide enough data to overtrain. In other words, we only do this in order to be able to disaggregate correctly. \nAfter we train we will test the algorithm against the data from building 5 that weren't fed into the training meters. After we disaggregate we will compare with the ground truth for the same exact window. \nModifying Datasets to work with CO", "# Data file directory\ndata_dir = '\\Users\\Nick\\Google Drive\\PhD\\Courses\\Semester 2\\AM207\\Project'\n\n# Make the Data set\nData = DataSet(join(data_dir, 'REDD.h5'))\n\n# Make copies of the Data Set so that local changes would not affect the global dataset\nData_for_5 = DataSet(join(data_dir, 'REDD.h5'))\nData_for_rest = DataSet(join(data_dir, 'REDD.h5'))\n\n# How many buildings in the data set?\nprint(' Found {} buildings in the Data Set.. 
Buildings Loaded successfully.'.format(len(Data.buildings)))\n\n# This is the point that we will break the data from building 5 so that we only include a small \n# portion in the training set. In fact, the line below makes sure than only a day of data is seen during training.\nbreak_point = '2011-04-19 02:00'\n\n# Changing the window for building 5\nData_for_5.set_window(end=break_point)\n\n# Making a metergroup..\ne = [Data_for_5.buildings[5].elec[a] for a in ['fridge','microwave']]\nme = MeterGroup(e)\n\n# The data that we pass in for training for building 5 look like this...\nme.plot()", "Creating MeterGroups with the desired appliances from the desired buildings\nBelow we define a function tha is able to create a metergroup that only includes meters for the appliances that we are interested in and is also able to exclude buildings that we don't want in the meter. Also, if an appliance is requested but a meter is not found then the meter is skipped but the metergoup is created nontheless.", "def get_all_trainings(appliance, dataset, buildings_to_exclude):\n\n # Filtering by appliances: \n elecs = []\n for app in appliance:\n app_l = [app]\n print ('Now loading data for ' + app + ' for all buildings in the data to create the metergroup')\n print()\n for building in dataset.buildings: \n if building not in buildings_to_exclude:\n print ('Processing Building ' + str(building) + '...')\n print()\n try:\n elec = dataset.buildings[building].elec[app] \n \n elecs.append(elec)\n\n except KeyError:\n print ('Appliance '+str(app)+' does not exist in this building')\n print ('Building skipped...')\n print ()\n \n\n metergroup = MeterGroup(elecs)\n\n return metergroup", "Now we set the appliances that we want as well as the buildings to exclude and we create the metergroup", "applianceName = ['fridge','microwave']\nbuildings_to_exclude = [4,5]\n\nmetergroup = get_all_trainings(applianceName,Data_for_rest,buildings_to_exclude)\n\nprint('Now printing the Meter Group...')\nprint()\nprint(metergroup)", "As we can see the Metergroup was successfully created and contains all the appliances we requested (Fridge and Microwave) in all buildings that the appliances exist apart from the ones we excluded\nCorrecting the MeterGroup (Necessary for the CO to work)\nNow we need to perform the trick we mentioned previously. We need to also include the meter from building 5 with the Fridge and Microwave which is the building we are going to test on but we need to make sure that only a very small portion of the data is seen for this building. We already took care of that by changing the window for the data in building 5 so now we only have to include the meters for the Fridge and Microwave for building 5 from the reduced time dataset", "def correct_meter(Data,building,appliance,oldmeter):\n \n # Unpack meters from the MeterGroup\n meters = oldmeter.all_meters()\n \n # Get the rest of the meters and append\n for a in appliance:\n meter_to_add = Data.buildings[building].elec[a]\n meters.append(meter_to_add)\n \n # Group again in a single metergroup and return\n return MeterGroup(meters) \n\ncorr_metergroup = correct_meter(Data_for_5,5,applianceName,metergroup)\n\nprint('The Modified Meter is now..')\nprint()\nprint(corr_metergroup)", "As we can see the metergroup was updated successfully\nTraining\nWe now need to train in the Metergroup we just created. 
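Under the hood, train_on_chunk reduces each appliance's power series to a handful of representative states by clustering. A rough hedged sketch of that idea on synthetic readings, using plain scikit-learn rather than NILMTK's cluster helper:\n\n```python\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n# Synthetic power readings for one appliance: mostly off, occasionally around 120 W.\npower = np.concatenate([np.zeros(500), np.random.normal(120, 5, 100)])\n\nkmeans = KMeans(n_clusters=2, n_init=10).fit(power.reshape(-1, 1))\nnp.sort(kmeans.cluster_centers_.ravel())  # approximate 'off' and 'on' state centroids\n```\n\nWith that picture in mind, we can proceed.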
First, let us load the class for the CO", "# Train\nco = CombinatorialOptimisation()", "Now Let's train", "co.train(corr_metergroup)", "Preparing the Testing Data\nNow that the training is done, the only thing that we have to do is to prepare the Data for Building 5 that we want to test on and call the Disaggregation. The data set is now the remaining part of building 5 that is not seen. After that, we only keep the Main meter which contains ifrormation about the aggregated data consumption and we disaggregate.", "Test_Data = DataSet(join(data_dir, 'REDD.h5'))\nTest_Data.set_window(start=break_point)\n\n# The building number on which we test\nbuilding_for_testing = 5\n\ntest = Test_Data.buildings[building_for_testing].elec\n\nmains = test.mains()", "Disaggregating the test data\nThe disaggregation Begins Now", "# Disaggregate\ndisag_filename = join(data_dir, 'COMBINATORIAL_OPTIMIZATION.h5')\n\nmains = test.mains()\n\ntry:\n output = HDFDataStore(disag_filename, 'w')\n co.disaggregate(mains, output)\nexcept ValueError:\n output.close()\n output = HDFDataStore(disag_filename, 'w')\n co.disaggregate(mains, output)\n\nfor meter in range(1, 2):\n df1 = output.store.get('/building5/elec/meter{}'.format(meter))\n df2 = we.store.store.get('/building5/elec/meter{}'.format(meter))\n\n \noutput.close()", "OK.. Now we are all done. All that remains is to interpret the results and plot the scores..\nPost Processing & Results", "# Opening the Dataset with the Disaggregated data\ndisag = DataSet(disag_filename)\n\n# Getting electric appliances and meters\ndisag_elec = disag.buildings[building_for_testing].elec\n\n# We also get the electric appliances and meters for the ground truth data to compare\nelec = Test_Data.buildings[building_for_testing].elec\n\ne = [test[a] for a in applianceName]\nme = MeterGroup(e)\nprint(me)", "Resampling to align meters\nBefore we are able to calculate and plot the metrics we need to align the ground truth meter with the disaggregated meters. Why so? If you notice in the dissagregation method of the CO class above, you may see that by default the time sampling is changed from 3s which is the raw data to 60s. This has to happen in order to make the disaggregation more efficient computationally but also because it is impossible to disaggregate using the actual time step. So in order to compare now we have to resample the meter for the ground truth and align it", "def align_two_meters(master, slave, func='when_on'):\n \"\"\"Returns a generator of 2-column pd.DataFrames. 
The first column is from\n `master`, the second from `slave`.\n\n Takes the sample rate and good_periods of `master` and applies to `slave`.\n\n Parameters\n ----------\n master, slave : ElecMeter or MeterGroup instances\n \"\"\"\n sample_period = master.sample_period()\n period_alias = '{:d}S'.format(sample_period)\n sections = master.good_sections()\n master_generator = getattr(master, func)(sections=sections)\n for master_chunk in master_generator:\n if len(master_chunk) < 2:\n return\n chunk_timeframe = TimeFrame(master_chunk.index[0],\n master_chunk.index[-1])\n slave_generator = getattr(slave, func)(sections=[chunk_timeframe])\n slave_chunk = next(slave_generator)\n\n # TODO: do this resampling in the pipeline?\n slave_chunk = slave_chunk.resample(period_alias)\n if slave_chunk.empty:\n continue\n master_chunk = master_chunk.resample(period_alias)\n\n return master_chunk,slave_chunk\n", "Here we just plot the disaggregated data alongside the ground truth for the Fridge", "disag_elec.select(instance=18).plot()\nme.select(instance=18).plot()", "Aligning meters, Converting to Numpy and Computing Metrics\nIn this part of the Notebook, we call the function we previously defined to align the meters and then we convert the meters to pandas and ultimately to numpy arrays. We check if any NaN's exist (which is something possible after resmplilng.. Resampling errors may occur) and replace them with 0's if they do. We also compute the following metrics for each appliance:\n1) True Positive, False Positive, False Negative, True Negative\n2) Precision and Recall\n3) Accuracy and F1-Score\nFor more information about these metrics please refer to the report.", "appliances_scores = {}\n\nfor m in me.meters:\n print('Processing {}...'.format(m.label()))\n ground_truth = m\n inst = m.instance()\n prediction = disag_elec.select(instance=inst)\n \n a = prediction.meters[0]\n \n b = a.power_series_all_data()\n \n pr_a,gt_a = align_two_meters(prediction.meters[0],ground_truth)\n \n gt = gt_a.as_matrix()\n pr = pr_a.as_matrix()\n \n if np.all(np.isnan(pr)==False):\n print('\\t Predictions array seems to be fine...')\n print('\\t No Nans detected')\n print()\n else:\n print('\\t Serious error in Predictions...')\n print('\\t The resampled array contains Nans')\n print()\n \n gt_states_on = gt > 0.1\n pr_states_on = pr > 0.1\n \n TP = np.sum(np.logical_and(gt_states_on==True,pr_states_on[1:]==True))\n FP = np.sum(np.logical_and(gt_states_on==True,pr_states_on[1:]==False))\n FN = np.sum(np.logical_and(gt_states_on==False,pr_states_on[1:]==True))\n TN = np.sum(np.logical_and(gt_states_on==False,pr_states_on[1:]==False))\n P = np.sum(gt_states_on==True)\n N = np.sum(gt_states_on==False)\n \n recall = TP/float(TP+FN)\n precision = TP/float(TP+FP)\n f1 = 2*precision*recall/(precision+recall)\n accuracy = (TP+TN)/float(P+N)\n \n result = {'F1-Score':f1,\n 'Precision':precision,\n 'Recall':recall,\n 'Accuracy':accuracy}\n appliances_scores[m.label()] = result\n\nprint(appliances_scores)\n\nNames = ['Fridge','Microwave']", "Results\nNow we just plot the scores for both the Fridge and the Microwave in order to be able to visualize what is going on. We do not comment on the results in this notebook since we do this in the report. There is a separate notebook where all these results are combined along with the corresponding results from the Neural Network and the FHMM method and the total results are reported side by side to ease comparison. 
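(As an optional hedged cross-check, not part of the original analysis: the same scores could be computed with scikit-learn from the aligned on/off state arrays, assuming they are first trimmed to a common length.)\n\n```python\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n\n# Assumes gt_states_on and pr_states_on are the boolean state arrays from the loop above,\n# and that numpy is already imported as np in this notebook.\nn = min(len(gt_states_on), len(pr_states_on))\ny_true = np.asarray(gt_states_on[:n]).ravel()\ny_pred = np.asarray(pr_states_on[:n]).ravel()\n\nprint('precision', precision_score(y_true, y_pred))\nprint('recall   ', recall_score(y_true, y_pred))\nprint('F1       ', f1_score(y_true, y_pred))\nprint('accuracy ', accuracy_score(y_true, y_pred))\n```\n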
We plot them here as well for housekeeping although it is redundant.\nF1-Score", "x = np.arange(2)\ny = np.array([appliances_scores[i]['F1-Score'] for i in Names])\ny[np.isnan(y)] = 0.001\n\nf = plt.figure(figsize=(18,8))\nplt.rc('font', size=20, **{'family': 'serif', 'serif': ['Computer Modern']})\nplt.rc('text', usetex=True)\nax = f.add_axes([0.2,0.2,0.8,0.8])\nax.bar(x,y,align='center')\nax.set_xticks(x)\nax.set_yticks(y)\nax.set_yticklabels(y,fontsize=20)\nax.set_xticklabels(Names,fontsize=20)\nax.set_xlim([min(x)-0.5,max(x)+0.5])\nplt.xlabel('Appliances',fontsize=20)\nplt.ylabel('F1-Score',fontsize=20)\nplt.title('Combinatorial Optimization',fontsize=22)\nplt.show()", "Precision", "x = np.arange(2)\ny = np.array([appliances_scores[i]['Precision'] for i in Names])\ny[np.isnan(y)] = 0.001\n\nf = plt.figure(figsize=(18,8))\nplt.rc('font', size=20, **{'family': 'serif', 'serif': ['Computer Modern']})\nplt.rc('text', usetex=True)\nax = f.add_axes([0.2,0.2,0.8,0.8])\nax.bar(x,y,align='center')\nax.set_xticks(x)\nax.set_yticks(y)\nax.set_yticklabels(y,fontsize=20)\nax.set_xticklabels(Names,fontsize=20)\nax.set_xlim([min(x)-0.5,max(x)+0.5])\nplt.xlabel('Appliances',fontsize=20)\nplt.ylabel('Precision',fontsize=20)\nplt.title('Combinatorial Optimization',fontsize=22)\nplt.show()", "Recall", "x = np.arange(2)\ny = np.array([appliances_scores[i]['Recall'] for i in Names])\ny[np.isnan(y)] = 0.001\n\nf = plt.figure(figsize=(18,8))\nplt.rc('font', size=20, **{'family': 'serif', 'serif': ['Computer Modern']})\nplt.rc('text', usetex=True)\nax = f.add_axes([0.2,0.2,0.8,0.8])\nax.bar(x,y,align='center')\nax.set_xticks(x)\nax.set_yticks(y)\nax.set_yticklabels(y,fontsize=20)\nax.set_xticklabels(['Fridge','Sockets','Lights'],fontsize=20)\nax.set_xlim([min(x)-0.5,max(x)+0.5])\nplt.xlabel('Appliances',fontsize=20)\nplt.ylabel('Recall',fontsize=20)\nplt.title('Combinatorial Optimization',fontsize=22)\nplt.show()", "Accuracy", "x = np.arange(2)\ny = np.array([appliances_scores[i]['Accuracy'] for i in Names])\ny[np.isnan(y)] = 0.001\n\nf = plt.figure(figsize=(18,8))\nplt.rc('font', size=20, **{'family': 'serif', 'serif': ['Computer Modern']})\nplt.rc('text', usetex=True)\nax = f.add_axes([0.2,0.2,0.8,0.8])\nax.bar(x,y,align='center')\nax.set_xticks(x)\nax.set_yticks(y)\nax.set_yticklabels(y,fontsize=20)\nax.set_xticklabels(['Fridge','Sockets','Lights'],fontsize=20)\nax.set_xlim([min(x)-0.5,max(x)+0.5])\nplt.xlabel('Appliances',fontsize=20)\nplt.ylabel('Accuracy',fontsize=20)\nplt.title('Combinatorial Optimization',fontsize=22)\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
amandersillinois/landlab
notebooks/tutorials/flexure/flexure_1d.ipynb
mit
[ "<a href=\"http://landlab.github.io\"><img style=\"float: left\" src=\"../../landlab_header.png\"></a>\nUsing the Landlab 1D flexure component\n<hr>\n<small>For more Landlab tutorials, click here: <a href=\"https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html\">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small>\n<hr>\n\nIn this example we will:\n* create a Landlab component that solves the (1D) flexure equation\n* apply a point load\n* run the component\n* plot some output\n* apply a distributed load\n(Note that this tutorial uses the one-dimensional flexure component, Flexure1D. A separate tutorial notebook, \"lots_of_loads\", explores the two-dimensional elastic flexure component Flexure.)\nA bit of magic so that we can plot within this notebook.", "# %matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Create the grid\nWe are going to build a uniform rectilinear grid with a node spacing of 100 km in the y-direction and 10 km in the x-direction on which we will solve the flexure equation.\nFirst we need to import RasterModelGrid.", "from landlab import RasterModelGrid", "Create a rectilinear grid with a spacing of 100 km between rows and 10 km between columns. The numbers of rows and columns are provided as a tuple of (n_rows, n_cols), in the same manner as similar numpy functions. The spacing is also a tuple, (dy, dx).", "grid = RasterModelGrid((3, 800), xy_spacing=(100e3, 10e3))\n\ngrid.dy, grid.dx", "Create the component\nNow we create the flexure component and tell it to use our newly created grid. First, though, we'll examine the Flexure1D component a bit.", "from landlab.components import Flexure1D", "The Flexure1D component, as with most landlab components, will require our grid to have some data that it will use. We can get the names of these data fields with the input_var_names attribute of the component class.", "Flexure1D.input_var_names", "We see that flexure uses just one data field: the change in lithospheric loading. Landlab component classes can provide additional information about each of these fields. For instance, to see the units for a field, use the var_units method.", "Flexure1D.var_units('lithosphere__increment_of_overlying_pressure')", "To print a more detailed description of a field, use var_help.", "Flexure1D.var_help('lithosphere__increment_of_overlying_pressure')", "What about the data that Flexure1D provides? Use the output_var_names attribute.", "Flexure1D.output_var_names\n\nFlexure1D.var_help('lithosphere_surface__increment_of_elevation')", "Now that we understand the component a little more, create it using our grid.", "grid.add_zeros(\"lithosphere__increment_of_overlying_pressure\", at=\"node\")\n\nflex = Flexure1D(grid, method='flexure')", "Add a point load\nFirst we'll add just a single point load to the grid. We need to call the update method of the component to calculate the resulting deflection (if we don't run update the deflections would still be all zeros).\nUse the load_at_node attribute of Flexure1D to set the loads. Notice that load_at_node has the same shape as the grid. Likewise, x_at_node and dz_at_node are reshaped in the same way.", "flex.load_at_node[1, 200] = 1e6\nflex.update()\nplt.plot(flex.x_at_node[1, :400] / 1000., flex.dz_at_node[1, :400])", "Before we make any changes, reset the deflections to zero.", "flex.dz_at_node[:] = 0.", "Now we will double the effective elastic thickness but keep the same point load. 
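Why expect a broader deflection profile? A rough, hedged back-of-the-envelope check (the material constants below are illustrative assumptions, not values read from the component): the flexural parameter that sets the width of the response grows with elastic thickness.\n\n```python\nimport numpy as np\n\ndef flexural_parameter(eet, youngs=7e10, poisson=0.25, delta_rho=3300.0, gravity=9.81):\n    # Flexural rigidity D = E * Te**3 / (12 * (1 - nu**2)); alpha = (4 * D / (delta_rho * g)) ** 0.25\n    rigidity = youngs * eet ** 3 / (12.0 * (1.0 - poisson ** 2))\n    return (4.0 * rigidity / (delta_rho * gravity)) ** 0.25\n\n# Doubling the effective elastic thickness widens the response by roughly 2**(3/4), about 1.7x.\nflexural_parameter(65e3), flexural_parameter(130e3)\n```\n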
Notice that, as expected, the deflections are more spread out.", "flex.eet *= 2.\nflex.update()\nplt.plot(flex.x_at_node[1, :400] / 1000., flex.dz_at_node[1, :400])", "Add some loading\nWe will now add a distributed load. As we saw above, for this component, the name of the attribute that holds the applied loads is load_at_node. For this example we create a loading that increases linearly over the center portion of the grid until some maximum. This could be thought of as the water load following a sea-level rise over a (linear) continental shelf.", "flex.load_at_node[1, :100] = 0.\nflex.load_at_node[1, 100:300] = np.arange(200) * 1e6 / 200.\nflex.load_at_node[1, 300:] = 1e6\n\nplt.plot(flex.load_at_node[1, :400])", "Update the component to solve for deflection\nClear the current deflections, and run update to get the new deflections.", "flex.dz_at_node[:] = 0.\nflex.update()\n\nplt.plot(flex.x_at_node[1, :400] / 1000., flex.dz_at_node[1, :400])", "Exercise: try maintaining the same loading distribution but double the effective elastic thickness.\nClick here for more <a href=\"https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html\">Landlab tutorials</a>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
nick-youngblut/SIPSim
ipynb/bac_genome/fullCyc/trimDataset/dataset_info.ipynb
mit
[ "General info on the fullCyc dataset (as it pertains to SIPSim validation)\n\nSimulating 12C gradients\nDetermining if simulated taxon abundance distributions resemble the true distributions\nSimulation parameters to infer from dataset:\nInfer total richness of bulk soil community \nrichness of starting community\n\n\nInfer abundance distribution of bulk soil community\nNO: distribution fit\nINSTEAD: using relative abundances of bulk soil community\n\n\nGet distribution of total OTU abundances per fraction\nNumber of sequences per sample\n\n\n\nUser variables", "%load_ext rpy2.ipython\n\n%%R\nworkDir = '/home/nick/notebook/SIPSim/dev/fullCyc/'\nphyseqDir = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/'\n\nphyseqBulkCore = 'bulk-core_trm'\nphyseqSIP = 'SIP-core_unk_trm'\n\nampFragFile = '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl'", "Init", "import os\n\n%%R\nlibrary(ggplot2)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(phyloseq)\nlibrary(fitdistrplus)\nlibrary(sads)\n\n%%R\ndir.create(workDir, showWarnings=FALSE)", "Loading phyloseq list datasets", "%%R\n# bulk core samples\nF = file.path(physeqDir, physeqBulkCore)\nphyseq.bulk = readRDS(F)\n#physeq.bulk.m = physeq.bulk %>% sample_data\nphyseq.bulk %>% names\n\n%%R\n# SIP core samples\nF = file.path(physeqDir, physeqSIP)\nphyseq.SIP = readRDS(F)\n#physeq.SIP.m = physeq.SIP %>% sample_data\nphyseq.SIP %>% names", "Infer abundance distribution of each bulk soil community\n\ndistribution fit", "%%R\n\nphyseq2otu.long = function(physeq){\n df.OTU = physeq %>%\n transform_sample_counts(function(x) x/sum(x)) %>%\n otu_table %>%\n as.matrix %>% \n as.data.frame \n\n df.OTU$OTU = rownames(df.OTU)\n df.OTU = df.OTU %>% \n gather('sample', 'abundance', 1:(ncol(df.OTU)-1))\n return(df.OTU)\n}\n\ndf.OTU.l = lapply(physeq.bulk, physeq2otu.long) \ndf.OTU.l %>% names\n \n#df.OTU = do.call(rbind, lapply(physeq.bulk, physeq2otu.long))\n#df.OTU$Day = gsub('.+\\\\.D([0-9]+)\\\\.R.+', '\\\\1', df.OTU$sample) \n#df.OTU %>% head(n=3)\n\n%%R -w 450 -h 400\n\nlapply(df.OTU.l, function(x) descdist(x$abundance, boot=1000))\n\n%%R\nfitdists = function(x){\n fit.l = list()\n #fit.l[['norm']] = fitdist(x$abundance, 'norm')\n fit.l[['exp']] = fitdist(x$abundance, 'exp')\n fit.l[['logn']] = fitdist(x$abundance, 'lnorm')\n fit.l[['gamma']] = fitdist(x$abundance, 'gamma')\n fit.l[['beta']] = fitdist(x$abundance, 'beta')\n \n # plotting\n plot.legend = c('exponential', 'lognormal', 'gamma', 'beta')\n par(mfrow = c(2,1))\n denscomp(fit.l, legendtext=plot.legend)\n qqcomp(fit.l, legendtext=plot.legend)\n \n # fit summary\n gofstat(fit.l, fitnames=plot.legend) %>% print\n \n return(fit.l)\n}\n\nfits.l = lapply(df.OTU.l, fitdists)\nfits.l %>% names\n\n%%R\n# getting summaries for lognormal fits\nget.summary = function(x, id='logn'){\n summary(x[[id]])\n}\n\nfits.s = lapply(fits.l, get.summary)\nfits.s %>% names\n\n%%R\n# listing estimates for fits\ndf.fits = do.call(rbind, lapply(fits.s, function(x) x$estimate)) %>% as.data.frame\ndf.fits$Sample = rownames(df.fits)\ndf.fits$Day = gsub('.+D([0-9]+)\\\\.R.+', '\\\\1', df.fits$Sample) %>% as.numeric\ndf.fits\n\n%%R -w 650 -h 300\nggplot(df.fits, aes(Day, meanlog,\n ymin=meanlog-sdlog,\n ymax=meanlog+sdlog)) +\n geom_pointrange() +\n geom_line() +\n theme_bw() +\n theme(\n text = element_text(size=16)\n )\n\n%%R\n# mean of estimaates\napply(df.fits, 2, mean)", "Relative abundance of most abundant taxa", "%%R -w 800\ndf.OTU = do.call(rbind, df.OTU.l) %>%\n mutate(abundance = abundance * 100) %>%\n 
group_by(sample) %>%\n mutate(rank = row_number(desc(abundance))) %>%\n ungroup() %>%\n filter(rank < 10)\n\nggplot(df.OTU, aes(rank, abundance, color=sample, group=sample)) +\n geom_point() +\n geom_line() +\n labs(y = '% rel abund')", "Making a community file for the simulations", "%%R -w 800 -h 300\ndf.OTU = do.call(rbind, df.OTU.l) %>%\n mutate(abundance = abundance * 100) %>%\n group_by(sample) %>%\n mutate(rank = row_number(desc(abundance))) %>%\n group_by(rank) %>%\n summarize(mean_abundance = mean(abundance)) %>%\n ungroup() %>%\n mutate(library = 1,\n mean_abundance = mean_abundance / sum(mean_abundance) * 100) %>%\n rename('rel_abund_perc' = mean_abundance) %>%\n dplyr::select(library, rel_abund_perc, rank) %>%\n as.data.frame\n\ndf.OTU %>% nrow %>% print\n\nggplot(df.OTU, aes(rank, rel_abund_perc)) +\n geom_point() +\n geom_line() +\n labs(y = 'mean % rel abund')", "Adding reference genome taxon names", "ret = !SIPSim KDE_info -t /home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl\nret = ret[1:]\nret[:5]\n\n%%R\n\nF = '/home/nick/notebook/SIPSim/dev/fullCyc_trim//ampFrags_kde_amplified.txt'\nret = read.delim(F, sep='\\t')\nret = ret$genomeID\nret %>% length %>% print\nret %>% head\n\n%%R\nret %>% length %>% print\ndf.OTU %>% nrow\n\n%%R -i ret\n\n# randomize\nret = ret %>% sample %>% sample %>% sample\n\n# adding to table\ndf.OTU$taxon_name = ret[1:nrow(df.OTU)]\ndf.OTU = df.OTU %>% \n dplyr::select(library, taxon_name, rel_abund_perc, rank)\ndf.OTU %>% head\n\n%%R\n#-- debug -- #\ndf.gc = read.delim('~/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_parsed_kde_info.txt', \n sep='\\t', row.names=)\ntop.taxa = df.gc %>% \n filter(KDE_ID == 1, median > 1.709, median < 1.711) %>% \n dplyr::select(taxon_ID) %>% \n mutate(taxon_ID = taxon_ID %>% sample) %>%\n head\n\ntop.taxa = top.taxa$taxon_ID %>% as.vector\ntop.taxa\n\n%%R\n#-- debug -- #\np1 = df.OTU %>%\n filter(taxon_name %in% top.taxa)\np2 = df.OTU %>%\n head(n=length(top.taxa))\np3 = anti_join(df.OTU, rbind(p1, p2), c('taxon_name' = 'taxon_name'))\n\ndf.OTU %>% nrow %>% print\np1 %>% nrow %>% print\np2 %>% nrow %>% print\np3 %>% nrow %>% print\n\np1 = p2$taxon_name\np2$taxon_name = top.taxa\n\ndf.OTU = rbind(p2, p1, p3)\ndf.OTU %>% nrow %>% print\ndf.OTU %>% head", "Writing file", "%%R \n\nF = file.path(workDir, 'fullCyc_12C-Con_trm_comm.txt')\nwrite.table(df.OTU, F, sep='\\t', quote=FALSE, row.names=FALSE)\ncat('File written:', F, '\\n')", "parsing amp-Frag file to match comm file", "!tail -n +2 /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm.txt | \\\n cut -f 2 > /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt\n\noutFile = os.path.splitext(ampFragFile)[0] + '_parsed.pkl'\n!SIPSim KDE_parse \\\n $ampFragFile \\\n /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt \\\n > $outFile\n \nprint 'File written {}'.format(outFile)\n!SIPSim KDE_info -n $outFile" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dianafprieto/SS_2017
02_NB_IntroductionNumpy.ipynb
mit
[ "<!-- <img src=\"files/images/python-screenshot.jpg\" width=\"600\"> -->\n<img src=\"imgs/header.png\">\nBasics of Numerical Python Arrays (numpy)\n1. In-place Arithmetics", "import numpy as np", "Case 1: a = a+b\nThe sum is first computed and resulting in a new array and the a is bound to the new array", "a = np.array(range(10000000))\nb = np.array(range(9999999,-1,-1))\n\n%%time\na = a + b ", "Case 2: a += b\nThe elements of b are directly added into the elements of a (in memory) - no intermediate array. These operators implement the so-called \"in-place arithmetics\" (e.g., +=, *=, /=, -= )", "a = np.array(range(10000000))\nb = np.array(range(9999999,-1,-1))\n\n%%time\na +=b ", "2. Vectorization", "#Apply function to a complete array instead of writing loop to iterate over all elements of the array. \n#This is called vectorization. The opposite of vectorization (for loops) is known as the scalar implementation\n\ndef f(x):\n return x*np.exp(4)\n\nprint(f(a))", "3. Slicing and reshape\nArray slicing\nx[i:j:s]\n\npicks out the elements starting with index i and stepping s indices at the time up to, but not including, j.", "x = np.array(range(100))\n\nx[1:-1] # picks out all elements except the first and the last, but contrary to lists, a[1:-1] is not a copy of the data in a.\nx[0:-1:2] # picks out every two elements up to, but not including, the last element, while \nx[::4] # picks out every four elements in the whole array.", "Array shape manipulation", "a = np.linspace(-1, 1, 6)\nprint (a)\n\na.shape\na.size\n\n# rows, columns\na.shape = (2, 3) \na = a.reshape(2, 3) # alternative\n\na.shape\nprint (a)\n\n# len(a) always returns the length of the first dimension of an array. -> no. of rows", "Exercise\n1. Create a 10x10 2d array with 1 on the border and 0 inside", "Z = np.ones((10,10))\nZ[1:-1,1:-1] = 0\nprint(Z)", "2. Create a structured array representing a position (x,y) and a color (r,g,b)", "Z = np.zeros(10, [ ('position', [ ('x', float, 1),\n ('y', float, 1)]),\n ('color', [ ('r', float, 1),\n ('g', float, 1),\n ('b', float, 1)])])\nprint(Z)", "3. Consider a large vector Z, compute Z to the power of 3 using 2 different methods", "x = np.random.rand(5e7)\n\n%timeit np.power(x,3)\n%timeit x*x*x" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jingr1/SelfDrivingCar
TowDHistogramFilter/TowDHistogramFilter.ipynb
mit
[ "Two Dimensional Histogram Filter - Your First Feature (and your first bug).\nWriting code is important. But a big part of being on a self driving car team is working with a large existing codebase. On high stakes engineering projects like a self driving car, you will probably have to earn the trust of your managers and coworkers before they'll let you make substantial changes to the code base. \nA typical assignment for someone new to a team is to make progress on a backlog of bugs. So with that in mind, that's what you will be doing for your first project in the Nanodegree.\nYou'll go through this project in a few parts:\n\nExplore the Code - don't worry about bugs at this point. The goal is to get a feel for how this code base is organized and what everything does.\nImplement a Feature - write code that gets the robot moving correctly.\nFix a Bug - Implementing motion will reveal a bug which hadn't shown up before. Here you'll identify what the bug is and take steps to reproduce it. Then you'll identify the cause and fix it.\n\nPart 1: Exploring the code\nIn this section you will just run some existing code to get a feel for what this localizer does.\nYou can navigate through this notebook using the arrow keys on your keyboard. You can run the code in a cell by pressing Ctrl + Enter\nNavigate through the cells below. In each cell you should\n\nRead through the code. It's okay to not understand everything at this point. \nMake a guess about what will happen when you run the code. \nRun the code and compare what you see with what you expected. \nWhen you get to a TODO read the instructions carefully and complete the activity.", "# This code \"imports\" code from some of the other files we've written\n# in this directory. Specifically simulate.py and helpers.py\nimport simulate as sim\nimport helpers\nimport localizer\n\n# Don't worry too much about this code for now...\nfrom __future__ import division, print_function\n%load_ext autoreload\n%autoreload 2\n\n# This code defines a 5x5 robot world as well as some other parameters\n# which we will discuss later. It then creates a simulation and shows \n# the initial beliefs. \nR = 'r'\nG = 'g'\ngrid = [\n [R,G,G,G,R],\n [G,G,R,G,R],\n [G,R,G,G,G],\n [R,R,G,R,G],\n [R,G,R,G,R],\n]\nblur = 0.05\np_hit = 200.0\nsimulation = sim.Simulation(grid, blur, p_hit)\nsimulation.show_beliefs()", "Run the code below multiple times by repeatedly pressing Ctrl + Enter.\nAfter each run observe how the state has changed.", "simulation.run(1)\nsimulation.show_beliefs()", "What do you think this call to run is doing? Look at the code in simulate.py to find out.\nSpend a few minutes looking at the run method and the methods it calls to get a sense for what's going on.\nWhat am I looking at?\nThe red star shows the robot's true position. The blue circles indicate the strength of the robot's belief that it is at any particular location.\nIdeally we want the biggest blue circle to be at the same position as the red star.", "# We will provide you with the function below to help you look\n# at the raw numbers.\n\ndef show_rounded_beliefs(beliefs):\n for row in beliefs:\n for belief in row:\n print(\"{:0.3f}\".format(belief), end=\" \")\n print()\n \n# The {:0.3f} notation is an example of \"string \n# formatting\" in Python. You can learn more about string \n# formatting at https://pyformat.info/\n\nshow_rounded_beliefs(simulation.beliefs)", "Part 2: Implement a 2D sense function.\nAs you can see, the robot's beliefs aren't changing. 
No matter how many times we call the simulation's sense method, nothing happens. The beliefs remain uniform.\nInstructions\n\nOpen localizer.py and complete the sense function.\nRun the code in the cell below to import the localizer module (or reload it) and then test your sense function.\nIf the test passes, you've successfully implemented your first feature! Keep going with the project. If your tests don't pass (they likely won't the first few times you test), keep making modifications to the sense function until they do!", "from imp import reload\nreload(localizer)\ndef test_sense():\n R = 'r'\n _ = 'g'\n\n simple_grid = [\n [_,_,_],\n [_,R,_],\n [_,_,_]\n ]\n\n p = 1.0 / 9\n initial_beliefs = [\n [p,p,p],\n [p,p,p],\n [p,p,p]\n ]\n\n observation = R\n\n expected_beliefs_after = [\n [1/11, 1/11, 1/11],\n [1/11, 3/11, 1/11],\n [1/11, 1/11, 1/11]\n ]\n\n p_hit = 3.0\n p_miss = 1.0\n beliefs_after_sensing = localizer.sense(\n observation, simple_grid, initial_beliefs, p_hit, p_miss)\n\n if helpers.close_enough(beliefs_after_sensing, expected_beliefs_after):\n print(\"Tests pass! Your sense function is working as expected\")\n return\n \n elif not isinstance(beliefs_after_sensing, list):\n print(\"Your sense function doesn't return a list!\")\n return\n \n elif len(beliefs_after_sensing) != len(expected_beliefs_after):\n print(\"Dimensionality error! Incorrect height\")\n return\n \n elif len(beliefs_after_sensing[0] ) != len(expected_beliefs_after[0]):\n print(\"Dimensionality Error! Incorrect width\")\n return\n \n elif beliefs_after_sensing == initial_beliefs:\n print(\"Your code returns the initial beliefs.\")\n return\n \n total_probability = 0.0\n for row in beliefs_after_sensing:\n for p in row:\n total_probability += p\n if abs(total_probability-1.0) > 0.001:\n \n print(\"Your beliefs appear to not be normalized\")\n return\n \n print(\"Something isn't quite right with your sense function\")\n\ntest_sense()", "Integration Testing\nBefore we call this \"complete\" we should perform an integration test. We've verified that the sense function works on it's own, but does the localizer work overall?\nLet's perform an integration test. First you you should execute the code in the cell below to prepare the simulation environment.", "from simulate import Simulation\nimport simulate as sim\nimport helpers\nreload(localizer)\nreload(sim)\nreload(helpers)\n\nR = 'r'\nG = 'g'\ngrid = [\n [R,G,G,G,R,R,R],\n [G,G,R,G,R,G,R],\n [G,R,G,G,G,G,R],\n [R,R,G,R,G,G,G],\n [R,G,R,G,R,R,R],\n [G,R,R,R,G,R,G],\n [R,R,R,G,R,G,G],\n]\n\n# Use small value for blur. This parameter is used to represent\n# the uncertainty in MOTION, not in sensing. We want this test\n# to focus on sensing functionality\nblur = 0.1\np_hit = 100.0\nsimulation = sim.Simulation(grid, blur, p_hit)\n\n# Use control+Enter to run this cell many times and observe how \n# the robot's belief that it is in each cell (represented by the\n# size of the corresponding circle) changes as the robot moves.\n# The true position of the robot is given by the red star.\n\n# Run this cell about 15-25 times and observe the results\nsimulation.run(1)\nsimulation.show_beliefs()\n\n# If everything is working correctly you should see the beliefs\n# converge to a single large circle at the same position as the \n# red star.\n#\n# When you are satisfied that everything is working, continue\n# to the next section", "Part 3: Identify and Reproduce a Bug\nSoftware has bugs. 
That's okay.\nA user of your robot called tech support with a complaint\n\n\"So I was using your robot in a square room and everything was fine. Then I tried loading in a map for a rectangular room and it drove around for a couple seconds and then suddenly stopped working. Fix it!\"\n\nNow we have to debug. We are going to use a systematic approach.\n\nReproduce the bug\nRead (and understand) the error message (when one exists)\nWrite a test that triggers the bug.\nGenerate a hypothesis for the cause of the bug.\nTry a solution. If it fixes the bug, great! If not, go back to step 4.\n\nStep 1: Reproduce the bug\nThe user said that rectangular environments seem to be causing the bug. \nThe code below is the same as the code you were working with when you were doing integration testing of your new feature. See if you can modify it to reproduce the bug.", "from simulate import Simulation\nimport simulate as sim\nimport helpers\nreload(localizer)\nreload(sim)\nreload(helpers)\n\nR = 'r'\nG = 'g'\n\ngrid = [ \n [R,G,G,G,R,R,R],\n [G,G,R,G,R,G,R],\n [G,R,G,G,G,G,R],\n [R,R,G,R,G,G,G],\n]\n\nblur = 0.001\np_hit = 100.0\nsimulation = sim.Simulation(grid, blur, p_hit)\n\n# remember, the user said that the robot would sometimes drive around for a bit...\n# It may take several calls to \"simulation.run\" to actually trigger the bug.\nsimulation.run(5)\nsimulation.show_beliefs()\n\nsimulation.run(3)", "Step 2: Read and Understand the error message\nIf you triggered the bug, you should see an error message directly above this cell. The end of that message should say:\nIndexError: list index out of range\nAnd just above that you should see something like\npath/to/your/directory/localizer.pyc in move(dy, dx, beliefs, blurring)\n 38 new_i = (i + dy ) % width\n 39 new_j = (j + dx ) % height\n---&gt; 40 new_G[int(new_i)][int(new_j)] = cell\n 41 return blur(new_G, blurring)\nThis tells us that line 40 (in the move function) is causing an IndexError because \"list index out of range\".\nIf you aren't sure what this means, use Google! \nCopy and paste IndexError: list index out of range into Google! When I do that, I see something like this:\n\nBrowse through the top links (often these will come from stack overflow) and read what people have said about this error until you are satisfied you understand how it's caused.\nStep 3: Write a test that reproduces the bug\nThis will help you know when you've fixed it and help you make sure you never reintroduce it in the future. 
You might have to try many potential solutions, so it will be nice to have a single function to call to confirm whether or not the bug is fixed", "# According to the user, sometimes the robot actually does run \"for a while\" \n# - How can you change the code so the robot runs \"for a while\"?\n# - How many times do you need to call simulation.run() to consistently\n# reproduce the bug?\n# Modify the code below so that when the function is called \n# it consistently reproduces the bug.\ndef test_robot_works_in_rectangle_world():\n from simulate import Simulation\n import simulate as sim\n import helpers\n reload(localizer)\n reload(sim)\n reload(helpers)\n\n R = 'r'\n G = 'g'\n\n grid = [ \n [R,G,G,G,R,R,R],\n [G,G,R,G,R,G,R],\n [G,R,G,G,G,G,R],\n [R,R,G,R,G,G,G],\n ]\n\n blur = 0.001\n p_hit = 100.0\n for i in range(1000):\n simulation = sim.Simulation(grid, blur, p_hit)\n simulation.run(10)\n \ntest_robot_works_in_rectangle_world()", "Step 4: Generate a Hypothesis\nIn order to have a guess about what's causing the problem, it will be helpful to use some Python debuggin tools\nThe pdb module (python debugger) will be helpful here!\nSetting up the debugger\n\nOpen localizer.py and uncomment the line to the top that says import pdb\nJust before the line of code that is causing the bug new_G[int(new_i)][int(new_j)] = cell, add a new line of code that says pdb.set_trace()\nRun your test by calling your test function (run the cell below this one)\nYou should see a text entry box pop up! For now, type c into the box and hit enter to continue program execution. Keep typing c and enter until the bug is triggered again", "test_robot_works_in_rectangle_world()", "Using the debugger\nThe debugger works by pausing program execution wherever you write pdb.set_trace() in your code. You also have access to any variables which are accessible from that point in your code. \nTry running your test again. This time, when the text entry box shows up, type new_i and hit enter. You will see the value of the new_i variable show up in the debugger window. Play around with the debugger: find the values of new_j, height, and width. Do they seem reasonable / correct?\nWhen you are done playing around, type c to continue program execution. Was the bug triggered? Keep playing until you have a guess about what is causing the bug.\nStep 5: Write a Fix\nYou have a hypothesis about what's wrong. Now try to fix it. When you're done you should call your test function again. You may want to remove (or comment out) the line you added to localizer.py that says pdb.set_trace() so your test can run without you having to type c into the debugger box.", "test_robot_works_in_rectangle_world()", "Congratulations!\nYou've implemented your first feature and successfully debugged a problem the robot was having with rectangular environments. Well done." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
CivicKnowledge/metatab-py
examples/Pandas Reporter Example.ipynb
bsd-3-clause
[ "import pandas as pd\nimport numpy as np\nimport pandasreporter as pr\n\n\n# B17001, Poverty Status by Sex by Age\nb17001 = pr.get_dataframe('B17001', '140', '05000US06073', cache=True).ct_columns\n# B17024, Age by Ratio of Income to Poverty Level\nb17024 = pr.get_dataframe('B17024', '140', '05000US06073', cache=True).ct_columns\n# B17017, Poverty Status by Household Type by Age of Householder\nb17017 = pr.get_dataframe('B17017', '140', '05000US06073', cache=True).ct_columns", "B17001 Poverty Status by Sex by Age\nFor the Poverty Status by Sex by Age we'll select the columns for male and female, below poverty, 65 and older. \nNOTE if you want to get seniors of a particular race, use table C17001a-g, condensed race iterations. The 'C' tables have fewer age ranges, but there is no 'C' table for all races: There is a C17001a for Whites, a condensed version of B17001a, but there is no C17001 for a condensed version of B17001", "[e for e in b17001.columns if '65 to 74' in str(e) or '75 years' in str(e) ]\n\n# Now create a subset dataframe with just the columns we need. \nb17001s = b17001[['geoid', 'B17001015', 'B17001016','B17001029','B17001030']]\nb17001s.head()", "Senior poverty rates\nCreating the sums for the senior below poverty rates at the tract level is easy, but there is a serious problem with the results: the numbers are completely unstable. The minimum RSE is 22%, and the median is about 60%. These are useless results.", "b17001_65mf = pr.CensusDataFrame()\nb17001_65mf['geoid'] = b17001['geoid']\nb17001_65mf['poverty_65'], b17001_65mf['poverty_65_m90'] = b17001.sum_m('B17001015', 'B17001016','B17001029','B17001030')\nb17001_65mf.add_rse('poverty_65')\nb17001_65mf.poverty_65_rse.replace([np.inf, -np.inf], np.nan).dropna().describe()" ]
[ "code", "markdown", "code", "markdown", "code" ]
tensorflow/examples
courses/udacity_intro_to_tensorflow_for_deep_learning/l08c04_time_windows.ipynb
apache-2.0
[ "Copyright 2018 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Time windows\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c04_time_windows.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c04_time_windows.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>\n\nSetup", "import tensorflow as tf", "Time Windows\nFirst, we will train a model to forecast the next step given the previous 20 steps, therefore, we need to create a dataset of 20-step windows for training.", "dataset = tf.data.Dataset.range(10)\nfor val in dataset:\n print(val.numpy())\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1)\nfor window_dataset in dataset:\n for val in window_dataset:\n print(val.numpy(), end=\" \")\n print()\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\nfor window_dataset in dataset:\n for val in window_dataset:\n print(val.numpy(), end=\" \")\n print()\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\nfor window in dataset:\n print(window.numpy())\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\ndataset = dataset.map(lambda window: (window[:-1], window[-1:]))\nfor x, y in dataset:\n print(x.numpy(), y.numpy())\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\ndataset = dataset.map(lambda window: (window[:-1], window[-1:]))\ndataset = dataset.shuffle(buffer_size=10)\nfor x, y in dataset:\n print(x.numpy(), y.numpy())\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\ndataset = dataset.map(lambda window: (window[:-1], window[-1:]))\ndataset = dataset.shuffle(buffer_size=10)\ndataset = dataset.batch(2).prefetch(1)\nfor x, y in dataset:\n print(\"x =\", x.numpy())\n print(\"y =\", y.numpy())\n\ndef window_dataset(series, window_size, batch_size=32,\n shuffle_buffer=1000):\n dataset = tf.data.Dataset.from_tensor_slices(series)\n dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)\n dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))\n dataset = dataset.shuffle(shuffle_buffer)\n dataset = dataset.map(lambda window: (window[:-1], window[-1]))\n dataset = 
dataset.batch(batch_size).prefetch(1)\n return dataset" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
wcmckee/wcmckee.com
posts/niktrans.ipynb
mit
[ "<h1>NikTrans</h1>\n\nPython script to create Nikola sites from a list of schools. Edits conf.py file for site name and licence.", "import os\nimport json\n\nos.system('python3 nikoladu.py')\nos.chdir('/home/wcmckee/nik1/')\nos.system('nikola build')\nos.system('rsync -azP /home/wcmckee/nik1/* wcmckee@wcmckee.com:/home/wcmckee/github/wcmckee.com/output/minedujobs')\n\nopccschho = open('/home/wcmckee/ccschool/cctru.json', 'r')\n\nopcz = opccschho.read()\n\nrssch = json.loads(opcz)\n\nfilrma = ('/home/wcmckee/ccschol/')\n\nfor rs in rssch.keys():\n hythsc = (rs.replace(' ', '-'))\n hylow = hythsc.lower()\n hybrac = hylow.replace('(', '')\n hybaec = hybrac.replace(')', '')\n os.mkdir(filrma + hybaec)\n \n os.system('nikola init -q ' + filrma + hybaec)\n ", "I want to open each of the conf.py files and replace the nanme of the site with hythsc.lower\nDir /home/wcmckee/ccschol has all the schools folders. Need to replace in conf.py Demo Name \nwith folder name of school.\nSchools name missing characters - eg ardmore", "lisschol = os.listdir('/home/wcmckee/ccschol/')\n\nfindwat = ('LICENSE = \"\"\"')\n\ndef replacetext(findtext, replacetext):\n for lisol in lisschol:\n filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')\n f = open(filereaz,'r')\n filedata = f.read()\n f.close()\n\n newdata = filedata.replace(findtext, '\"' + replacetext + '\"')\n #print (newdata)\n f = open(filereaz,'w')\n f.write(newdata)\n f.close()\n\n\nreplacetext('LICENSE = \"\"\"', 'LICENSE = \"\"\"<a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Creative Commons Attribution 4.0 International License\" style=\"border-width:0; margin-bottom:12px;\" src=\"https://i.creativecommons.org/l/by/4.0/88x31.png\"></a>\"')\n\n\nlicfil = 'LICENSE = \"\"\"<a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Creative Commons Attribution 4.0 International License\" style=\"border-width:0; margin-bottom:12px;\" src=\"https://i.creativecommons.org/l/by/4.0/88x31.png\"></a>\"'\n\nopwcm = ('/home/wcmckee/github/wcm.com/conf.py')\n\nfor lisol in lisschol:\n print (lisol)\n rdwcm = open(opwcm, 'r')\n \n filewcm = rdwcm.read()\n newdata = filewcm.replace('wcmckee', lisol)\n\n rdwcm.close()\n #print (newdata)\n \n f = open('/home/wcmckee/ccschol/' + lisol + '/conf.py','w')\n f.write(newdata)\n f.close()\n\n\nfor rdlin in rdwcm.readlines():\n #print (rdlin)\n if 'BLOG_TITLE' in rdlin:\n print (rdlin)\n \n\nfor lisol in lisschol:\n print (lisol)\n hythsc = (lisol.replace(' ', '-'))\n hylow = hythsc.lower()\n hybrac = hylow.replace('(', '')\n hybaec = hybrac.replace(')', '')\n filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')\n f = open(filereaz,'r')\n filedata = f.read()\n f.close()\n\n newdata = filedata.replace('LICENCE = \"\"\"', licfil )\n #print (newdata)\n f = open(filereaz,'w')\n f.write(newdata)\n f.close()\n\n\n\nfor lisol in lisschol:\n print (lisol)\n hythsc = (lisol.replace(' ', '-'))\n hylow = hythsc.lower()\n hybrac = hylow.replace('(', '')\n hybaec = hybrac.replace(')', '')\n filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')\n f = open(filereaz,'r')\n filedata = f.read()\n f.close()\n\n newdata = filedata.replace('\"Demo Site\"', '\"' + hybaec + '\"')\n #print (newdata)\n f = open(filereaz,'w')\n f.write(newdata)\n f.close()\n\n\nfor lisol in lisschol:\n print (lisol)\n hythsc = (lisol.replace(' ', '-'))\n hylow = hythsc.lower()\n hybrac = hylow.replace('(', '')\n hybaec = hybrac.replace(')', '')\n filereaz = ('/home/wcmckee/ccschol/' + 
hybaec + '/conf.py')\n f = open(filereaz,'r')\n filedata = f.read()\n f.close()\n\n newdata = filedata.replace('\"Demo Site\"', '\"' + hybaec + '\"')\n #print (newdata)\n f = open(filereaz,'w')\n f.write(newdata)\n f.close()\n", "Perform Nikola build of all the sites in ccschol folder", "buildnik = input('Build school sites y/N ')\n\n\n\nfor lisol in lisschol:\n print (lisol)\n os.chdir('/home/wcmckee/ccschol/' + lisol)\n if 'y' in buildnik:\n os.system('nikola build')\n\nmakerst = open('/home/wcmckee/ccs')\n\nfor rs in rssch.keys():\n hythsc = (rs.replace(' ', '-'))\n hylow = hythsc.lower()\n hybrac = hylow.replace('(', '-')\n hybaec = hybrac.replace(')', '')\n \n #print (hylow())\n filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')\n f = open(filereaz,'r')\n filedata = f.read()\n \n\n newdata = filedata.replace(\"Demo Site\", hybaec)\n f.close()\n f = open(filereaz,'w')\n f.write(newdata)\n f.close()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
mcocdawc/chemcoord
Tutorial/Gradients.ipynb
lgpl-3.0
[ "Gradients", "import pandas as pd\nimport numpy as np\nimport chemcoord as cc\nimport sympy\nsympy.init_printing()\n\nmolecule = cc.Cartesian.read_xyz('MIL53_beta.xyz', start_index=1)\nr, theta = sympy.symbols('r, theta', real=True)", "Let's build the construction table in order to bend one of the terephtalic acid ligands.", "fragment = molecule.get_fragment([(12, 17), (55, 60)])\nconnection = np.array([[3, 99, 1, 12], [17, 3, 99, 12], [60, 3, 17, 12]])\nconnection = pd.DataFrame(connection[:, 1:], index=connection[:, 0], columns=['b', 'a', 'd'])\nc_table = molecule.get_construction_table([(fragment, connection)])\nmolecule = molecule.loc[c_table.index]\nzmolecule = molecule.get_zmat(c_table)", "This gives the following movement:", "zmolecule_symb = zmolecule.copy()\nzmolecule_symb.safe_loc[3, 'angle'] += theta\n\ncc.xyz_functions.view([zmolecule_symb.subs(theta, a).get_cartesian() for a in [-30, 0, 30]])", "Gradient for Zmat to Cartesian\nFor the gradients it is very illustrating to compare:\n$$\nf(x + h) \\approx f(x) + f'(x) h\n$$\n$f(x + h)$ will be zmolecule2\nand\n$h$ will be dist_zmol\nThe boolean chain argument denotes if the movement should be chained or not.\nBond", "dist_zmol1 = zmolecule.copy()\n\nr = 3\n\ndist_zmol1.unsafe_loc[:, ['bond', 'angle', 'dihedral']] = 0\ndist_zmol1.unsafe_loc[3, 'bond'] = r\n\ncc.xyz_functions.view([molecule,\n molecule + zmolecule.get_grad_cartesian(chain=False)(dist_zmol1),\n molecule + zmolecule.get_grad_cartesian()(dist_zmol1),\n (zmolecule + dist_zmol1).get_cartesian()])", "Angle", "angle = 30\n\ndist_zmol2 = zmolecule.copy()\ndist_zmol2.unsafe_loc[:, ['bond', 'angle', 'dihedral']] = 0\ndist_zmol2.unsafe_loc[3, 'angle'] = angle\n\ncc.xyz_functions.view([molecule,\n molecule + zmolecule.get_grad_cartesian(chain=False)(dist_zmol2),\n molecule + zmolecule.get_grad_cartesian()(dist_zmol2),\n (zmolecule + dist_zmol2).get_cartesian()])", "Note that the deviation between $f(x + h)$ and $f(x) + h f'(x)$ is not an error in the implementation but a visualisation of the small angle approximation.\nThe smaller the angle the better is the linearisation.\nGradient for Cartesian to Zmat", "x_dist = 2\n\ndist_mol = molecule.copy()\ndist_mol.loc[:, ['x', 'y', 'z']] = 0.\ndist_mol.loc[13, 'x'] = x_dist\n\nzmat_dist = molecule.get_grad_zmat(c_table)(dist_mol)", "It is immediately obvious, that only the ['bond', 'angle', 'dihedral'] of those atoms change,\nwhich are either moved themselves in cartesian space or use moved references.", "zmat_dist[(zmat_dist.loc[:, ['bond', 'angle', 'dihedral']] != 0).any(axis=1)]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
LimeeZ/phys292-2015-work
assignments/assignment08/InterpolationEx01.ipynb
mit
[ "Interpolation Exercise 1", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nfrom scipy.interpolate import interp1d", "2D trajectory interpolation\nThe file trajectory.npz contains 3 Numpy arrays that describe a 2d trajectory of a particle as a function of time:\n\nt which has discrete values of time t[i].\nx which has values of the x position at those times: x[i] = x(t[i]).\ny which has values of the y position at those times: y[i] = y(t[i]).\n\nLoad those arrays into this notebook and save them as variables x, y and t:", "with np.load('trajectory.npz') as data:\n t = data['t']\n x = data['x']\n y = data['y']\n\nprint(x)\n\nassert isinstance(x, np.ndarray) and len(x)==40\nassert isinstance(y, np.ndarray) and len(y)==40\nassert isinstance(t, np.ndarray) and len(t)==40", "Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays:\n\nnewt which has 200 points between ${t_{min},t_{max}}$.\nnewx which has the interpolated values of $x(t)$ at those times.\nnewy which has the interpolated values of $y(t)$ at those times.", "newt = np.linspace(min(t),max(t), 200)\nf = np.sin(newt)\n\napproxx= interp1d(x,t,kind = 'cubic')\nnewx = np.linspace(np.min(t), np.max(t), 200)\n\napproxy = interp1d(y,t,kind = 'cubic')\nnewy = np.linspace(np.min(t), np.max(t), 200)\n\n?interp1d\n\n\nassert newt[0]==t.min()\nassert newt[-1]==t.max()\nassert len(newt)==200\nassert len(newx)==200\nassert len(newy)==200", "Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points:\n\nFor the interpolated points, use a solid line.\nFor the original points, use circles of a different color and no line.\nCustomize you plot to make it effective and beautiful.", "plt.plot(newt, f, marker='o', linestyle='', label='original data')\nplt.plot(newx, newy, marker='.', label='interpolated');\nplt.legend();\nplt.xlabel('x')\nplt.ylabel('f(x)');\n\nassert True # leave this to grade the trajectory plot" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mdbecker/daa_philly_2015
DataPhilly_Analysis.ipynb
mit
[ "Analyzing the Philadelphia Data Science Scene with Python\nInstructions\n\nThe latest version of this notebook can always be found and viewed online here. It's strongly recommended that you view the online version of this document.\nInstructions for setting up Jupyter Notebook and the required libraries can be found online here.\nThe repo for this project can be found and forked here.\n\nDataPhilly\n<img src=\"dataphilly.jpeg\" width=\"70%\" />\nDataPhilly is a local data meetup group I started back in 2012. I had attended a few data science conferences and I was really disappointed about the lack of a local meetup group for people interested in data science. And so DataPhilly was born!\nJupyter Notebook\nThe Jupyter Notebook is a web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text. Uses include: data cleaning and transformation, numerical simulation, statistical modeling, machine learning and much more.\n<img src=\"jupyterpreview.png\" width=\"70%\" />\nThrough Jupyter's kernel and messaging architecture, the Notebook allows code to be run in a range of different programming languages. For each notebook document that a user opens, the web application starts a kernel that runs the code for that notebook. Each kernel is capable of running code in a single programming language and there are kernels available in the following languages\n\nPython(https://github.com/ipython/ipython)\nJulia (https://github.com/JuliaLang/IJulia.jl)\nR (https://github.com/takluyver/IRkernel)\nRuby (https://github.com/minrk/iruby)\nHaskell (https://github.com/gibiansky/IHaskell)\nScala (https://github.com/Bridgewater/scala-notebook)\nnode.js (https://gist.github.com/Carreau/4279371)\nGo (https://github.com/takluyver/igo)\n\nThe default kernel runs Python code. The notebook provides a simple way for users to pick which of these kernels is used for a given notebook.\nJupyter examples and tutorials can be found in the Jupyter github repo here.\nThe task\nThe task I'll be walking you through today will demonstrate how to use Python for exploratory data analysis. The dataset I'll use is one I created by querying the Meetup API for the DataPhilly meetup. I'll walk you through using Jupyter notebook (The webapp we're using now), Pandas (an excel like tool for data exploration) and scikit-learn (a Python machine learning library) to explore the DataPhilly dataset. I won't go in depth into these tools but my hope is that you'll leave my talk wanting to learn more about using Python for exploratory data analysis and that you'll learn some interesting things about DataPhilly in the process.\nInitializing our environment\nFirst let's start off by initializing our environment\n* %matplotlib inline initializes matplotlib so that we can display graphs and charts in our notebook.\n* import seaborn as sns imports seaborn a graphing library built on top of matplotlib.\n* import pandas as pd imports pandas a tool I'll explain in the next section.\nHint: If you've installed Jupyter Notebook and you're running this on your machine, you can use the run button <i class=\"fa-step-forward fa\"></i> in the toolbar at the top of the page to execute each cell\nClick on the cell above and the cell below. You'll notice that the cell above is Markdown. You can edit it by double clicking on it. The cell below contains Python code which can be modified and executed. 
If the code has any output it will be printed out below the cell with <font color=\"darkred\">Out [n]:</font> in front of it.", "%matplotlib inline\nimport seaborn as sns\nimport pandas as pd\nfrom matplotlib import rcParams\n\n# Modify aesthetics for visibility during presentation\nsns.set_style('darkgrid', {'axes.facecolor': '#C2C2C8'})\nsns.set_palette('colorblind')\n\n# Make everything bigger for visibility during presentation\nrcParams['figure.figsize'] = 20, 10\nrcParams['axes.titlesize'] = 'xx-large'\nrcParams['axes.labelsize'] = 'x-large'\nrcParams['xtick.labelsize'] = 'x-large'\nrcParams['ytick.labelsize'] = 'x-large'\nrcParams['legend.fontsize'] = 'xx-large'\nrcParams['lines.linewidth'] = 4.0\nrcParams['grid.linewidth'] = 2.0\n\n# Hide warnings in the notebook\nimport warnings\nwarnings.filterwarnings('ignore')", "Pandas\n<img src=\"pandas_logo.png\" width=\"50%\" />\nPandas is a library that provides data analysis tools for the Python programming language. You can think of it as Excel on steroids, but in Python.\nTo start off, I've used the meetup API to gather a bunch of data on members of the DataPhilly meetup group. First let's start off by looking at the events we've had over the past few years. I've loaded the data into a pandas DataFrame and stored it in the file events.pkl. A DataFrame is a table similar to an Excel spreadsheet. Let's load it and see what it looks like:\nDataPhilly events dataset", "events_df = pd.read_pickle('events.pkl')\nevents_df = events_df.sort_values(by='time')\nevents_df", "You can access values in a DataFrame column like this:", "events_df['yes_rsvp_count']", "You can access a row of a DataFrame using iloc:", "events_df.iloc[4]", "We can view the first few rows using the head method:", "events_df.head()", "And similarly the last few using tail:", "events_df.tail(3)", "We can see that the yes_rsvp_count contains the number of people who RSVPed yes for each event. First let's look at some basic statistics:", "yes_rsvp_count = events_df['yes_rsvp_count']\nyes_rsvp_count.sum(), yes_rsvp_count.mean(), yes_rsvp_count.min(), yes_rsvp_count.max()", "When we access a single column of the DataFrame like this we get a Series object which is just a 1-dimensional version of a DataFrame.", "type(yes_rsvp_count)", "We can use the built-in describe method to print out a lot of useful stats in a nice tabular format:", "yes_rsvp_count.describe()", "Next I'd like to graph the number of RSVPs over time to see if there are any interesting trends. To do this let's first sum the waitlist_count and yes_rsvp_count columns and make a new column called total_RSVP_count.", "events_df['total_RSVP_count'] = events_df['waitlist_count'] + events_df['yes_rsvp_count']\nevents_df['total_RSVP_count']", "We can plot these values using the plot method", "events_df['total_RSVP_count'].plot()", "The plot method utilizes the matplotlib library behind the scenes to draw the plot. 
This is interesting, but it would be nice to have the dates of the meetups on the X-axis of the plot.\nTo accomplish this, let's convert the time field from a unix epoch timestamp to a python datetime utilizing the apply method and a function.", "events_df.head(2)\n\nimport datetime\ndef get_datetime_from_epoch(epoch):\n return datetime.datetime.fromtimestamp(epoch/1000.0)\n\nevents_df['time'] = events_df['time'].apply(get_datetime_from_epoch)\nevents_df['time']", "Next let's make the time column the index of the DataFrame using the set_index method and then re-plot our data.", "events_df.set_index('time', inplace=True)\nevents_df[['total_RSVP_count']].plot()", "We can also easily plot multiple columns on the same plot.", "all_rsvps = events_df[['yes_rsvp_count', 'waitlist_count', 'total_RSVP_count']]\nall_rsvps.plot(title='Attendance over time')", "DataPhilly members dataset\nAlright so I'm seeing some interesting trends here. Let's take a look at something different.\nThe Meetup API also provides us access to member info. Let's have a look at the data we have available:", "members_df = pd.read_pickle('members.pkl')\nfor column in ['joined', 'visited']:\n members_df[column] = members_df[column].apply(get_datetime_from_epoch)\nmembers_df.head(3)", "You'll notice that I've anonymized the meetup member_id and the member's name. I've also used the python module SexMachine to infer members gender based on their first name. I ran SexMachine on the original names before I anonymized them. Let's have a closer look at the gender breakdown of our members:", "gender_counts = members_df['gender'].value_counts()\ngender_counts", "Next let's use the hist method to plot a histogram of membership_count. This is the number of groups each member is in.", "members_df['membership_count'].hist(bins=20)", "Something looks odd here let's check out the value_counts:", "members_df['membership_count'].value_counts().head()", "Okay so most members are members of 0 meetup groups?! This seems odd! I did a little digging and came up with the answer; members can set their membership details to be private, and then this value will be zero. Let's filter out these members and recreate the histogram.", "members_df_non_zero = members_df[members_df['membership_count'] != 0]\nmembers_df_non_zero['membership_count'].hist(bins=50)", "Okay so most members are only members of a few meetup groups. 
There's some outliers that are pretty hard to read, let's try plotting this on a logarithmic scale to see if that helps:", "ax = members_df_non_zero['membership_count'].hist(bins=50)\nax.set_yscale('log')\nax.set_xlim(0, 500)", "Let's use a mask to filter out the outliers so we can dig into them a little further:", "all_the_meetups = members_df[members_df['membership_count'] > 100]\nfiltered = all_the_meetups[['membership_count', 'city', 'country', 'state']]\nfiltered.sort_values(by='membership_count', ascending=False)", "The people from Philly might actually be legitimate members, let's use a compound mask to filter them out as well:", "all_the_meetups = members_df[\n (members_df['membership_count'] > 100) & (members_df['city'] != 'Philadelphia')\n]\nfiltered = all_the_meetups[['membership_count', 'city', 'country', 'state']]\nfiltered.sort_values(by='membership_count', ascending=False)", "That's strange, I don't think we've ever had any members from Berlin, San Francisco, or Jerusalem in attendance :-).\nThe RSVP dataset\nMoving on, we also have all the events that each member RSVPed to:", "rsvps_df = pd.read_pickle('rsvps.pkl')\nrsvps_df.head(3)", "<img src=\"inner_join.png\" width=\"50%\" />\nWe can utilize the pandas merge method to join our members DataFrame and our rsvps DataFrame:", "joined_with_rsvps_df = pd.merge(members_df, rsvps_df, left_on='anon_id', right_on='member_id')\njoined_with_rsvps_df.head(3)\n\njoined_with_rsvps_df.columns", "Now we have a ton of data, let's see what kind of interesting things we can discover.\nLet's look at the some stats on male attendees vs. female attendees:\nFirst we can use the isin method to make DataFrames for male and female members.", "male_attendees = joined_with_rsvps_df[joined_with_rsvps_df['gender'].isin(['male', 'mostly_male'])]\nmale_attendees.tail(3)\n\nfemale_attendees = joined_with_rsvps_df[joined_with_rsvps_df['gender'].isin(['female', 'mostly_female'])]\nfemale_attendees.tail(3)", "Next we can use the sum method to count the number of male and female attendees per event and create a Series for each.", "event_ids = [\n '102502622', '106043892', '107740582', '120425212', '133803672', '138415912', '144769822', '149515412',\n '160323532', '168747852', '175993712', '182860422', '206754182', '215265722', '219055217', '219840555',\n '220526799', '221245827', '225488147', '89769502', '98833672'\n]\nmale_attendees[event_ids].sum().head(3)", "We can then recombine the male and female Series' into a new DataFrame.", "gender_attendance = pd.DataFrame({'male': male_attendees[event_ids].sum(), 'female': female_attendees[event_ids].sum()})\ngender_attendance.head(3)", "And then we can use merge again to combine this with our events DataFrame.", "events_with_gender_df = pd.merge(events_df, gender_attendance, left_on='id', right_index=True)\nevents_with_gender_df.head(3)", "The we can plot the attendance by gender over time", "gender_df = events_with_gender_df[['female', 'male']]\ngender_df.plot(title='Attendance by gender over time')", "This might be easier to interpret by looking at the percentage of females in attendance. We can use the div (divide) method to calculate this.", "female_ratio = gender_df['female'].div(gender_df['male'] + gender_df['female'])\nfemale_ratio.plot(title='Percentage female attendance over time', ylim=(0.0, 1.0))", "The members DataFrame also has some other interesting stuff in it. Let's take a look at the topics column.", "members_df['topics'].iloc[0]", "Let's see if we can identify any trends in member's topics. 
Let's start off by identifying the most common topics:", "from collections import Counter\n\ntopic_counter = Counter()\nfor m in members_df['topics']:\n topic_counter.update([t['name'] for t in m])\ntopic_counter.most_common(20)", "Next let's create a new DataFrame where each column is one of the top 100 topics, and each row is a member. We'll set the values of each cell to be either 0 or 1 to indicate that that member has (or doesn't have) that topic.", "top_100_topics = set([t[0] for t in topic_counter.most_common(100)])\ntopic_member_map = {}\nfor i, m in members_df.iterrows():\n if m['topics']:\n top_topic_count = {}\n for topic in m['topics']:\n if topic['name'] in top_100_topics:\n top_topic_count[topic['name']] = 1\n topic_member_map[m['anon_id']] = top_topic_count\n \ntop_topic_df = pd.DataFrame(topic_member_map)\ntop_topic_df.head(3)", "Okay for what I'm going to do next, I want the rows to be the members and the columns to be the topics. We can use the T (transpose) method to fix this.", "top_topic_df = top_topic_df.T\ntop_topic_df.head(3)", "Next we can use the fillna method to fill in the missing values with zeros.", "top_topic_df.fillna(0, inplace=True)\ntop_topic_df.head(3)", "Next let's use a clustering algorithm to see if there are any patterns in the topics members are interested in. A clustering algorithm groups a set of data points so that similar objects are in the same group. This is a classic type of unsupervised machine learning. Below you can find visualisations of how different clustering algorithms perform on various kinds of data:\n<img src=\"plot_cluster_comparison_001.png\" width=\"90%\" />\nKmeans clustering is quick and can scale well to larger datasets. Let's see how it performs on our dataset:\nscikit-learn\n<img src=\"scikit-learn-logo-notext.png\" width=\"20%\" />\nWe'll use a python machine learning library called scikit-learn to do the clustering.", "from sklearn.cluster import MiniBatchKMeans as KMeans\nX = top_topic_df.as_matrix()\nn_clusters = 3\nk_means = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10, random_state=47)\nk_means.fit(X)\nk_means.labels_", "We've grouped our members into 3 clusters, let's see how many members are in each cluster", "Counter(list(k_means.labels_)).most_common()", "Next let's see which topics are most popular in each cluster:", "from collections import defaultdict\n\ncluster_index_map = defaultdict(list)\nfor i in range(k_means.labels_.shape[0]):\n cluster_index_map[k_means.labels_[i]].append(top_topic_df.index[i])\n\nfor cluster_num in range(n_clusters):\n print 'Cluster {}'.format(cluster_num)\n f = top_topic_df[top_topic_df.index.isin(cluster_index_map[cluster_num])].sum()\n f2 = f[f > 0]\n f3 = f2.sort_values(ascending=False)\n print f3[:10]\n print", "So it looks like our biggest cluster (#2) contains members whose primary interest is data science.\nThe second biggest cluster (#1) contains members whose primary interests are technology, and data science just happens to be one of those interests.\nThe smallest cluster (#0) contains members whose primary interests are around socializing.\nBased on this information we might be able to engage members in the \"social\" (#0) cluster by having more socially oriented events. We might be able to engaged with the members in cluster (#1) by having more events geared toward beginners.\nConclusion\nHopefully you learned a little bit about DataPhilly and doing exploratory analysis in Python. 
There's tons of extra data in our datasets that I don't even have time to get into today. If you feel like you missed anything and would like to revisit it, you can find this Notebook and instructions for how to use it in my github repo http://github.com/mdbecker/. If you find something interesting in the data and you'd like to share it with me, I'm @beckerfuffle on Twitter, and you can always contact me through the DataPhilly Meetup page." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ds-modules/LINGUIS-110
FormantsUpdated/Assignment.ipynb
mit
[ "Linguistics 110: Vowel Formants\nProfessor Susan Lin\nIn this notebook, we use both data from an outside source and that the class generated to explore the relationships between formants, gender, and height.\nTable of Contents\n1 - Exploring TIMIT Data\n2 - Using the Class's Data\n3 - Vowel Spaces\n4 - Variation in Vowel Spaces\n5 - Formants vs Height\nRemember that to run a cell, you can either click the play button in the toolbar, or you can press shift and enter on your keyboard. To get a quick review of Jupyter notebooks, you can look at the VOT Notebook. Make sure to run the following cell before you get started.", "# DON'T FORGET TO RUN THIS CELL\nimport math\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport datascience as ds\nimport matplotlib.pyplot as plt\nsns.set_style('darkgrid')\n\n%matplotlib inline\n\nimport warnings\nwarnings.filterwarnings('ignore')", "Exploring TIMIT Data <a id='timit'></a>\nWe will start off by exploring TIMIT data taken from 8 different regions. These measurements are taken at the midpoint of vowels, where vowel boundaries were determined automatically using forced alignment.\nUploading the data\nPrior to being able to work with the data, we have to upload our dataset. The following two lines of code will read in our data and create a dataframe. The last line of code prints the timit dataframe, but instead of printing the whole dataframe, by using the method .head, it only prints the first 5 rows.", "timit = pd.read_csv('data/timitvowels.csv')\ntimit.head()", "Look at the dataframe you created and try to figure out what each column measures. Each column represents a different attribute, see the following table for more information.\n|Column Name|Details|\n|---|---|\n|speaker|unique speaker ID|\n|gender|Speaker’s self-reported gender|\n|region|Speaker dialect region number|\n|word|Lexical item (from sentence prompt)|\n|vowel|Vowel ID|\n|duration|Vowel duration (seconds)|\n|F1/F2/F3/f0|f0 and F1-F3 in BPM (Hz)|\nSometimes data is encoded with with an identifier, or key, to save space and simplify calculations. Each of those keys corresponds to a specific value. If you look at the region column, you will notice that all of the values are numbers. Each of those numbers corresponds to a region, for example, in our first row the speaker, cjf0, is from region 1. That corresponds to New England. Below is a table with all of the keys for region.\n|Key|Region|\n|---|---|\n|1|New England|\n|2|Northern|\n|3|North Midland|\n|4|South Midland|\n|5|Southern|\n|6|New York City|\n|7|Western|\n|8|Army Brat|\nTransformations\nWhen inspecting data, you may realize that there are changes to be made -- possibly due to the representation to the data or errors in the recording. Before jumping into analysis, it is important to clean the data. \nOne thing to notice about timit is that the column vowel contains ARPABET identifiers for the vowels. We want to convert the vowel column to be IPA characters, and will do so in the cell below.", "IPAdict = {\"AO\" : \"ɔ\", \"AA\" : \"ɑ\", \"IY\" : \"i\", \"UW\" : \"u\", \"EH\" : \"ɛ\", \"IH\" : \"ɪ\", \"UH\":\"ʊ\", \"AH\": \"ʌ\", \"AX\" : \"ə\", \"AE\":\"æ\", \"EY\" :\"eɪ\", \"AY\": \"aɪ\", \"OW\":\"oʊ\", \"AW\":\"aʊ\", \"OY\" :\"ɔɪ\", \"ER\":\"ɚ\"}\ntimit['vowel'] = [IPAdict[x] for x in timit['vowel']]\ntimit.head()", "Most of the speakers will say the same vowel multiple times, so we are going to average those values together. 
The end result will be a dataframe where each row represents the average values for each vowel for each speaker.", "timit_avg = timit.groupby(['speaker', 'vowel', 'gender', 'region']).mean().reset_index()\ntimit_avg.head()", "Splitting on Gender\nUsing the same dataframe from above, timit_avg, we are going to split into dataframes grouped by gender. To identify the possible values of gender in the gender column, we can use the method .unique on the column.", "timit_avg.gender.unique()", "You could see that for this specific dataset there are only \"female\" and \"male\" values in the column. Given that information, we'll create two subsets based off of gender. \nWe'll split timit_avg into two separate dataframes, one for females, timit_female, and one for males, timit_male. Creating these subset dataframes does not affect the original timit_avg dataframe.", "timit_female = timit_avg[timit_avg['gender'] == 'female']\ntimit_male = timit_avg[timit_avg['gender'] == 'male']", "Distribution of Formants\nWe want to inspect the distributions of F1, F2, and F3 for those that self-report as male and those that self-report as female to identify possible trends or relationships. Having our two split dataframes, timit_female and timit_male, eases the plotting process. \nRun the cell below to see the distribution of F1.", "sns.distplot(timit_female['F1'], kde_kws={\"label\": \"female\"})\nsns.distplot(timit_male['F1'], kde_kws={\"label\": \"male\"})\nplt.title('F1')\nplt.xlabel(\"Hz\")\nplt.ylabel('Proportion per Hz');", "Does there seem to be a notable difference between male and female distributions of F1?\nNext, we plot F2.", "sns.distplot(timit_female['F2'], kde_kws={\"label\": \"female\"})\nsns.distplot(timit_male['F2'], kde_kws={\"label\": \"male\"})\nplt.title('F2')\nplt.xlabel(\"Hz\")\nplt.ylabel('Proportion per Hz');", "Finally, we create the same visualization, but for F3.", "sns.distplot(timit_female['F3'], kde_kws={\"label\": \"female\"})\nsns.distplot(timit_male['F3'], kde_kws={\"label\": \"male\"})\nplt.title('F3')\nplt.xlabel(\"Hz\")\nplt.ylabel('Proportion per Hz');", "Do you see a more pronounced difference across the the different F values? Are they the same throughout? Can we make any meaningful assumptions from these visualizations?\nAn additional question: How do you think the fact that we average each vowel together first for each individual affects the shape of the histograms?\nUsing the Class's Data <a id='cls'></a>\nThis portion of the notebook will rely on the data that was submit for HW5. Just like we did for the TIMIT data, we are going to read it into a dataframe and modify the column vowel to reflect the corresponding IPA translation. We will name the dataframe class_data.", "# reading in the data\nclass_data = pd.read_csv('data/110_formants.csv')\nclass_data.head()", "The ID column contains a unique value for each individual. Each individual has a row for each of the different vowels they measured.", "# translating the vowel column\nclass_data['vowel'] = [IPAdict[x] for x in class_data['vowel']]\nclass_data.head()", "Splitting on Gender\nAs we did with the TIMIT data, we are going to split class_data based on self-reported gender. We need to figure out what the possible responses for the column were.", "class_data['Gender'].unique()", "Notice that there are three possible values for the column. We do not have a large enough sample size to responsibly come to conclusions for Prefer not to answer, so for now we'll compare Male and Female. 
We'll call our new split dataframes class_female and class_male.", "class_female = class_data[class_data['Gender'] == 'Female']\nclass_male = class_data[class_data['Gender'] == 'Male']", "Comparing Distributions\nThe following visualizations compare the the distribution of formants for males and females, like we did for the TIMIT data.\nFirst, we'll start with F1.", "sns.distplot(class_female['F1'], kde_kws={\"label\": \"female\"})\nsns.distplot(class_male['F1'], kde_kws={\"label\": \"male\"})\nplt.title('F1')\nplt.xlabel(\"Hz\")\nplt.ylabel('Proportion per Hz');", "Next is F2.", "sns.distplot(class_female['F2'], kde_kws={\"label\": \"female\"})\nsns.distplot(class_male['F2'], kde_kws={\"label\": \"male\"})\nplt.title('F2')\nplt.xlabel(\"Hz\")\nplt.ylabel('Proportion per Hz');", "And finally F3.", "sns.distplot(class_female['F3'], kde_kws={\"label\": \"female\"})\nsns.distplot(class_male['F3'], kde_kws={\"label\": \"male\"})\nplt.title('F3')\nplt.xlabel(\"Hz\")\nplt.ylabel('Proportion per Hz');", "Do the spread of values appear to be the same for females and males? Do the same patterns that occur in the TIMIT data appear in the class's data?\nVowel Spaces <a id='vs'></a>\nRun the cell below to define some functions that we will be using.", "def plot_blank_vowel_chart():\n im = plt.imread('images/blankvowel.png')\n plt.imshow(im, extent=(plt.xlim()[0], plt.xlim()[1], plt.ylim()[0], plt.ylim()[1]))\n\ndef plot_vowel_space(avgs_df):\n plt.figure(figsize=(10, 8))\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n \n vowels = ['eɪ', 'i', 'oʊ', 'u', 'æ', 'ɑ', 'ɚ', 'ɛ', 'ɪ', 'ʊ', 'ʌ'] + ['ɔ']\n \n for i in range(len(avgs_df)):\n plt.scatter(avgs_df.loc[vowels[i]]['F2'], avgs_df.loc[vowels[i]]['F1'], marker=r\"$ {} $\".format(vowels[i]), s=1000)\n \n plt.ylabel('F1')\n plt.xlabel('F2')", "We are going to be recreating the following graphic from this website.\n\nBefore we can get to creating, we need to get a singular value for each column for each of the vowels (so we can create coordinate pairs). To do this, we are going to find the average formant values for each of the vowels in our dataframes. We'll do this for both timit and class_data.", "class_vowel_avgs = class_data.drop('ID', axis=1).groupby('vowel').mean()\nclass_vowel_avgs.head()\n\ntimit_vowel_avgs = timit.groupby('vowel').mean()\ntimit_vowel_avgs.head()", "Each of these new tables has a row for each vowel, which comprisises of the averaged values across all speakers.\nPlotting the Vowel Space\nRun the cell below to construct a vowel space for the class's data, in which we plot F1 on F2.\nNote that both axes are descending.", "plot_vowel_space(class_vowel_avgs)\nplt.xlabel('F2 (Hz)')\nplt.ylabel('F1 (Hz)');", "Using Logarithmic Axes\nIn our visualization above, we use linear axes in order to construct our vowel space. The chart we are trying to recreate has logged axes (though the picture does not indicate it). Below we log-transform all of the values in our dataframes.", "log_timit_vowels = timit_vowel_avgs.apply(np.log)\nlog_class_vowels = class_vowel_avgs.apply(np.log)\nclass_data['log(F1)'] = np.log(class_data['F1'])\nclass_data['log(F2)'] = np.log(class_data['F2'])\nlog_class_vowels.head()", "Below we plot the vowel space using these new values.", "plot_vowel_space(log_class_vowels)\nplt.xlabel('log(F2) (Hz)')\nplt.ylabel('log(F1) (Hz)');", "What effect does using the logged values have, if any? What advantages does using these values have? Are there any negatives? 
This paper might give some ideas.\nOverlaying a Vowel Space Chart\nFinally, we are going to overlay a blank vowel space chart outline to see how close our data reflects the theoretical vowel chart.", "plot_vowel_space(log_class_vowels)\nplot_blank_vowel_chart()\nplt.xlabel('log(F2) (Hz)')\nplt.ylabel('log(F1) (Hz)');", "How well does it match the original?\nBelow we generate the same graph, except using the information from the TIMIT dataset.", "plot_vowel_space(log_timit_vowels)\nplot_blank_vowel_chart()\nplt.xlabel('log(F2) (Hz)')\nplt.ylabel('log(F1) (Hz)');", "How does the TIMIT vowel space compare to the vowel space from our class data? What may be the cause for any differences between our vowel space and the one constructed using the TIMIT data? Do you notice any outliers or do any points that seem off?\nVariation in Vowel Spaces <a id='vvs'></a>\nIn the following visualizations, we are going to show each individual vowel from each person in the F2 and F1 dimensions (logged). Each color corresponds to a different vowel -- see the legend for the exact pairs.", "sns.lmplot('log(F2)', 'log(F1)', hue='vowel', data=class_data, fit_reg=False, size=8, scatter_kws={'s':30})\nplt.xlim(8.2, 6.7)\nplt.ylim(7.0, 5.7);", "In the following visualization, we replace the colors with the IPA characters and attempt to clump the vowels together.", "plt.figure(figsize=(10, 12))\npick_vowel = lambda v: class_data[class_data['vowel'] == v]\ncolors = ['Greys_r', 'Purples_r', 'Blues_r', 'Greens_r', 'Oranges_r', \\\n 'Reds_r', 'GnBu_r', 'PuRd_r', 'winter_r', 'YlOrBr_r', 'pink_r', 'copper_r']\n\nfor vowel, color in list(zip(class_data.vowel.unique(), colors)):\n vowel_subset = pick_vowel(vowel)\n sns.kdeplot(vowel_subset['log(F2)'], vowel_subset['log(F1)'], n_levels=1, cmap=color, shade=False, shade_lowest=False)\n\nfor i in range(1, len(class_data)+1):\n plt.scatter(class_data['log(F2)'][i], class_data['log(F1)'][i], color='black', linewidths=.5, marker=r\"$ {} $\".format(class_data['vowel'][i]), s=40)\n\nplt.xlim(8.2, 6.7)\nplt.ylim(7.0, 5.7);", "Formants vs Height <a id='fvh'></a>\nWe are going to compare each of the formants and height to see if there is a relationship between the two. To help visualize that, we are going to plot a regression line, which is also referred to as the line of best fit.\nWe are going to use the maximum of each formant to compare to height. So for each speaker, we will calculate their greatest F1, F2, and F3 across all vowels, then compare one of those to their height. We create the necessary dataframe in the cell below using the class's data.", "genders = class_data['Gender']\nplotting_data = class_data.drop('vowel', axis=1)[np.logical_or(genders == 'Male', genders == 'Female')]\nmaxes = plotting_data.groupby(['ID', 'Gender']).max().reset_index()[plotting_data.columns[:-2]]\nmaxes.columns = ['ID', 'Language', 'Gender', 'Height', 'Max F1', 'Max F2', 'Max F3']\nmaxes_female = maxes[maxes['Gender'] == 'Female']\nmaxes_male = maxes[maxes['Gender'] == 'Male']\nmaxes.head()", "First we will plot Max F1 against Height.\nNote: Each gender has a different color dot, but the line represents the line of best fit for ALL points.", "sns.regplot('Height', 'Max F1', data=maxes)\nsns.regplot('Height', 'Max F1', data=maxes_male, fit_reg=False)\nsns.regplot('Height', 'Max F1', data=maxes_female, fit_reg=False)\nplt.xlabel('Height (cm)')\nplt.ylabel('Max F1 (Hz)')\nprint('female: green')\nprint('male: orange')", "Is there a general trend for the data that you notice? 
What do you notice about the different color dots?\nNext, we plot Max F2 on Height.", "sns.regplot('Height', 'Max F2', data=maxes)\nsns.regplot('Height', 'Max F2', data=maxes_male, fit_reg=False)\nsns.regplot('Height', 'Max F2', data=maxes_female, fit_reg=False)\nplt.xlabel('Height (cm)')\nplt.ylabel('Max F2 (Hz)')\nprint('female: green')\nprint('male: orange')", "Finally, Max F3 vs Height.", "sns.regplot('Height', 'Max F3', data=maxes)\nsns.regplot('Height', 'Max F3', data=maxes_male, fit_reg=False)\nsns.regplot('Height', 'Max F3', data=maxes_female, fit_reg=False)\nplt.xlabel('Height (cm)')\nplt.ylabel('Max F3 (Hz)')\nprint('female: green')\nprint('male: orange')", "Do you notice a difference between the trends for the three formants?\nNow we are going to plot two lines of best fit -- one for males, one for females. Before we plotted one line for all of the values, but now we are separating by gender to see if gender explains some of the difference in formants values. \nFor now, we're going deal with just Max F1.", "sns.lmplot('Height', 'Max F1', data=maxes, hue='Gender')\nplt.xlabel('Height (cm)')\nplt.ylabel('Max F1 (Hz)');", "Is there a noticeable difference between the two? Did you expect this result?\nWe're going to repeat the above graph, plotting a different regression line for males and females, but this time, using timit -- having a larger sample size may help expose patterns. Before we do that, we have to repeat the process of calulating the maximum value for each formants for each speaker. Run the cell below to do that and generate the plot. The blue dots are females, the orange dots are males, and the green line is the regression line for all speakers.", "timit_maxes = timit.groupby(['speaker', 'gender']).max().reset_index()\ntimit_maxes.columns = ['speaker', 'gender', 'region', 'height', 'word', 'vowel', 'Max duration', 'Max F1', 'Max F2', 'Max F3', 'Max f0']\nplt.xlim(140, 210)\nplt.ylim(500, 1400)\nsns.regplot('height', 'Max F1', data=timit_maxes[timit_maxes['gender'] == 'female'], scatter_kws={'alpha':0.3})\nsns.regplot('height', 'Max F1', data=timit_maxes[timit_maxes['gender'] == 'male'], scatter_kws={'alpha':0.3})\nsns.regplot('height', 'Max F1', data=timit_maxes, scatter=False)\nplt.xlabel('Height (cm)')\nplt.ylabel('Max F1 (Hz)');", "Does this graph differ from the one based on class_data? If it does, what are some possible explanations for this? From the visualization, what can you say about height as a predictor of Max F1? Do you think gender plays a role in the value of Max F1?\nDo you think similar patterns would emerge for Max F2 and Max F3? We only used Max F1, but consider trying to plot them by copying some of the code from above and making slight alterations (remember that to insert a code cell below, you can either press esc + b or click Insert &gt; Insert Cell Below on the toolbar).\n\nPlease fill out our feedback form!" ]
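A short aside on the vowel-formant notebook above: its closing exercise suggests repeating the per-gender regression for Max F2 and Max F3. Below is a minimal sketch of one way to do that, assuming the `maxes` DataFrame built earlier in that notebook is in scope; it reuses only the seaborn calls already shown there.

```python
import matplotlib.pyplot as plt
import seaborn as sns

# Separate regression lines by gender for Max F2, mirroring the Max F1 lmplot.
sns.lmplot('Height', 'Max F2', data=maxes, hue='Gender')
plt.xlabel('Height (cm)')
plt.ylabel('Max F2 (Hz)')

# The same comparison for Max F3.
sns.lmplot('Height', 'Max F3', data=maxes, hue='Gender')
plt.xlabel('Height (cm)')
plt.ylabel('Max F3 (Hz)');
```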
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
lmoresi/UoM-VIEPS-Intro-to-Python
Notebooks/SolveMathProblems/0 - IntroductionToNumericalSolutions.ipynb
mit
[ "Numerical models\nWe start with the numerical solution of a very simple differential\nequation. In fact we choose something simple enough that we already \nknow the answer.\n\\[\n \\frac{d\\theta}{dt} = - k \\theta\n\\]\nThis is the equation which governs radioactive decay, in which case\n\\(\\theta \\) is the amount of the radioactive isotope remaining and \\(d\\theta / dt\\)\nis the activity that we can measure. \\(k \\) is closely related to the half life.\nThe solution to this equation is\n\\[\n \\theta(t) = \\theta_0 e^{-kt}\n\\]\nwhere \\(\\theta_0\\) is the amount of the radioactive material remaining.\nThe same equation also describes the cooling of, say, a cup of coffee. In this\ncase we interpret \\( \\theta \\) as the excess temperature (above room temperature). \n<img src=\"images/theta_t1.png\" width=\"66%\">", "%pylab inline\n\nimport numpy as np\n\ntheta_0 = 1.0\n\ntime_values = np.linspace(0,1.0,1000)\n\nfor const_k in [1.0, 3.1, 10.0, 31, 100.0]:\n\n exact_theta_values = theta_0 * np.exp(-const_k * time_values)\n plot(time_values, exact_theta_values)\n\n\npass", "We want to be able to march forward in time from our starting point\n(just like the picture above)\nwhere $\\theta = \\theta_0$ to obtain the value of $\\theta$ at\nlater times. To do this, we need to approximate the original\ndifferential equation, and, in particular, the value of the time\nderivative at each time. There are a number of ways to do this.\nFirst order numerical approximation\nAssume that the variation in \\(\\theta(t) \\) is linear, i.e.\n\\[\n \\theta(t') = \\theta_n + \\beta t'\n\\]\nwhere we use a local time coordinate \\(t' = t - n\\Delta t\\), so that when we differentiate\n\\[\n \\frac{d \\theta}{dt} = \\beta\n\\]\nTo determine the approximation for the derivative therefore\nbecomes the solution to the following equation:\n\\[\n \\begin{split}\n & \\theta_{n+1} = \\theta_n + \\beta \\Delta t \\\n & \\Rightarrow \\beta = \\frac{d \\theta}{dt} = \\frac{\\theta_{n+1} - \\theta_n}{\\Delta t}\n \\end{split}\n\\]\nThis is a first order difference expression for the derivative which we\nsubstitute into the original differential equation for radioactive decay at\nthe current timestep\n\\[\n \\frac{\\theta_{n+1} - \\theta_n}{\\Delta t} = - k \\theta_n\n\\]\nThis rearranges to give us a time-marching algorithm:\n\\[\n \\theta_{n+1} = \\theta_n (1-k \\Delta t)\n\\]\nIt is an indication of the fact that this problem is really not all that difficult\nthat this difference equation can be written recursively\nto give:\n\\[\n \\theta_{n+1} = \\theta_0 (1-k \\Delta t)^n\n\\]\nIn a moment we will compute some values for this expression to see how\naccurate it is. First we consider whether we can improve the accuracy of the\napproximation by doing a bit more work.", "steps = 10\ntheta_0 = 1.0\nconst_k = 10.0\ndelta_t = 1.0 / steps\n\n\ntheta_values = np.zeros(steps)\ntime_values = np.zeros(steps)\n\n\ntheta_values[0] = theta_0\ntime_values[0] = 0.0\n\nfor i in range(1, steps):\n theta_values[i] = theta_values[i-1] * (1 - const_k * delta_t)\n time_values[i] = time_values[i-1] + delta_t\n\nexact_theta_values = theta_0 * np.exp(-const_k * time_values)\n \nplot(time_values, exact_theta_values, linewidth=5.0)\nplot(time_values, theta_values, linewidth=3.0, color=\"red\")\n", "Higher order expansion\nFirst we try fitting the local expansion for \\(\\theta\\) through an\nadditional point. 
\nThis time we assume that the variation in \\(\\theta(t)\\) is quadratic, i.e.\n$$\n \\theta(t') = \\theta_{n-1} + \\beta t' + \\gamma {t'}^2\n$$\nThe local time coordinate is $t' = t - (n-1)\\Delta t$, and when we differentiate\n$$ \n \\frac{d \\theta}{dt} = \\beta + 2 \\gamma t'\n$$ \nTo solve for \\(\\beta\\) and \\(\\gamma\\) we fit the curve through the sample points:\n$$\n \\begin{split}\n \\theta_n &= \\theta_{n-1} + \\beta \\Delta t + \\gamma (\\Delta t)^2 \\\n \\theta_{n+1} &= \\theta_{n-1} + 2 \\beta \\Delta t + 4 \\gamma (\\Delta t)^2\n \\end{split}\n$$\nThese solve to give\n$$\n \\begin{split}\n \\beta &= \\left( 4 \\theta_n - \\theta_{n+1} - 3\\theta_{n-1} \\right) \\frac{1}{2\\Delta t} \\\n \\gamma &= \\left( \\theta_{n+1} + \\theta_{n-1} -2 \\theta_n \\right) \\frac{1}{2\\Delta t^2} \n \\end{split}\n$$\nWe can substitute this back into the equation above and then into the original differential equation and we obtain the following\n$$ \n \\left. \\frac{d\\theta}{dt} \\right|_{t=n\\Delta t} = \\beta + 2\\gamma \\Delta t =\n \\frac{1}{2\\Delta t} \\left( \\theta_{n+1} - \\theta_{n-1} \\right) = -k \\theta_n \n$$\nThe difference approximation to the derivative turns out to be the average of the expressions for the previous derivative and the new derivative. We have now included information about the current timestep and the previous timestep in our expression for the value of \\(\\theta\\) at the forthcoming timestep: \n$$\n \\theta_{n+1} = \\theta_{n-1} -2k \\theta_n \\Delta t\n$$", "steps = 100\ntheta_0 = 1.0\nconst_k = 10.0\ndelta_t = 1.0 / steps\n\ntheta_values = np.zeros(steps)\ntime_values = np.zeros(steps)\n\ntheta_values[0] = theta_0\ntime_values[0] = 0.0\n\ntheta_values[1] = theta_values[0] * (1 - const_k * delta_t)\ntime_values[1] = delta_t\n\nfor i in range(2, steps):\n theta_values[i] = theta_values[i-2] - 2.0 * theta_values[i-1] * const_k * delta_t\n time_values[i] = time_values[i-1] + delta_t\n\nexact_theta_values = theta_0 * np.exp(-const_k * time_values)\n \nplot(time_values, exact_theta_values, linewidth=5.0)\nplot(time_values, theta_values, linewidth=3.0, color=\"red\")", "The results are more accurate when a smaller timestep is used although it\nrequires more computation to achieve the greater accuracy. Higher order expansion\nalso increases the accuracy and may be more efficient in terms of the number of computations\nrequired for a given level of accuracy.\nNote, however, that the supposedly better quadratic expansion produces an error which\noscillates as time increases. Does this error grow? Does this make second order\nexpansions useless?\nSecond Order Runge-Kutta\n<img src=\"images/theta_rk2-1.png\" width=\"66%\">\nThe Runge-Kutta approach to higher order integration methods is\nillustrated in the figure above. The idea is to estimate the \ngradient \\(d \\theta / d t\\) at the halfway point between two\ntimestep values. This is done in two stages. Initially a \nfirst order estimate, \\( \\hat{\\theta} \\) is made for the value of the function\n\\( \\theta\\) at \\(t=t+\\Delta t /2\\) in the future. This value is then\nsubstituted into the differential equation to obtain the\nestimate for the gradient at this time. The revised gradient is\nthen used to update the original \\(\\theta(t)\\) by an entire timestep.\nThe first order step is\n$$\n \\begin{split}\n \\hat{\\theta}(t+\\Delta t /2) & = \\theta(t) + \\left. 
\\frac{d \\theta}{d t} \\right|_t \\frac{\\Delta t}{2} \\\n &= \\theta(t) \\left[ 1-\\frac{k\\Delta t}{2} \\right]\n \\end{split}\n$$\nSubstitute to estimate the gradient at the mid-point\n$$\n \\left. \\frac{d \\theta}{d t} \\right|_{t+\\Delta t /2} \\approx -k \\theta(t) \\left[ 1-\\frac{k\\Delta t}{2} \\right]\n$$\nUse this value as the average gradient over the interval \\( t\\rightarrow t+\\Delta t\\) to update \\(\\theta\\)\n$$\n \\begin{split}\n \\theta(t+\\Delta t) & \\approx \\theta(t) + \\delta t \\left( -k \\theta(t) \\left[ 1-\\frac{k\\Delta t}{2} \\right] \\right) \\\n & \\approx \\theta(t) \\left( 1 - k \\Delta t + k^2 \\frac{\\Delta t^2}{2} \\right)\n \\end{split}\n$$\nIt's worth noting that the Taylor expansion of the solution should look like\n$$ \n e^{-kt} = 1 - kt + \\frac{k^2 t^2}{2!} - \\frac{k^3 t^3}{3!} + \\ldots\n$$\nThe Runge Kutta method can be extended by repeating the estimates on smaller regions of the interval. The usual choice is fourth order RK. This is largely because, obviously, it's accurate to fourth order, but also because the number of operations to go higher than fourth order is disproportionately large. See Numerical Recipes for a discussion on this and better methods for ODE's.", "steps = 100\ntheta_0 = 1.0\nconst_k = 10.0\ndelta_t = 1.0 / steps\n\n\ntheta_values = np.zeros(steps)\ntime_values = np.zeros(steps)\n\n\ntheta_values[0] = theta_0\ntime_values[0] = 0.0\n\nfor i in range(1, steps):\n theta_values[i] = theta_values[i-1] * (1 - const_k * delta_t + const_k**2 * delta_t**2 / 2.0)\n time_values[i] = time_values[i-1] + delta_t\n\nexact_theta_values = theta_0 * np.exp(-const_k * time_values)\n \nplot(time_values, exact_theta_values, linewidth=5.0)\nplot(time_values, theta_values, linewidth=3.0, color=\"red\")\n" ]
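The notes above mention that fourth-order Runge-Kutta is the usual choice in practice. Below is a minimal sketch of the classical RK4 scheme applied to the same decay equation dθ/dt = −kθ, written with explicit imports rather than the notebook's %pylab namespace; the parameter values simply mirror the earlier examples.

```python
import numpy as np
import matplotlib.pyplot as plt

def rk4_decay(theta_0, const_k, steps):
    """Classical fourth-order Runge-Kutta for d(theta)/dt = -k * theta."""
    delta_t = 1.0 / steps
    rhs = lambda theta: -const_k * theta      # right-hand side of the ODE
    theta = np.zeros(steps)
    time = np.zeros(steps)
    theta[0] = theta_0
    for i in range(1, steps):
        k1 = rhs(theta[i-1])
        k2 = rhs(theta[i-1] + 0.5 * delta_t * k1)
        k3 = rhs(theta[i-1] + 0.5 * delta_t * k2)
        k4 = rhs(theta[i-1] + delta_t * k3)
        theta[i] = theta[i-1] + (delta_t / 6.0) * (k1 + 2.0*k2 + 2.0*k3 + k4)
        time[i] = time[i-1] + delta_t
    return time, theta

time_values, theta_values = rk4_decay(theta_0=1.0, const_k=10.0, steps=100)
exact = np.exp(-10.0 * time_values)

plt.plot(time_values, exact, linewidth=5.0)
plt.plot(time_values, theta_values, linewidth=3.0, color="red")
plt.show()
```

The local error of RK4 scales as Δt⁵, so it reaches a given accuracy with far fewer steps than the first-order scheme, at the cost of four right-hand-side evaluations per step.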
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
metpy/MetPy
dev/_downloads/9041777e133eed610f5b243c688e89f9/surface_declarative.ipynb
bsd-3-clause
[ "%matplotlib inline", "Surface Analysis using Declarative Syntax\nThe MetPy declarative syntax allows for a simplified interface to creating common\nmeteorological analyses including surface observation plots.", "from datetime import datetime, timedelta\n\nimport cartopy.crs as ccrs\nimport pandas as pd\n\nfrom metpy.cbook import get_test_data\nimport metpy.plots as mpplots", "Getting the data\nIn this example, data is originally from the Iowa State ASOS archive\n(https://mesonet.agron.iastate.edu/request/download.phtml) downloaded through a separate\nPython script. The data are pre-processed to determine sky cover and weather symbols from\ntext output.", "data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),\n infer_datetime_format=True, parse_dates=['valid'])", "Plotting the data\nUse the declarative plotting interface to plot surface observations over the state of\nGeorgia.", "# Plotting the Observations using a 15 minute time window for surface observations\nobs = mpplots.PlotObs()\nobs.data = data\nobs.time = datetime(1993, 3, 12, 13)\nobs.time_window = timedelta(minutes=15)\nobs.level = None\nobs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']\nobs.locations = ['NW', 'SW', 'NE', 'C', 'W']\nobs.colors = ['red', 'green', 'black', 'black', 'blue']\nobs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',\n 'current_weather']\nobs.vector_field = ('uwind', 'vwind')\nobs.reduce_points = 1\n\n# Add map features for the particular panel\npanel = mpplots.MapPanel()\npanel.layout = (1, 1, 1)\npanel.area = 'ga'\npanel.projection = ccrs.PlateCarree()\npanel.layers = ['coastline', 'borders', 'states']\npanel.plots = [obs]\n\n# Collecting panels for complete figure\npc = mpplots.PanelContainer()\npc.size = (10, 10)\npc.panels = [panel]\n\n# Showing the results\npc.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
letsgoexploring/teaching
winter2017/econ129/python/Econ129_Winter2017_Homework2.ipynb
mit
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n%matplotlib inline", "Homework 2 (DUE: Thursday February 16)\nInstructions: Complete the instructions in this notebook. You may work together with other students in the class and you may take full advantage of any internet resources available. You must provide thorough comments in your code so that it's clear that you understand what your code is doing and so that your code is readable.\nSubmit the assignment by saving your notebook as an html file (File -> Download as -> HTML) and uploading it to the appropriate Dropbox folder on EEE.\nQuestion 1\nFor each of the following first-difference processes, compute the values of $y$ from $t=0$ through $t = 20$. For each, assume that $y_0 = 0$, $w_1 = 1$, and $w_2 = w_3 = \\cdots w_T = 0$. \n\n$y_t = 0.99y_{t-1} + w_t$\n$y_t = y_{t-1} + w_t$\n$y_t = 1.01y_{t-1} + w_t$\n\nPlot the the simulated values for each process on the same axes and be sure to include a legend.", "# Question 1\n\n", "Question 2\nFor each of the following first-difference processes, compute the values of $y$ from $t=0$ through $t = 12$. For each, assume that $y_0 = 0$. \n\n$y_t = 1 + 0.5y_{t-1}$\n$y_t = 0.5y_{t-1}$\n$y_t = -1 + 0.5y_{t-1}$\n\nPlot the the simulated values for each process on the same axes and be sure to include a legend. Set the $y$-axis limits to $[-3,3]$.", "# Question 2\n\n", "Question 3\nDownload a file called Econ129_US_Production_A_Data.csv from the link \"Production data for the US\" under the \"Data\" section on the course website. The file contains annual production data for the US economy including ouput, consumption, investment, and labor hours, among others. The capital stock of the US is only given for 1948. Import the data into a Pandas DataFrame and do the following:\n\nSuppose that the depreciation rate for the US is $\\delta = 0.0375$. Use the capital accumulation equation $K_{t+1} = I_t + (1-\\delta)K_t$ to fill in the missing values for the capital column. Construct a plot of the computed capital stock.\nAdd columns to your DataFrame equal to capital per worker and output per worker by dividing the capital and output columns by the labor column. Print the first five rows of the DataFrame.\nPrint the average annual growth rates of capital per worker and output per worker for the US.\n\nRecall that the average annnual growth rate of a quantity $y$ from date $0$ to date $T$ is:\n\\begin{align}\ng & = \\left(\\frac{y_T}{y_0}\\right)^{\\frac{1}{T}}-1\n\\end{align}", "# Question 3.1\n\n\n# Question 3.2\n\n\n# Question 3.3\n", "Question 4: The Solow model with exogenous population and TFP growth\nSuppose that the aggregate production function is given by:\n\\begin{align}\nY_t & = A_tK_t^{\\alpha} L_t^{1-\\alpha}, \\tag{1}\n\\end{align}\nwhere $Y_t$ denotes output, $K_t$ denotes the capital stock, $L_t$ denotes the labor supply, and $A_t$ denotes total factor productivity $TFP$. $\\alpha$ is a constant.\nThe supply of labor grows at an exogenously determined rate $n$ and so it's value is determined recursively by a first-order difference equation:\n\\begin{align}\nL_{t+1} & = (1+n) L_t. \\tag{2}\n\\end{align}\nLikewise, TFP grows at an exogenously determined rate $g$:\n\\begin{align}\nA_{t+1} & = (1+g) A_t. \\tag{3}\n\\end{align}\nThe rest of the economy is characterized by the same equations as before:\n\\begin{align}\nC_t & = (1-s)Y_t \\tag{4}\\\nY_t & = C_t + I_t \\tag{5}\\\nK_{t+1} & = I_t + ( 1- \\delta)K_t. 
\\tag{6}\\\n\\end{align}\nEquation (4) is the consumption function where $s$ denotes the exogenously given saving rate. Equation (5) is the aggregate market clearing condition. Finally, Equation (6) is the capital evolution equation specifying that capital in year $t+1$ is the sum of newly created capital $I_t$ and the capital stock from year $t$ that has not depreciated $(1-\\delta)K_t$.\nCombine Equations (1) and (4) through (6) to eliminate $C_t$, $I_t$, and $Y_t$ and obtain a recurrence relation specifying $K_{t+1}$ as a funtion of $K_t$, $A_t$, and $L_t$:\n\\begin{align}\nK_{t+1} & = sA_tK_t^{\\alpha}L_t^{1-\\alpha} + ( 1- \\delta)K_t \\tag{7}\n\\end{align}\nGiven an initial values for capital and labor, Equations (2), (3), and (7) can be iterated on to compute the values of the capital stock and labor supply at some future date $T$. Furthermore, the values of consumption, output, and investment at date $T$ can also be computed using Equations (1), (4), (5), and (6).\nSimulation\nSimulate the Solow growth model with exogenous labor growth for $t=0\\ldots 100$. For the simulation, assume the following values of the parameters:\n\\begin{align}\nA & = 10\\\n\\alpha & = 0.35\\\ns & = 0.15\\\n\\delta & = 0.1\\\ng & = 0.015 \\\nn & = 0.01\n\\end{align}\nFurthermore, suppose that the initial values of capital and labor are:\n\\begin{align}\nK_0 & = 2\\\nA_0 & = 1\\\nL_0 & = 1\n\\end{align}", "# Initialize parameters for the simulation (A, s, T, delta, alpha, g, n, K0, A0, L0)\n\n\n# Initialize a variable called tfp as a (T+1)x1 array of zeros and set first value to A0\n\n\n# Compute all subsequent tfp values by iterating over t from 0 through T\n \n \n# Plot the simulated tfp series\n\n\n\n# Initialize a variable called labor as a (T+1)x1 array of zeros and set first value to L0\n\n\n# Compute all subsequent labor values by iterating over t from 0 through T\n \n \n# Plot the simulated labor series\n\n\n\n# Initialize a variable called capital as a (T+1)x1 array of zeros and set first value to K0\n\n\n# Compute all subsequent capital values by iterating over t from 0 through T\n\n \n# Plot the simulated capital series\n\n\n\n# Store the simulated capital, labor, and tfp data in a pandas DataFrame called data\n\n\n# Print the first 5 frows of the DataFrame\n\n\n# Create columns in the DataFrame to store computed values of the other endogenous variables: Y, C, and I\n\n\n# Print the first five rows of the DataFrame\n\n\n# Create columns in the DataFrame to store capital per worker, output per worker, consumption per worker, and investment per worker\n\n\n# Print the first five rows of the DataFrame\n\n\n# Create a 2x2 grid of plots of capital, output, consumption, and investment\n\n\n# Create a 2x2 grid of plots of capital per worker, output per worker, consumption per worker, and investment per worker\n", "Question 5\nRecall the Solow growth model with exogenous growth in labor and TFP:\n\\begin{align}\nY_t & = A_tK_t^{\\alpha} L_t^{1-\\alpha}, \\tag{1}\\\nC_t & = (1-s)Y_t \\tag{2}\\\nY_t & = C_t + I_t \\tag{3}\\\nK_{t+1} & = I_t + ( 1- \\delta)K_t \\tag{4}\\\nL_{t+1} & = (1+n) L_t \\tag{5} \\\nA_{t+1} & = (1+g) A_t. \\tag{6}\n\\end{align}\nSuppose that two countries called Westeros and Essos are identical except that TFP in Westeros grows faster than in Essos. 
Specifically:\n\\begin{align}\ng_{Westeros} & = 0.03\\\ng_{Essos} & = 0.01\n\\end{align}\nOtherwise, the parameters for each economy are the same including the initial values of capital, labor, and TFP:\n\\begin{align}\n\\alpha & = 0.35\\\ns & = 0.15\\\n\\delta & = 0.1\\\nn & = 0.01\\\nK_0 & = 20\\\nA_0 & = 10\\\nL_0 & = 1\n\\end{align}\nDo the following:\n\n\nFind the date (value for $t$) at which output per worker in Westeros becomes at least twice as large as output per worker in Essos. Print the value for t and the values of ouput per worker for each country.\n\n\nOn a single set of axes, plot simulated values of output per worker for each country for t = $1, 2, \\ldots 100$. \n\n\nHint: Copy into this notebook the function that simulates the Solow model with exogenous labor growth from the end of the Notebook from Class 9. Modify the function to fit this problem.", "# Question 5.1\n\n\n# Question 5.2\n" ]
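Question 3 of the homework above relies on the average annual growth rate formula g = (y_T / y_0)^(1/T) − 1. One possible helper for that calculation is sketched below; the numbers in the example are made up for illustration and are not part of the assignment data.

```python
import numpy as np

def average_annual_growth(y):
    """Average annual growth rate of an annual series y: (y_T/y_0)**(1/T) - 1."""
    y = np.asarray(y, dtype=float)
    T = len(y) - 1                      # number of years spanned by the series
    return (y[-1] / y[0]) ** (1.0 / T) - 1

# Illustration only: a quantity that doubles over 20 years grows about 3.5% per year.
y = np.linspace(100, 200, 21)
print(average_annual_growth(y))
```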
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
besser82/shogun
doc/ipython-notebooks/converter/Tapkee.ipynb
bsd-3-clause
[ "Dimensionality Reduction with the Shogun Machine Learning Toolbox\nBy Sergey Lisitsyn (lisitsyn) and Fernando J. Iglesias Garcia (iglesias).\nThis notebook illustrates <a href=\"http://en.wikipedia.org/wiki/Unsupervised_learning\">unsupervised learning</a> using the suite of dimensionality reduction algorithms available in Shogun. Shogun provides access to all these algorithms using Tapkee, a C++ library especialized in <a href=\"http://en.wikipedia.org/wiki/Dimensionality_reduction\">dimensionality reduction</a>.\nHands-on introduction to dimension reduction\nFirst of all, let us start right away by showing what the purpose of dimensionality reduction actually is. To this end, we will begin by creating a function that provides us with some data:", "import numpy\nimport os\nSHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')\n\ndef generate_data(curve_type, num_points=1000):\n\tif curve_type=='swissroll':\n\t\ttt = numpy.array((3*numpy.pi/2)*(1+2*numpy.random.rand(num_points)))\n\t\theight = numpy.array((numpy.random.rand(num_points)-0.5))\n\t\tX = numpy.array([tt*numpy.cos(tt), 10*height, tt*numpy.sin(tt)])\n\t\treturn X,tt\n\tif curve_type=='scurve':\n\t\ttt = numpy.array((3*numpy.pi*(numpy.random.rand(num_points)-0.5)))\n\t\theight = numpy.array((numpy.random.rand(num_points)-0.5))\n\t\tX = numpy.array([numpy.sin(tt), 10*height, numpy.sign(tt)*(numpy.cos(tt)-1)])\n\t\treturn X,tt\n\tif curve_type=='helix':\n\t\ttt = numpy.linspace(1, num_points, num_points).T / num_points\n\t\ttt = tt*2*numpy.pi\n\t\tX = numpy.r_[[(2+numpy.cos(8*tt))*numpy.cos(tt)],\n\t\t [(2+numpy.cos(8*tt))*numpy.sin(tt)],\n\t\t [numpy.sin(8*tt)]]\n\t\treturn X,tt", "The function above can be used to generate three-dimensional datasets with the shape of a Swiss roll, the letter S, or an helix. These are three examples of datasets which have been extensively used to compare different dimension reduction algorithms. As an illustrative exercise of what dimensionality reduction can do, we will use a few of the algorithms available in Shogun to embed this data into a two-dimensional space. This is essentially the dimension reduction process as we reduce the number of features from 3 to 2. The question that arises is: what principle should we use to keep some important relations between datapoints? In fact, different algorithms imply different criteria to answer this question.\nJust to start, lets pick some algorithm and one of the data sets, for example lets see what embedding of the Swissroll is produced by the Isomap algorithm. The Isomap algorithm is basically a slightly modified Multidimensional Scaling (MDS) algorithm which finds embedding as a solution of the following optimization problem:\n$$\n\\min_{x'_1, x'_2, \\dots} \\sum_i \\sum_j \\| d'(x'_i, x'_j) - d(x_i, x_j)\\|^2,\n$$\nwith defined $x_1, x_2, \\dots \\in X~~$ and unknown variables $x_1, x_2, \\dots \\in X'~~$ while $\\text{dim}(X') < \\text{dim}(X)~~~$,\n$d: X \\times X \\to \\mathbb{R}~~$ and $d': X' \\times X' \\to \\mathbb{R}~~$ are defined as arbitrary distance functions (for example Euclidean). \nSpeaking less math, the MDS algorithm finds an embedding that preserves pairwise distances between points as much as it is possible. The Isomap algorithm changes quite small detail: the distance - instead of using local pairwise relationships it takes global factor into the account with shortest path on the neighborhood graph (so-called geodesic distance). 
The neighborhood graph is defined as graph with datapoints as nodes and weighted edges (with weight equal to the distance between points). The edge between point $x_i~$ and $x_j~$ exists if and only if $x_j~$ is in $k~$ nearest neighbors of $x_i$. Later we will see that that 'global factor' changes the game for the swissroll dataset.\nHowever, first we prepare a small function to plot any of the original data sets together with its embedding.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n%matplotlib inline\n\ndef plot(data, embedded_data, colors='m'):\n\tfig = plt.figure()\n\tfig.set_facecolor('white')\n\tax = fig.add_subplot(121,projection='3d')\n\tax.scatter(data[0],data[1],data[2],c=colors,cmap=plt.cm.Spectral)\n\tplt.axis('tight'); plt.axis('off')\n\tax = fig.add_subplot(122)\n\tax.scatter(embedded_data[0],embedded_data[1],c=colors,cmap=plt.cm.Spectral)\n\tplt.axis('tight'); plt.axis('off')\n\tplt.show()\n\nimport shogun as sg\n\n# wrap data into Shogun features\ndata, colors = generate_data('swissroll')\nfeats = sg.features(data)\n\n# create instance of Isomap converter and configure it\nisomap = sg.transformer('Isomap')\nisomap.put('target_dim', 2)\n# set the number of neighbours used in kNN search\nisomap.put('k', 20)\n\n# create instance of Multidimensional Scaling converter and configure it\nmds = sg.transformer('MultidimensionalScaling')\nmds.put('target_dim', 2)\n\n# embed Swiss roll data\nembedded_data_mds = mds.transform(feats).get('feature_matrix')\nembedded_data_isomap = isomap.transform(feats).get('feature_matrix')\n\nplot(data, embedded_data_mds, colors)\nplot(data, embedded_data_isomap, colors)", "As it can be seen from the figure above, Isomap has been able to \"unroll\" the data, reducing its dimension from three to two. At the same time, points with similar colours in the input space are close to points with similar colours in the output space. This is, a new representation of the data has been obtained; this new representation maintains the properties of the original data, while it reduces the amount of information required to represent it. Note that the fact the embedding of the Swiss roll looks good in two dimensions stems from the intrinsic dimension of the input data. Although the original data is in a three-dimensional space, its intrinsic dimension is lower, since the only degree of freedom are the polar angle and distance from the centre, or height. \nFinally, we use yet another method, Stochastic Proximity Embedding (SPE) to embed the helix:", "# wrap data into Shogun features\ndata, colors = generate_data('helix')\nfeatures = sg.features(data)\n\n# create MDS instance\nconverter = sg.transformer('StochasticProximityEmbedding')\nconverter.put('target_dim', 2)\n\n# embed helix data\nembedded_features = converter.transform(features)\nembedded_data = embedded_features.get('feature_matrix')\n\nplot(data, embedded_data, colors)", "References\n\nLisitsyn, S., Widmer, C., Iglesias Garcia, F. J. Tapkee: An Efficient Dimension Reduction Library. (Link to paper in JMLR.)\nTenenbaum, J. B., de Silva, V. and Langford, J. B. A Global Geometric Framework for Nonlinear Dimensionality Reduction. (Link to Isomap's website.)" ]
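The generate_data helper in the notebook above also supports an 'scurve' option that is never exercised. A short sketch embedding it with the same Isomap configuration follows, reusing only calls that already appear in that notebook (generate_data, sg.features, sg.transformer('Isomap'), put, transform, and the plot helper).

```python
# Embed the S-curve with Isomap, using the same settings as the Swiss roll example.
data, colors = generate_data('scurve')
features = sg.features(data)

isomap = sg.transformer('Isomap')
isomap.put('target_dim', 2)
isomap.put('k', 20)

embedded = isomap.transform(features).get('feature_matrix')
plot(data, embedded, colors)
```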
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
texaspse/blog
media/f16-scientific-python/week2/Scientific Python Workshop 2.ipynb
mit
[ "First import pandas and numpy", "import pandas as pd\nimport numpy as np\n#Dont import matplotlib until we get to histogram example\nimport matplotlib.pyplot as plt\n#This next line tells jupyter to plot it in the same space\n%matplotlib inline", "Use pd.read_excel in order to open file. If it says file not found, then make sure your directory is correct\nMake sure you assign the file to a variable so it doesn't have to run every time", "table = pd.read_excel(\"GASISData.xls\")", "Lets say we want to see the first 10 rows of the data to make sure it is the correct file (Google \"pandas data preview\") #table.tail is end of data", "table.head()", "What if I want to look at just one column of data", "table['PLAYNAME']", "What if I want to create a new column", "table['NEW COLUMN'] = 5\ntable['NEW COLUMN'] ", "What if I want to find data in a certain set, such as only in Texas (Google) (panas find rows where value is)", "texasTable = table.loc[table['STATE'] == \"TEXAS\"]\nprint(texasTable)", "Run the following to get shape of table", "sizeTable = table.shape\nprint(sizeTable)", "This is done to find the count of number of rows and number of cols", "num_rows = sizeTable[0]\nnum_cols = sizeTable[1]\nprint(num_rows)\nprint(num_cols)", "Rows where you have some preset parameter, such as where lattitude is greater than 80 (Google) (Google same thing as above)", "table.loc[table['LATITUDE'] > 10]", "Exercise: Make them find out how to rename columns\nExercise: (Usually we use Excel equations, now we are gonna practice this) Google how to add two columns together, and then create a new column with all these added values\nGive them 5 mins for each excersice, help anyone around you\nIf you want to learn more, look it up at home on how to do other operators\nLets make a histogram of average permeability (In column CN), use column name not CN\nGoogle this", "pd.DataFrame.hist(table,\"AVPERM\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/ja/r1/tutorials/keras/overfit_and_underfit.ipynb
apache-2.0
[ "Copyright 2018 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.", "過学習と学習不足について知る\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/r1/tutorials/keras/overfit_and_underfit.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/r1/tutorials/keras/overfit_and_underfit.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>\n\nNote: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳はベストエフォートであるため、この翻訳が正確であることや英語の公式ドキュメントの 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリtensorflow/docsにプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 docs-ja@tensorflow.org メーリングリストにご連絡ください。\nいつものように、この例のプログラムはtf.keras APIを使用します。詳しくはTensorFlowのKeras guideを参照してください。\nこれまでの例、つまり、映画レビューの分類と燃費の推定では、検証用データでのモデルの正解率が、数エポックでピークを迎え、その後低下するという現象が見られました。\n言い換えると、モデルが訓練用データを過学習したと考えられます。過学習への対処の仕方を学ぶことは重要です。訓練用データセットで高い正解率を達成することは難しくありませんが、我々は、(これまで見たこともない)テスト用データに汎化したモデルを開発したいのです。\n過学習の反対語は学習不足(underfitting)です。学習不足は、モデルがテストデータに対してまだ改善の余地がある場合に発生します。学習不足の原因は様々です。モデルが十分強力でないとか、正則化のしすぎだとか、単に訓練時間が短すぎるといった理由があります。学習不足は、訓練用データの中の関連したパターンを学習しきっていないということを意味します。\nモデルの訓練をやりすぎると、モデルは過学習を始め、訓練用データの中のパターンで、テストデータには一般的ではないパターンを学習します。我々は、過学習と学習不足の中間を目指す必要があります。これから見ていくように、ちょうどよいエポック数だけ訓練を行うというのは必要なスキルなのです。\n過学習を防止するための、最良の解決策は、より多くの訓練用データを使うことです。多くのデータで訓練を行えば行うほど、モデルは自然により汎化していく様になります。これが不可能な場合、次善の策は正則化のようなテクニックを使うことです。正則化は、モデルに保存される情報の量とタイプに制約を課すものです。ネットワークが少数のパターンしか記憶できなければ、最適化プロセスにより、最も主要なパターンのみを学習することになり、より汎化される可能性が高くなります。\nこのノートブックでは、重みの正則化とドロップアウトという、よく使われる2つの正則化テクニックをご紹介します。これらを使って、IMDBの映画レビューを分類するノートブックの改善を図ります。", 
"import tensorflow.compat.v1 as tf\n\nfrom tensorflow import keras\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)", "IMDBデータセットのダウンロード\n以前のノートブックで使用したエンベディングの代わりに、ここでは文をマルチホットエンコードします。このモデルは、訓練用データセットをすぐに過学習します。このモデルを使って、過学習がいつ起きるかということと、どうやって過学習と戦うかをデモします。\nリストをマルチホットエンコードすると言うのは、0と1のベクトルにするということです。具体的にいうと、例えば[3, 5]というシーケンスを、インデックス3と5の値が1で、それ以外がすべて0の、10,000次元のベクトルに変換するということを意味します。", "NUM_WORDS = 10000\n\n(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)\n\ndef multi_hot_sequences(sequences, dimension):\n # 形状が (len(sequences), dimension)ですべて0の行列を作る\n results = np.zeros((len(sequences), dimension))\n for i, word_indices in enumerate(sequences):\n results[i, word_indices] = 1.0 # 特定のインデックスに対してresults[i] を1に設定する\n return results\n\n\ntrain_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)\ntest_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)", "結果として得られるマルチホットベクトルの1つを見てみましょう。単語のインデックスは頻度順にソートされています。このため、インデックスが0に近いほど1が多く出現するはずです。分布を見てみましょう。", "plt.plot(train_data[0])", "過学習のデモ\n過学習を防止するための最も単純な方法は、モデルのサイズ、すなわち、モデル内の学習可能なパラメータの数を小さくすることです(学習パラメータの数は、層の数と層ごとのユニット数で決まります)。ディープラーニングでは、モデルの学習可能なパラメータ数を、しばしばモデルの「キャパシティ」と呼びます。直感的に考えれば、パラメータ数の多いモデルほど「記憶容量」が大きくなり、訓練用のサンプルとその目的変数の間の辞書のようなマッピングをたやすく学習することができます。このマッピングには汎化能力がまったくなく、これまで見たことが無いデータを使って予測をする際には役に立ちません。\nディープラーニングのモデルは訓練用データに適応しやすいけれど、本当のチャレレンジは汎化であって適応ではないということを、肝に銘じておく必要があります。\n一方、ネットワークの記憶容量が限られている場合、前述のようなマッピングを簡単に学習することはできません。損失を減らすためには、より予測能力が高い圧縮された表現を学習しなければなりません。同時に、モデルを小さくしすぎると、訓練用データに適応するのが難しくなります。「多すぎる容量」と「容量不足」の間にちょうどよい容量があるのです。\n残念ながら、(層の数や、層ごとの大きさといった)モデルの適切なサイズやアーキテクチャを決める魔法の方程式はありません。一連の異なるアーキテクチャを使って実験を行う必要があります。\n適切なモデルのサイズを見つけるには、比較的少ない層の数とパラメータから始めるのがベストです。それから、検証用データでの損失値の改善が見られなくなるまで、徐々に層の大きさを増やしたり、新たな層を加えたりします。映画レビューの分類ネットワークでこれを試してみましょう。\n比較基準として、Dense層だけを使ったシンプルなモデルを構築し、その後、それより小さいバージョンと大きいバージョンを作って比較します。\n比較基準を作る", "baseline_model = keras.Sequential([\n # `.summary` を見るために`input_shape`が必要\n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nbaseline_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'binary_crossentropy'])\n\nbaseline_model.summary()\n\nbaseline_history = baseline_model.fit(train_data,\n train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)", "より小さいモデルの構築\n今作成したばかりの比較基準となるモデルに比べて隠れユニット数が少ないモデルを作りましょう。", "smaller_model = keras.Sequential([\n keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(4, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nsmaller_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'binary_crossentropy'])\n\nsmaller_model.summary()", "同じデータを使って訓練します。", "smaller_history = smaller_model.fit(train_data,\n train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)", "より大きなモデルの構築\n練習として、より大きなモデルを作成し、どれほど急速に過学習が起きるかを見ることもできます。次はこのベンチマークに、この問題が必要とするよりはるかに容量の大きなネットワークを追加しましょう。", "bigger_model = keras.models.Sequential([\n keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(512, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nbigger_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n 
metrics=['accuracy','binary_crossentropy'])\n\nbigger_model.summary()", "このモデルもまた同じデータを使って訓練します。", "bigger_history = bigger_model.fit(train_data, train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)", "訓練時と検証時の損失をグラフにする\n<!--TODO(markdaoust): This should be a one-liner with tensorboard -->\n\n実線は訓練用データセットの損失、破線は検証用データセットでの損失です(検証用データでの損失が小さい方が良いモデルです)。これをみると、小さいネットワークのほうが比較基準のモデルよりも過学習が始まるのが遅いことがわかります(4エポックではなく6エポック後)。また、過学習が始まっても性能の低下がよりゆっくりしています。", "def plot_history(histories, key='binary_crossentropy'):\n plt.figure(figsize=(16,10))\n\n for name, history in histories:\n val = plt.plot(history.epoch, history.history['val_'+key],\n '--', label=name.title()+' Val')\n plt.plot(history.epoch, history.history[key], color=val[0].get_color(),\n label=name.title()+' Train')\n\n plt.xlabel('Epochs')\n plt.ylabel(key.replace('_',' ').title())\n plt.legend()\n\n plt.xlim([0,max(history.epoch)])\n\n\nplot_history([('baseline', baseline_history),\n ('smaller', smaller_history),\n ('bigger', bigger_history)])", "より大きなネットワークでは、すぐに、1エポックで過学習が始まり、その度合も強いことに注目してください。ネットワークの容量が大きいほど訓練用データをモデル化するスピードが早くなり(結果として訓練時の損失値が小さくなり)ますが、より過学習しやすく(結果として訓練時の損失値と検証時の損失値が大きく乖離しやすく)なります。\n戦略\n重みの正則化を加える\n「オッカムの剃刀」の原則をご存知でしょうか。何かの説明が2つあるとすると、最も正しいと考えられる説明は、仮定の数が最も少ない「一番単純な」説明だというものです。この原則は、ニューラルネットワークを使って学習されたモデルにも当てはまります。ある訓練用データとネットワーク構造があって、そのデータを説明できる重みの集合が複数ある時(つまり、複数のモデルがある時)、単純なモデルのほうが複雑なものよりも過学習しにくいのです。\nここで言う「単純なモデル」とは、パラメータ値の分布のエントロピーが小さいもの(あるいは、上記で見たように、そもそもパラメータの数が少ないもの)です。したがって、過学習を緩和するための一般的な手法は、重みが小さい値のみをとることで、重み値の分布がより整然となる(正則)様に制約を与えるものです。これを「重みの正則化」と呼ばれ、ネットワークの損失関数に、重みの大きさに関連するコストを加えることで行われます。このコストには2つの種類があります。\n\n\nL1正則化 重み係数の絶対値に比例するコストを加える(重みの「L1ノルム」と呼ばれる)。\n\n\nL2正則化 重み係数の二乗に比例するコストを加える(重み係数の二乗「L2ノルム」と呼ばれる)。L2正則化はニューラルネットワーク用語では重み減衰(Weight Decay)と呼ばれる。呼び方が違うので混乱しないように。重み減衰は数学的にはL2正則化と同義である。\n\n\ntf.kerasでは、重みの正則化をするために、重み正則化のインスタンスをキーワード引数として層に加えます。ここでは、L2正則化を追加してみましょう。", "l2_model = keras.models.Sequential([\n keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),\n activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),\n activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nl2_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'binary_crossentropy'])\n\nl2_model_history = l2_model.fit(train_data, train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)", "l2(0.001)というのは、層の重み行列の係数全てに対して0.001 * 重み係数の値 **2をネットワークの損失値合計に加えることを意味します。このペナルティは訓練時のみに加えられるため、このネットワークの損失値は、訓練時にはテスト時に比べて大きくなることに注意してください。\nL2正則化の影響を見てみましょう。", "plot_history([('baseline', baseline_history),\n ('l2', l2_model_history)])", "ご覧のように、L2正則化ありのモデルは比較基準のモデルに比べて過学習しにくくなっています。両方のモデルのパラメータ数は同じであるにもかかわらずです。\nドロップアウトを追加する\nドロップアウトは、ニューラルネットワークの正則化テクニックとして最もよく使われる手法の一つです。この手法は、トロント大学のヒントンと彼の学生が開発したものです。ドロップアウトは層に適用するもので、訓練時に層から出力された特徴量に対してランダムに「ドロップアウト(つまりゼロ化)」を行うものです。例えば、ある層が訓練時にある入力サンプルに対して、普通は[0.2, 0.5, 1.3, 0.8, 1.1] というベクトルを出力するとします。ドロップアウトを適用すると、このベクトルは例えば[0, 0.5, 1.3, 0, 1.1]のようにランダムに散らばったいくつかのゼロを含むようになります。「ドロップアウト率」はゼロ化される特徴の割合で、通常は0.2から0.5の間に設定します。テスト時は、どのユニットもドロップアウトされず、代わりに出力値がドロップアウト率と同じ比率でスケールダウンされます。これは、訓練時に比べてたくさんのユニットがアクティブであることに対してバランスをとるためです。\ntf.kerasでは、Dropout層を使ってドロップアウトをネットワークに導入できます。ドロップアウト層は、その直前の層の出力に対してドロップアウトを適用します。\nそれでは、IMDBネットワークに2つのドロップアウト層を追加しましょう。", "dpt_model = keras.models.Sequential([\n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n 
keras.layers.Dropout(rate=0.5),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dropout(rate=0.5),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\ndpt_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy','binary_crossentropy'])\n\ndpt_model_history = dpt_model.fit(train_data, train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)\n\nplot_history([('baseline', baseline_history),\n ('dropout', dpt_model_history)])", "ドロップアウトを追加することで、比較対象モデルより明らかに改善が見られます。\nまとめ:ニューラルネットワークにおいて過学習を防ぐ最も一般的な方法は次のとおりです。\n\n訓練データを増やす\nネットワークの容量をへらす\n重みの正則化を行う\nドロップアウトを追加する\n\nこのガイドで触れていない2つの重要なアプローチがあります。データ拡張とバッチ正規化です。" ]
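The notebook above applies L2 regularization and dropout separately. Below is a sketch of one model that combines both, using only the Keras calls already shown there (keras.regularizers.l2 and keras.layers.Dropout); whether the combination actually beats either technique alone would need to be checked on the validation curves.

```python
combined_model = keras.models.Sequential([
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dropout(rate=0.5),
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation=tf.nn.relu),
    keras.layers.Dropout(rate=0.5),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

combined_model.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy', 'binary_crossentropy'])

combined_history = combined_model.fit(train_data, train_labels,
                                      epochs=20,
                                      batch_size=512,
                                      validation_data=(test_data, test_labels),
                                      verbose=2)

plot_history([('baseline', baseline_history),
              ('l2 + dropout', combined_history)])
```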
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ktaneishi/deepchem
examples/notebooks/Estimators.ipynb
mit
[ "Using DeepChem with Tensorflow Data and Estimators\nWhen DeepChem was first created, Tensorflow had no standard interface for datasets or models. We created the Dataset and Model classes to fill this hole. More recently, Tensorflow has added the tf.data module as a standard interface for datasets, and the tf.estimator module as a standard interface for models. To enable easy interoperability with other tools, we have added features to Dataset and Model to support these new standards.\nThis example demonstrates how to use these features. Let's begin by loading a dataset and creating a model to analyze it. We'll use a simple MultitaskClassifier with one hidden layer.", "import deepchem as dc\nimport tensorflow as tf\nimport numpy as np\n\ntasks, datasets, transformers = dc.molnet.load_tox21()\ntrain_dataset, valid_dataset, test_dataset = datasets\nn_tasks = len(tasks)\nn_features = train_dataset.X.shape[1]\n\nmodel = dc.models.MultitaskClassifier(n_tasks, n_features, layer_sizes=[1000], dropouts=0.25)", "We want to train the model using the training set, then evaluate it on the test set. As our evaluation metric we will use the ROC AUC, averaged over the 12 tasks included in the dataset. First let's see how to do this with the DeepChem API.", "model.fit(train_dataset, nb_epoch=100)\nmetric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)\nprint(model.evaluate(test_dataset, [metric]))", "Simple enough. Now let's see how to do the same thing with the Tensorflow APIs. Fair warning: this is going to take a lot more code!\nTo begin with, Tensorflow doesn't allow a dataset to be passed directly to a model. Instead, you need to write an \"input function\" to construct a particular set of tensors and return them in a particular format. Fortunately, Dataset's make_iterator() method provides exactly the tensors we need in the form of a tf.data.Iterator. This allows our input function to be very simple.", "def input_fn(dataset, epochs):\n x, y, weights = dataset.make_iterator(batch_size=100, epochs=epochs).get_next()\n return {'x': x, 'weights': weights}, y", "Next, you have to use the functions in the tf.feature_column module to create an object representing each feature and weight column (but curiously, not the label column—don't ask me why!). These objects describe the data type and shape of each column, and give each one a name. The names must match the keys in the dict returned by the input function.", "x_col = tf.feature_column.numeric_column('x', shape=(n_features,))\nweight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))", "Unlike DeepChem models, which allow arbitrary metrics to be passed to evaluate(), estimators require all metrics to be defined up front when you create the estimator. Unfortunately, Tensorflow doesn't have very good support for multitask models. It provides an AUC metric, but no easy way to average this metric over tasks. We therefore must create a separate metric for every task, then define our own metric function to compute the average of them.", "def mean_auc(labels, predictions, weights):\n metric_ops = []\n update_ops = []\n for i in range(n_tasks):\n metric, update = tf.metrics.auc(labels[:,i], predictions[:,i], weights[:,i])\n metric_ops.append(metric)\n update_ops.append(update)\n mean_metric = tf.reduce_mean(tf.stack(metric_ops))\n update_all = tf.group(*update_ops)\n return mean_metric, update_all", "Now we create our Estimator by calling make_estimator() on the DeepChem model. 
We provide as arguments the objects created above to represent the feature and weight columns, as well as our metric function.", "estimator = model.make_estimator(feature_columns=[x_col],\n weight_column=weight_col,\n metrics={'mean_auc': mean_auc},\n model_dir='estimator')", "We are finally ready to train and evaluate it! Notice how the input function passed to each method is actually a lambda. This allows us to write a single function, then use it with different datasets and numbers of epochs.", "estimator.train(input_fn=lambda: input_fn(train_dataset, 100))\nprint(estimator.evaluate(input_fn=lambda: input_fn(test_dataset, 1)))", "That's a lot of code for something DeepChem can do in three lines. The Tensorflow API is verbose and somewhat confusing. It has seemingly arbitrary limitations, like assuming a model will only ever have one output, and therefore only allowing one label. But for better or worse, it's a standard.\nOf course, if you just want to use a DeepChem model with a DeepChem dataset, there is no need for any of this. Just use the DeepChem API. But perhaps you want to use a DeepChem dataset with a model that has been implemented as an estimator. In that case, Dataset.make_iterator() allows you to easily do that. Or perhaps you have higher level workflow code that is written to work with estimators. In that case, make_estimator() allows DeepChem models to easily fit into that workflow." ]
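The same lambda-based input function pattern extends directly to the validation split loaded at the top of the notebook above. A minimal sketch, assuming the `estimator` and `input_fn` defined there are in scope:

```python
# Evaluate the trained estimator on the validation split with one pass over the data.
valid_scores = estimator.evaluate(input_fn=lambda: input_fn(valid_dataset, 1))
print(valid_scores)
```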
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
metpy/MetPy
v0.12/_downloads/e5685967297554788de3cf5858571b23/Natural_Neighbor_Verification.ipynb
bsd-3-clause
[ "%matplotlib inline", "Natural Neighbor Verification\nWalks through the steps of Natural Neighbor interpolation to validate that the algorithmic\napproach taken in MetPy is correct.\nFind natural neighbors visual test\nA triangle is a natural neighbor for a point if the\ncircumscribed circle &lt;https://en.wikipedia.org/wiki/Circumscribed_circle&gt;_ of the\ntriangle contains that point. It is important that we correctly grab the correct triangles\nfor each point before proceeding with the interpolation.\nAlgorithmically:\n\n\nWe place all of the grid points in a KDTree. These provide worst-case O(n) time\n complexity for spatial searches.\n\n\nWe generate a Delaunay Triangulation &lt;https://docs.scipy.org/doc/scipy/\n reference/tutorial/spatial.html#delaunay-triangulations&gt;_\n using the locations of the provided observations.\n\n\nFor each triangle, we calculate its circumcenter and circumradius. Using\n KDTree, we then assign each grid a triangle that has a circumcenter within a\n circumradius of the grid's location.\n\n\nThe resulting dictionary uses the grid index as a key and a set of natural\n neighbor triangles in the form of triangle codes from the Delaunay triangulation.\n This dictionary is then iterated through to calculate interpolation values.\n\n\nWe then traverse the ordered natural neighbor edge vertices for a particular\n grid cell in groups of 3 (n - 1, n, n + 1), and perform calculations to generate\n proportional polygon areas.\n\n\nCircumcenter of (n - 1), n, grid_location\n Circumcenter of (n + 1), n, grid_location\nDetermine what existing circumcenters (ie, Delaunay circumcenters) are associated\n with vertex n, and add those as polygon vertices. Calculate the area of this polygon.\n\n\nIncrement the current edges to be checked, i.e.:\n n - 1 = n, n = n + 1, n + 1 = n + 2\n\n\nRepeat steps 5 & 6 until all of the edge combinations of 3 have been visited.\n\n\nRepeat steps 4 through 7 for each grid cell.", "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.spatial import ConvexHull, Delaunay, delaunay_plot_2d, Voronoi, voronoi_plot_2d\nfrom scipy.spatial.distance import euclidean\n\nfrom metpy.interpolate import geometry\nfrom metpy.interpolate.points import natural_neighbor_point", "For a test case, we generate 10 random points and observations, where the\nobservation values are just the x coordinate value times the y coordinate\nvalue divided by 1000.\nWe then create two test points (grid 0 & grid 1) at which we want to\nestimate a value using natural neighbor interpolation.\nThe locations of these observations are then used to generate a Delaunay triangulation.", "np.random.seed(100)\n\npts = np.random.randint(0, 100, (10, 2))\nxp = pts[:, 0]\nyp = pts[:, 1]\nzp = (pts[:, 0] * pts[:, 0]) / 1000\n\ntri = Delaunay(pts)\n\nfig, ax = plt.subplots(1, 1, figsize=(15, 10))\nax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility\ndelaunay_plot_2d(tri, ax=ax)\n\nfor i, zval in enumerate(zp):\n ax.annotate('{} F'.format(zval), xy=(pts[i, 0] + 2, pts[i, 1]))\n\nsim_gridx = [30., 60.]\nsim_gridy = [30., 60.]\n\nax.plot(sim_gridx, sim_gridy, '+', markersize=10)\nax.set_aspect('equal', 'datalim')\nax.set_title('Triangulation of observations and test grid cell '\n 'natural neighbor interpolation values')\n\nmembers, circumcenters = geometry.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))\n\nval = natural_neighbor_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0],\n circumcenters)\nax.annotate('grid 0: 
{:.3f}'.format(val), xy=(sim_gridx[0] + 2, sim_gridy[0]))\n\nval = natural_neighbor_point(xp, yp, zp, (sim_gridx[1], sim_gridy[1]), tri, members[1],\n circumcenters)\nax.annotate('grid 1: {:.3f}'.format(val), xy=(sim_gridx[1] + 2, sim_gridy[1]))", "Using the circumcenter and circumcircle radius information from\n:func:metpy.interpolate.geometry.find_natural_neighbors, we can visually\nexamine the results to see if they are correct.", "def draw_circle(ax, x, y, r, m, label):\n th = np.linspace(0, 2 * np.pi, 100)\n nx = x + r * np.cos(th)\n ny = y + r * np.sin(th)\n ax.plot(nx, ny, m, label=label)\n\n\nfig, ax = plt.subplots(1, 1, figsize=(15, 10))\nax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility\ndelaunay_plot_2d(tri, ax=ax)\nax.plot(sim_gridx, sim_gridy, 'ks', markersize=10)\n\nfor i, (x_t, y_t) in enumerate(circumcenters):\n r = geometry.circumcircle_radius(*tri.points[tri.simplices[i]])\n if i in members[1] and i in members[0]:\n draw_circle(ax, x_t, y_t, r, 'm-', str(i) + ': grid 1 & 2')\n ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)\n elif i in members[0]:\n draw_circle(ax, x_t, y_t, r, 'r-', str(i) + ': grid 0')\n ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)\n elif i in members[1]:\n draw_circle(ax, x_t, y_t, r, 'b-', str(i) + ': grid 1')\n ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)\n else:\n draw_circle(ax, x_t, y_t, r, 'k:', str(i) + ': no match')\n ax.annotate(str(i), xy=(x_t, y_t), fontsize=9)\n\nax.set_aspect('equal', 'datalim')\nax.legend()", "What?....the circle from triangle 8 looks pretty darn close. Why isn't\ngrid 0 included in that circle?", "x_t, y_t = circumcenters[8]\nr = geometry.circumcircle_radius(*tri.points[tri.simplices[8]])\n\nprint('Distance between grid0 and Triangle 8 circumcenter:',\n euclidean([x_t, y_t], [sim_gridx[0], sim_gridy[0]]))\nprint('Triangle 8 circumradius:', r)", "Lets do a manual check of the above interpolation value for grid 0 (southernmost grid)\nGrab the circumcenters and radii for natural neighbors", "cc = np.array(circumcenters)\nr = np.array([geometry.circumcircle_radius(*tri.points[tri.simplices[m]]) for m in members[0]])\n\nprint('circumcenters:\\n', cc)\nprint('radii\\n', r)", "Draw the natural neighbor triangles and their circumcenters. 
Also plot a Voronoi diagram\n&lt;https://docs.scipy.org/doc/scipy/reference/tutorial/spatial.html#voronoi-diagrams&gt;_\nwhich serves as a complementary (but not necessary)\nspatial data structure that we use here simply to show areal ratios.\nNotice that the two natural neighbor triangle circumcenters are also vertices\nin the Voronoi plot (green dots), and the observations are in the polygons (blue dots).", "vor = Voronoi(list(zip(xp, yp)))\n\nfig, ax = plt.subplots(1, 1, figsize=(15, 10))\nax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility\nvoronoi_plot_2d(vor, ax=ax)\n\nnn_ind = np.array([0, 5, 7, 8])\nz_0 = zp[nn_ind]\nx_0 = xp[nn_ind]\ny_0 = yp[nn_ind]\n\nfor x, y, z in zip(x_0, y_0, z_0):\n ax.annotate('{}, {}: {:.3f} F'.format(x, y, z), xy=(x, y))\n\nax.plot(sim_gridx[0], sim_gridy[0], 'k+', markersize=10)\nax.annotate('{}, {}'.format(sim_gridx[0], sim_gridy[0]), xy=(sim_gridx[0] + 2, sim_gridy[0]))\nax.plot(cc[:, 0], cc[:, 1], 'ks', markersize=15, fillstyle='none',\n label='natural neighbor\\ncircumcenters')\n\nfor center in cc:\n ax.annotate('{:.3f}, {:.3f}'.format(center[0], center[1]),\n xy=(center[0] + 1, center[1] + 1))\n\ntris = tri.points[tri.simplices[members[0]]]\nfor triangle in tris:\n x = [triangle[0, 0], triangle[1, 0], triangle[2, 0], triangle[0, 0]]\n y = [triangle[0, 1], triangle[1, 1], triangle[2, 1], triangle[0, 1]]\n ax.plot(x, y, ':', linewidth=2)\n\nax.legend()\nax.set_aspect('equal', 'datalim')\n\n\ndef draw_polygon_with_info(ax, polygon, off_x=0, off_y=0):\n \"\"\"Draw one of the natural neighbor polygons with some information.\"\"\"\n pts = np.array(polygon)[ConvexHull(polygon).vertices]\n for i, pt in enumerate(pts):\n ax.plot([pt[0], pts[(i + 1) % len(pts)][0]],\n [pt[1], pts[(i + 1) % len(pts)][1]], 'k-')\n\n avex, avey = np.mean(pts, axis=0)\n ax.annotate('area: {:.3f}'.format(geometry.area(pts)), xy=(avex + off_x, avey + off_y),\n fontsize=12)\n\n\ncc1 = geometry.circumcenter((53, 66), (15, 60), (30, 30))\ncc2 = geometry.circumcenter((34, 24), (53, 66), (30, 30))\ndraw_polygon_with_info(ax, [cc[0], cc1, cc2])\n\ncc1 = geometry.circumcenter((53, 66), (15, 60), (30, 30))\ncc2 = geometry.circumcenter((15, 60), (8, 24), (30, 30))\ndraw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2], off_x=-9, off_y=3)\n\ncc1 = geometry.circumcenter((8, 24), (34, 24), (30, 30))\ncc2 = geometry.circumcenter((15, 60), (8, 24), (30, 30))\ndraw_polygon_with_info(ax, [cc[1], cc1, cc2], off_x=-15)\n\ncc1 = geometry.circumcenter((8, 24), (34, 24), (30, 30))\ncc2 = geometry.circumcenter((34, 24), (53, 66), (30, 30))\ndraw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2])", "Put all of the generated polygon areas and their affiliated values in arrays.\nCalculate the total area of all of the generated polygons.", "areas = np.array([60.434, 448.296, 25.916, 70.647])\nvalues = np.array([0.064, 1.156, 2.809, 0.225])\ntotal_area = np.sum(areas)\nprint(total_area)", "For each polygon area, calculate its percent of total area.", "proportions = areas / total_area\nprint(proportions)", "Multiply the percent of total area by the respective values.", "contributions = proportions * values\nprint(contributions)", "The sum of this array is the interpolation value!", "interpolation_value = np.sum(contributions)\nfunction_output = natural_neighbor_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri,\n members[0], circumcenters)\n\nprint(interpolation_value, function_output)", "The values are slightly different due to truncating the area values in\nthe above visual example to 
the 3rd decimal place.", "plt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
konkam/perceptron_guide
README.ipynb
gpl-3.0
[ "Guide: quelques étapes pour programmer un perceptron\nPréliminaire: charger des images en Python et les mettre sous forme de vecteur\nLes images\nAvec votre éditeur d'images préféré, vous pouvez créer une image et la sauvegarder sous un certain format, c'est à dire une manière d'encoder l'image. Ici on prendra l'exemple du format .png\nLorsque que vous sauvegardez, faites-attention au nombre de pixels de l'image, on veut ici des images qui ont toutes la meme taille (dans notre exemple 50x50).\nVoici un exemple d'image:\n\nVous trouverez 8 exemples d'images dans le dossier training_set_perceptron, que vous pouvez télécharger. Je vous encourage à créer les votres.\nChargement des packages préalables", "from pylab import * #Pour tranformer python en une calculette scientifique\nfrom scipy import misc #Pour lire les images", "Chargement des images dans python", "def cv_image_vers_vecteur(image): #Pour convertir une image en vecteur, cela servira pour les opérations suivantes\n return ravel(image)\n\ndef charge_l_image(nom_de_fichier):\n return misc.imread(nom_de_fichier, flatten=True, mode = \"L\")/255. #avec cela on convertit l'image en suite de 0 et de 1\n\ndef charge_l_image_sous_forme_de_vecteur(nom_de_fichier):\n return cv_image_vers_vecteur(charge_l_image(nom_de_fichier))\n\ndef charge_l_image_et_trace(nom_de_fichier_complet):\n imshow(charge_l_image(nom_de_fichier_complet))\n show()\n\n\ncharge_l_image(\"training_set_perceptron/A1.png\")\n\nshape(charge_l_image(\"training_set_perceptron/A1.png\"))", "On voit qu'une image est constituée de 50x50 = 2500 valeurs qui peuvent etre égales à 0 ou à 1.", "charge_l_image_sous_forme_de_vecteur(\"training_set_perceptron/A1.png\")\n\nshape(charge_l_image_sous_forme_de_vecteur(\"training_set_perceptron/A1.png\"))\n\ncharge_l_image_et_trace(\"training_set_perceptron/A1.png\")", "Suite: liste des étapes pour programmer le perceptron:\n\nConstituer un ensemble d'entrainement qui contient des exemples et ce qui doit etre appris sur ces exemples. Dans le cas que je vous propose, ce serait tous les A avec le score +1 et tous les B avec le score -1. Encore une fois, n'hésitez pas à faire vos propres exemples. \nTraitement de l'image par le neurone\nLe neurone prend l'image sous la forme d'un vecteur en entrée, le multiplie par ses poids et regarde le signe du résultat.\nL'image d'entrée est bicolore, formée de 50 pixels. On peut donc représenter l'image par des 0 et des 1 pour chacune des couleurs, on peut la transformer en un vecteur de taille 2500 contenant des 0 et des 1.\nSi on appelle xi le vecteur de l'exemple, wi les poids du neurone, et $y$ le résultat du traitement, alors le traitement de l'image par le neurone peut s'écrire en équations:\n<img src=\"eq1.png\" width=\"200\">\nEntrainement du neurone\nOn rappelle les étapes principales:\nOn commence par choisir des poids initiaux au hasard (ou bien tous à 0, qu'est-ce qui vous parait le plus cohérent ?) 
\nPour chaque exemple de notre ensemble d'entrainement:\non calcule le score prédit étant donné les poids actuels, le score qu'on aurait du trouver et l'erreur qui est la différence entre ces deux grandeurs\nOn met à jour les poids: nouveaux poids = poids actuels + erreur * entrée, \n\n\nEt on recommence\nSoit en équations:\nSi les poids actuels sont wi(t), les poids mis à jour wi(t+1), le score de l'exemple k est sk, la prédiction du neurone yk et l'exemple k vectorisé est sous la forme xi,k, alors:\n<img src=\"eq2.png\" width=\"200\">\nQuand s'arreter ?\nUn critère peut etre de regarder quand le neurone cesse de progresser, c'est à dire que les erreurs qu'il fait cessent de diminuer. \n\n\nUne fois le neurone entrainé, on le teste sur quelques images" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
betoesquivel/onforums-application
testdataextractor/TestDataExtractor.ipynb
mit
[ "Clustering test data and evaluating clustering technique with it", "from bs4 import BeautifulSoup\n\nf = open('../test_data/1957284403.ofs.gold.xml', 'r')\narticle_text = f.read();\nsoup = BeautifulSoup(article_text, \"lxml\")\n\ncomment = {\n \"bloggerId\": \"author\",\n \"sentences\": [], # all sentences in a comment,\n \"parents\": [] # the order depends on how beautifulsoup gives me the parents\n}\narticle = {\n \"sentences\": {}, # each sentence goes here, hashed by id\n \"comments\": {} # each comment goes here, hashed by id\n}\ncommentsHTML = soup.findAll('comment')\nprint commentsHTML[0]\nfor c in commentsHTML:\n comment_sentences = []\n comment_parents = []\n ", "Can I extract just the sentence that belongs to the replied comment?", "html = commentsHTML[0]\ncomms = html.findAll('comment')\nfirst_comm_s = html.findAll('s', recursive=False)\nfirst_comm_s\n", "Can I extract all the comment tags, including the nested ones?\nTurns out the findAll is recursive and gets me every comment.\nFrom there, getting the parents is easy.", "for c in commentsHTML:\n if c['id'] == \"c4\":\n print c\n print [p['id'] for p in c.findParents(\"comment\")]\n break", "Therefore, the function to extract the comments is:", "def parse_comments(comments):\n '''\n comment = {\n \"bloggerId\": \"author\",\n \"sentences\": [], # all sentences in a comment,\n \"parents\": [] # the order depends on how beautifulsoup gives me the parents\n }\n '''\n \n parsed_comments = {}\n for c in comments:\n comment = {}\n comment['bloggerId'] = c['bloggerid']\n \n comment['sentences_ids'] = [s['id'] for s in c.findAll('s', recursive=False)]\n comment['parents'] = [p['id'] for p in c.findParents(\"comment\")]\n parsed_comments[c['id']] = comment\n \n return parsed_comments\n\nimport json\nimport pprint\ndef parse_article(html):\n soup = BeautifulSoup(html, \"lxml\")\n \n sentences = soup.findAll('s')\n parsed_sentences = {}\n for s in sentences:\n parsed_sentences[s['id']] = s.get_text() \n \n parsed_comments = parse_comments(soup.findAll('comment'))\n \n article = {\n 'sentences': parsed_sentences,\n 'comments': parsed_comments\n }\n\n return article\n\narticle = parse_article(article_text)\npprint.pprint(article)\njson_article = json.dumps(article, indent=4)\nprint len(article['comments'].values()), \" comments parsed.\"\nprint len(article['sentences'].values()), \" sentences parsed.\"", "Clustering just the sentences\nVectorizing the sentences (TFIDF)", "from sklearn.feature_extraction.text import TfidfVectorizer\nimport nltk.stem\n\nenglish_stemmer = nltk.stem.SnowballStemmer('english')\n\nclass StemmedTfidfVectorizer(TfidfVectorizer):\n def build_analyzer(self):\n analyzer=super(StemmedTfidfVectorizer,self).build_analyzer()\n return lambda doc:(english_stemmer.stem(w) for w in analyzer(doc))\n\nvectorizer = StemmedTfidfVectorizer(min_df=1, stop_words='english',\n )\n\nsentences_vectors = vectorizer.fit_transform(article['sentences'].values())\nsorted_feature_indices = np.argsort(vectorizer.idf_)[::-1]\nfeatures = vectorizer.get_feature_names()\ntop_n_features = 20\ntop_features = [features[i] for i in sorted_feature_indices[:top_n_features]]\n\nprint \"%d features found\" % (len(features))\nprint \"Top %d features:\" % (top_n_features)\nprint top_features", "Dimensionality reduction and Normalization", "import gensim\n#Dimensionality reduction using LSI. 
Go from 6D to 2D.\n\nX = sentences_vectors.todense()\ndct = gensim.corpora.Dictionary(X)\nlsi_docs = {}\nnum_topics = 500\nlsi_model = gensim.models.LsiModel(dct, num_topics=500)\nprint lsi_model.shape\nprint lsi_model[:50]", "Clustering with MeanShift\nWHY ARE ALL VECTORS VALUED AT 0!???", "import numpy as np\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\n\nbandwidth = estimate_bandwidth(X, quantile=0.3)\n\nms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\nms.fit(X)\nlabels = ms.labels_\ncluster_centers = ms.cluster_centers_\n\nlabels_unique = np.unique(labels)\nn_clusters_ = len(labels_unique)\n\nprint \"Number of estimated clusters : %d\" % n_clusters_\n\n# Plot result\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nplt.figure(1)\nplt.clf()\n\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\nfor k, col in zip(range(n_clusters_), colors):\n my_members = labels == k\n cluster_center = cluster_centers[k]\n plt.plot(X[my_members, 0], X[my_members, 1], col + '.')\n plt.plot(cluster_center[0], cluster_center[1], 'o',\n markerfacecolor=col, markeredgecolor='k',\n markersize=14)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n", "Using the same approach as a movie clusterer\nhttp://brandonrose.org/clustering\nImports", "import numpy as np\nimport pandas as pd\nimport nltk\nimport re\nimport os\nimport codecs\nfrom sklearn import feature_extraction\nimport mpld3", "Stopwords, stemming, and tokenizing", "stopwords = nltk.corpus.stopwords.words('english')\nfrom nltk.stem.snowball import SnowballStemmer\nstemmer = SnowballStemmer(\"english\")\nprint 'Done'\n\ndef tokenize_and_stem(sentences):\n tokens = [word for sent in sentences \n for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n stems = [stemmer.stem(t) for t in filtered_tokens]\n return stems\n\ndef tokenize_only(sentences):\n tokens = [word.lower() for sent in sentences\n for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n return filtered_tokens", "Make vocabulary\nstemmmed and not-stemmed", "totalvocab_stemmed = []\ntotalvocab_tokenized = []\nallwords_stemmed = tokenize_and_stem(article['sentences'].values())\ntotalvocab_stemmed.extend(allwords_stemmed)\n\nallwords_tokenized = tokenize_only(article['sentences'].values())\ntotalvocab_tokenized.extend(allwords_tokenized)", "Pandas data frame to visualize the vocabulary", "vocab_frame = pd.DataFrame({'words': totalvocab_tokenized},\n index = totalvocab_stemmed)\nprint 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'\nprint 'here are the first words in the vocabulary'\nvocab_frame.head()", "TF-IDF and document similarity", "from sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=20000,\n min_df=0.2, stop_words='english',\n use_idf=True, tokenizer=tokenize_and_stem,\n ngram_range=(1,3))\n%time tfidf_matrix = tfidf_vectorizer.fit_transform(article['sentences'].values())\n\nprint tfidf_matrix.shape\n\nterms = tfidf_vectorizer.get_feature_names()", "Cosine Similarity", "from sklearn.metrics.pairwise import cosine_similarity\ndist = 1 - cosine_similarity(tfidf_matrix)\ndist_frame = pd.DataFrame(dist)\nprint dist", "K-means clustering", "from sklearn.cluster import KMeans\n\nnum_clusters = 5\nkm = KMeans(n_clusters=num_clusters)\n\n%time 
km.fit(tfidf_matrix)\nclusters = km.labels_.tolist()\n\nclusters", "Multidimensional scaling to plot?", "import os\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom sklearn.manifold import MDS\n\nMDS()\nmds = MDS(n_components=2, dissimilarity=\"precomputed\", random_state=1)\npos = mds.fit_transform(dist)\n\nxs, ys = pos[:,0], pos[:, 1]", "Plot", "cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}\ncluster_names = {0: 'C0',\n 1: 'C1',\n 2: 'C2',\n 3: 'C3',\n 4: 'C4'}\n\n# iPython now will show matplotlib plots inline\n%matplotlib inline\n\ndf = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=[\"s{0}\".format(x) for x in range(190)]))\n\ngroups = df.groupby('label')\n\n### set up the plot\nfig, ax = plt.subplots(figsize=(17,9))\nax.margins(0.05)\nfor name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='',\n ms=12, label=cluster_names[name], color=cluster_colors[name],\n mec='none')\n ax.set_aspect('auto')\n ax.tick_params(\\\n axis='x',\n which='both',\n bottom='off',\n top='off',\n labelbottom='off')\n ax.tick_params(\\\n axis='y',\n which='both',\n left='off',\n top='off',\n labelleft='off')\nax.legend(numpoints=1)\n\nfor i in range(len(df)):\n ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'],\n size=8)\n \nplt.show()\n\n\nprint article['sentences']['s151']\nprint article['sentences']['s170']\nprint article['sentences']['s171']\nprint article['sentences']['s108']\n\nprint article['sentences']['s93']\nprint article['sentences']['s150']\nprint article['sentences']['s114']\nprint article['sentences']['s110']", "Hierarchical document clustering\nThe Ward clustering algorithm !!!!", "from scipy.cluster.hierarchy import ward, dendrogram\nlinkage_matrix = ward(dist) #define the linkage_matrix\n# using ward clustering pre-computed distances\n\nfig, ax = plt.subplots(figsize=(15,20)) # set size\nax = dendrogram(linkage_matrix, orientation=\"right\", labels=[\"s{0}\".format(x) for x in range(190)])\n\nplt.tick_params(\\\n axis = 'x',\n which ='both',\n bottom ='off',\n top = 'off',\n labelbottom = 'off')\n\nplt.tight_layout()\n\nplt.savefig('ward_clusters.png', dpi=200) \n\nframe = pd.DataFrame(linkage_matrix)\nframe.sort_values(2,axis=0, ascending=False)\n", "Extracting the links", "soup = BeautifulSoup(article_text, \"lxml\")\ndef is_valid_link(tag):\n if tag.name != 'link':\n return False\n link = tag\n l_conf = link['link_confidence']\n l_val = link['validation']\n arg = link.find_next_sibling('argument')\n sent = link.find_next_sibling('sentiment')\n a_val = arg['validation']\n s_val = sent['validation']\n a_conf = arg['val_confidence']\n s_conf = sent['val_confidence']\n args = [l_val, a_val, s_val, l_conf, a_conf, s_conf]\n return all(el == '1' or el == 'yes' for el in args)\n\nlinksHTML = soup.findAll(lambda tag:is_valid_link(tag))\n\nprint len(linksHTML), \"valid links found!\"\n\nparsed_links = []\nfor link_html in linksHTML:\n arg_html = link_html.find_next_sibling('argument')\n sent_html = link_html.find_next_sibling('sentiment')\n link = {}\n link['id'] = link_html['id']\n link['art_sentence'] = link_html['art_sentence']\n link['com_sentence'] = link_html['com_sentence']\n link['confidence'] = link_html['link_confidence']\n link['validation'] = link_html['validation']\n \n arg = {}\n arg['label'] = arg_html['label']\n arg['confidence'] = arg_html['val_confidence']\n arg['validation'] = arg_html['validation']\n \n sent = {}\n sent['label'] = sent_html['label']\n sent['confidence'] = 
sent_html['val_confidence']\n sent['validation'] = sent_html['validation']\n \n link['argument'] = arg\n link['sentiment'] = sent\n parsed_links.append(link)\n\n# pprint.pprint(parsed_links, indent=4)\nprint len(parsed_links),\"links parsed!\"\n " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
MonicaGutierrez/PracticalMachineLearningClass
exercises/06-Titanic_cross_validation.ipynb
mit
[ "Exercise 06\nData preparation and model evaluation exercise with Titanic data\nWe'll be working with a dataset from Kaggle's Titanic competition: data, data dictionary\nGoal: Predict survival based on passenger characteristics\nThe sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.\nOne of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.\nIn this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.\nRead the data into Pandas", "import pandas as pd\nurl = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/titanic.csv'\ntitanic = pd.read_csv(url, index_col='PassengerId')\ntitanic.head()", "Exercise 6.1\nImpute the missing values of the age and Embarked", "titanic.Age.fillna(titanic.Age.median(), inplace=True)\ntitanic.isnull().sum()\n\ntitanic.Embarked.mode()\n\ntitanic.Embarked.fillna('S', inplace=True)\ntitanic.isnull().sum()", "Exercise 6.3\nConvert the Sex and Embarked to categorical features", "titanic['Sex_Female'] = titanic.Sex.map({'male':0, 'female':1})\ntitanic.head()\n\nembarkedummy = pd.get_dummies(titanic.Embarked, prefix='Embarked')\nembarkedummy.drop(embarkedummy.columns[0], axis=1, inplace=True)\ntitanic = pd.concat([titanic, embarkedummy], axis=1)\ntitanic.head()", "Exercise 6.3 (2 points)\nFrom the set of features ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']\n*Note, use the created categorical features for Sex and Embarked\nSelect the features that maximize the accuracy the model using K-Fold cross-validation", "y = titanic['Survived']\n\nfeatures = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare','Sex_Female', 'Embarked_Q', 'Embarked_S']\n\nimport numpy as np\ndef comb(n,k) :\n return np.math.factorial(n) / (np.math.factorial(n-k) * np.math.factorial(k))\n\nnp.sum([comb(8,i) for i in range(0,8)])\n\nimport itertools\n\npossible_models = []\nfor i in range(1,len(features)+1):\n possible_models.extend(list(itertools.combinations(features,i)))\n\npossible_models\n\nimport itertools\n\npossible_models = [] \nfor i in range(1,len(features)+1):\n possible_models.extend(list(itertools.combinations(features,i)))\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import cross_val_score\n\nY = titanic.Survived\n\nresultado = pd.DataFrame(index=possible_models,columns=['presicion'])\nfor i in range(len(possible_models)):\n X = titanic[list(possible_models[i])]\n reglogistica = LogisticRegression(C=1e9)\n resultado.iloc[i] = cross_val_score(reglogistica, X, Y, cv=10, scoring='accuracy').mean()\n\nresultado.head()\n\nresultado.sort_values('presicion',ascending=False).head(1)", "Bonus Exercise 6.4 (3 points)\nNow which are the best set of features selected by AUC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Diyago/Machine-Learning-scripts
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
apache-2.0
[ "Introduction\nThis kernel shows how to use NBSVM (Naive Bayes - Support Vector Machine) to create a strong baseline for the Toxic Comment Classification Challenge competition. NBSVM was introduced by Sida Wang and Chris Manning in the paper Baselines and Bigrams: Simple, Good Sentiment and Topic Classification. In this kernel, we use sklearn's logistic regression, rather than SVM, although in practice the two are nearly identical (sklearn uses the liblinear library behind the scenes).\nIf you're not familiar with naive bayes and bag of words matrices, I've made a preview available of one of fast.ai's upcoming Practical Machine Learning course videos, which introduces this topic. Here is a link to the section of the video which discusses this: Naive Bayes video.", "import pandas as pd, numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ntrain = pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\nsubm = pd.read_csv('../input/sample_submission.csv')", "Looking at the data\nThe training data contains a row per comment, with an id, the text of the comment, and 6 different labels that we'll try to predict.", "train.head()", "Here's a couple of examples of comments, one toxic, and one with no labels.", "train['comment_text'][0]\n\ntrain['comment_text'][2]", "The length of the comments varies a lot.", "lens = train.comment_text.str.len()\nlens.mean(), lens.std(), lens.max()\n\nlens.hist();", "We'll create a list of all the labels to predict, and we'll also create a 'none' label so we can see how many comments have no labels. We can then summarize the dataset.", "label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\ntrain['none'] = 1-train[label_cols].max(axis=1)\ntrain.describe()\n\nlen(train),len(test)", "There are a few empty comments that we need to get rid of, otherwise sklearn will complain.", "COMMENT = 'comment_text'\ntrain[COMMENT].fillna(\"unknown\", inplace=True)\ntest[COMMENT].fillna(\"unknown\", inplace=True)", "Building the model\nWe'll start by creating a bag of words representation, as a term document matrix. We'll use ngrams, as suggested in the NBSVM paper.", "import re, string\nre_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')\ndef tokenize(s): return re_tok.sub(r' \\1 ', s).split()", "It turns out that using TF-IDF gives even better priors than the binarized features used in the paper. 
I don't think this has been mentioned in any paper before, but it improves leaderboard score from 0.59 to 0.55.", "n = train.shape[0]\nvec = TfidfVectorizer(ngram_range=(1,2), tokenizer=tokenize,\n min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,\n smooth_idf=1, sublinear_tf=1 )\ntrn_term_doc = vec.fit_transform(train[COMMENT])\ntest_term_doc = vec.transform(test[COMMENT])", "This creates a sparse matrix with only a small number of non-zero elements (stored elements in the representation below).", "trn_term_doc, test_term_doc", "Here's the basic naive bayes feature equation:", "def pr(y_i, y):\n p = x[y==y_i].sum(0)\n return (p+1) / ((y==y_i).sum()+1)\n\nx = trn_term_doc\ntest_x = test_term_doc", "Fit a model for one dependent at a time:", "def get_mdl(y):\n y = y.values\n r = np.log(pr(1,y) / pr(0,y))\n m = LogisticRegression(C=4, dual=True)\n x_nb = x.multiply(r)\n return m.fit(x_nb, y), r\n\npreds = np.zeros((len(test), len(label_cols)))\n\nfor i, j in enumerate(label_cols):\n print('fit', j)\n m,r = get_mdl(train[j])\n preds[:,i] = m.predict_proba(test_x.multiply(r))[:,1]", "And finally, create the submission file.", "submid = pd.DataFrame({'id': subm[\"id\"]})\nsubmission = pd.concat([submid, pd.DataFrame(preds, columns = label_cols)], axis=1)\nsubmission.to_csv('submission.csv', index=False)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
enakai00/jupyter_NikkeiLinux
No5/Figure11 - derivative_animation.ipynb
apache-2.0
[ "[4-1] 動画作成用のモジュールをインポートして、動画を表示可能なモードにセットします。", "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n%matplotlib nbagg", "[4-2] x=0.5における接線を描いて、その傾きを求める関数derivativeを定義します。", "def derivative(f, filename):\n fig = plt.figure(figsize=(4,4))\n images = []\n x0, d = 0.5, 0.5\n\n for _ in range(10):\n subplot = fig.add_subplot(1,1,1)\n subplot.set_xlim(0, 1)\n subplot.set_ylim(0, 1)\n slope = (f(x0+d)-f(x0)) / d\n linex = np.linspace(0, 1, 100)\n image0 = subplot.text(0.5, 8, ('slope = %f' % slope))\n image1, = subplot.plot(linex, f(linex), color='blue')\n image2 = subplot.scatter([x0,x0+d],[f(x0),f(x0+d)])\n\n def g(x):\n return f(x0) + slope * (x-x0)\n \n image3, = subplot.plot([0,1], [g(0),g(1)],\n linewidth=1, color='red')\n image4 = subplot.text(0.3, 1.05, ('slope = %f' % slope))\n images.append([image0, image1, image2, image3, image4])\n d *= 0.5\n\n ani = animation.ArtistAnimation(fig, images, interval=1000)\n ani.save(filename, writer='imagemagick', fps=1)\n return ani", "[4-3] 二次関数 y=x*x を用意して、関数derivativeを呼び出します。\nGIF動画ファイル「derivative01.gif」が作成されます。", "def f(x):\n y = x*x\n return y\n\nderivative(f, 'derivative01.gif')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
glouppe/scikit-optimize
examples/strategy-comparison.ipynb
bsd-3-clause
[ "Comparing surrogate models\nTim Head, July 2016.", "import numpy as np\nnp.random.seed(123)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (10, 6)\nplt.set_cmap(\"viridis\")", "Bayesian optimization or sequential model-based optimization uses a surrogate model\nto model the expensive to evaluate function func. There are several choices\nfor what kind of surrogate model to use. This example compares the performance of:\n\ngaussian processes,\nextra trees, and\nrandom forests \n\nas surrogate models. A purely random optimization strategy is used as a baseline.\nToy model\nWe will use the branin function as toy model for the expensive function. In\na real world application this function would be unknown and expensive to evaluate.", "from skopt.benchmarks import branin as _branin\n\ndef branin(x, noise_level=0.):\n return _branin(x) + noise_level * np.random.randn()\n\nfrom matplotlib.colors import LogNorm\n\ndef plot_branin():\n fig, ax = plt.subplots()\n\n x1_values = np.linspace(-5, 10, 100)\n x2_values = np.linspace(0, 15, 100)\n x_ax, y_ax = np.meshgrid(x1_values, x2_values)\n vals = np.c_[x_ax.ravel(), y_ax.ravel()]\n fx = np.reshape([branin(val) for val in vals], (100, 100))\n \n cm = ax.pcolormesh(x_ax, y_ax, fx,\n norm=LogNorm(vmin=fx.min(), \n vmax=fx.max()))\n\n minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])\n ax.plot(minima[:, 0], minima[:, 1], \"r.\", markersize=14, lw=0, label=\"Minima\")\n \n cb = fig.colorbar(cm)\n cb.set_label(\"f(x)\")\n \n ax.legend(loc=\"best\", numpoints=1)\n \n ax.set_xlabel(\"X1\")\n ax.set_xlim([-5, 10])\n ax.set_ylabel(\"X2\")\n ax.set_ylim([0, 15])\n \nplot_branin()", "This shows the value of the two-dimensional branin function and the three minima.\nObjective\nThe objective of this example is to find one of these minima in as few iterations\nas possible. One iteration is defined as one call to the branin function.\nWe will evaluate each model several times using a different seed for the\nrandom number generator. Then compare the average performance of these\nmodels. This makes the comparison more robust against models that get\n\"lucky\".", "from functools import partial\nfrom skopt import gp_minimize, forest_minimize, dummy_minimize\n\nfunc = partial(branin, noise_level=2.0)\nbounds = [(-5.0, 10.0), (0.0, 15.0)]\nx0 = [2.5, 7.5]\nn_calls = 80\n\ndef run(minimizer, n_iter=20):\n return [minimizer(func, bounds, x0=x0, n_calls=n_calls, random_state=n) \n for n in range(n_iter)]\n\n# Random search\ndummy_res = run(dummy_minimize) \n\n# Gaussian processes\ngp_res = run(gp_minimize)\n\n# Random forest\nrf_res = run(partial(forest_minimize, base_estimator=\"rf\"))\n\n# Extra trees \net_res = run(partial(forest_minimize, base_estimator=\"et\"))", "Note that this can take a few minutes.", "from skopt.plots import plot_convergence\n\nplot_convergence((\"dummy_minimize\", dummy_res),\n (\"gp_minimize\", gp_res),\n (\"forest_minimize('rf')\", rf_res),\n (\"forest_minimize('et)\", et_res), \n true_minimum=0.397887, yscale=\"log\")", "This plot shows the value of the minimum found (y axis) as a function of the number\nof iterations performed so far (x axis). The dashed red line indicates the\ntrue value of the minimum of the branin function.\nFor the first ten iterations all methods perform equally well as they all start\nby creating ten random samples before fitting their respective model for the\nfirst time. 
After iteration ten the next point at which to evaluate branin is\nguided by the model, which is where differences start to appear.\nEach minimizer only has access to noisy observations of the objective\nfunction, so as time passes (more iterations) it will start observing values that\nare below the true value simply because they are fluctuations." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
NYUDataBootcamp/Projects
UG_S16/Jerry_Allen_Gender_Pay_Gap.ipynb
mit
[ "Gender Pay Gap Inequality in the U.S. and Potential Insights\nA Research Project at NYU's Stern School of Buinsess — May 2016 \nWritten by Jerry \"Joa\" Allen (joa218@nyu.edu)\nAbstract \nAlthough it has been a longstanding issue, the gender pay gap has been an especially touched upon topic in recent times. There's the well-quoted statistic stating women earn 77% as much as their male counterparts in exchange for equal work. However, this statistic is met with contention from various economists. Some claim that women having less pay for equal work is possibly true in certain cases, but it is not by and large the case. This paper is meant to provide insights as it pertains to potential drivers of the gender pay gap.\nAcessing and Parsing the Data\nI decided to access the 2014 American Time Use Study, which is the most recent year available. The dataset I manipulate is the ATUS Activity Summary File. In brief, this file mostly outlines how respondents spent their time as it pertains to various activities, ranging from sleep to eldercare. Moreover, the file also contains information regarding the sex (ie. unfortunately gender was unavailable) of the respondents, amongst other demographic information. What I am largely interested in is investigating gender equality (or lack thereof) when it comes to labor force status, hours worked, childcare, and eldercare. Moreover, I will also weigh in on the implications these insights have on the gender\npay gap. With that in mind, I plan to produce figures which will concisely compare men and women along the variables mentioned above. \nIn terms of accessing the data, it is available on http://www.bls.gov/tus/datafiles_2014.htm, and under the ATUS Activity Summary zip. Furthermore, descriptions of the column variables and their units of measurement can be found at http://www.bls.gov/tus/atuscpscodebk14.pdf and http://www.bls.gov/tus/atusintcodebk14.pdf.", "import sys # system module\nimport pandas as pd # data package\nimport matplotlib.pyplot as plt # graphics module \nimport datetime as dt # date and time module\nimport numpy as np # foundation for Pandas\nimport seaborn.apionly as sns # matplotlib graphics (no styling)\n\n\n# these lines make our graphics show up in the notebook\n%matplotlib inline \n\n# check versions (overkill, but why not?)\nprint('Python version:', sys.version)\nprint('Pandas version: ', pd.__version__)\nprint('Today: ', dt.date.today())\n\natus = (pd.read_csv('/Users/JOA/Documents/Academics/NYU/Spring 2016/Data_Bootcamp/atussum_2014/atussum_2014.dat'))\n\natus['TESEX'] = atus['TESEX'].replace({1: 'Male', 2:'Female'})\natus['TELFS'] = atus['TELFS'].replace({1: \"Employed(at work)\", 2: \"Employed(absent)\",\n 3:'Unemployed(on layoff)', 4: 'Unemployed(looking)',\n 5: \"Not in labor force\"})#TELFS refers to labor force status\n\natus = atus.set_index('TESEX')\natus.index.name = 'Sex'\n\natus = atus[['TEHRUSLT', 'TELFS', 'TRERNWA', 'TRTEC', 'TRTHH']]\natus = atus.replace(to_replace=[-1], value=[None]) # -1 represents blank answers\natus = atus.replace(to_replace=[-2], value=[None]) # -2 represents a \"don't know\" answer\natus = atus.replace(to_replace=[-3], value=[None]) # -3 represents a refuse to answer\natus = atus.replace(to_replace=[-4], value=[None]) # -4 represents an \"hours vary\" answer that is of no use\n\natus['TRERNWA'] = atus['TRERNWA']/100 #TRERNWA measures weekly income. 
The original values implied 2 decimal places\n\natus = atus.rename(columns={'TEHRUSLT':'Hours Worked/Wk','TELFS':'Labor Force Status', 'TRERNWA':'Main Job Income/Wk'\n ,'TRTEC': 'Elderly Care (mins)','TRTHH':'Secondary Child Care (mins)'})\n\natus['Sex'] = atus.index\natus.columns = ['Hours Worked/Wk', 'Labor Force Status', 'Main Job Income/Wk',\n 'Elderly Care (mins)', 'Secondary Child Care (mins)', 'Sex'] #added in Sex as column for sns plot purposes\n\nfig, ax = plt.subplots()\nfig.set_size_inches(11.7, 5.5)\nax.set_title('Figure 1. Labor Force Status Count', weight = 'bold', fontsize = 17)\nsns.countplot(x= 'Labor Force Status', hue='Sex', data= atus)\nplt.xlabel('Labor Force Status',weight='bold',fontsize=13)\nplt.ylabel('Count',weight='bold', fontsize=13)", "Labor Force Status\nThe notion of women making up to 23 less cents on the dollar than men has been challenged numerous times. Many claim, including Resident Fellow at the Harvard Institute of Politics, Karen Agness, that this statistic in manipulated and misled by popular media and the government. The extent of systemic discrimination on women in the U.S. suggested by this statistic is far from conclusive, as it does not take into account the many factors that are producing this number. Figure 1 illustrates the difference in labor force placement between men and women. It is worth noting that there were 20% more female respondents in this survey, such that the female count is inflated compared to that of males. Even when adjusting for greater number of female respondents, there is about 25% more females not in the labor force than males. Naturally, this kind of discrepancy in labor force status is likely to contribute to the overall gender pay gap we are witnessing in the U.S. Moreover, the number of men and women unemployed and looking are nearly the same. Although it may not debunk, this insight discredits the notion of systemic hiring discrimination considering there are more women not working, but there are not more women looking for a job. If there was systemic hiring discrimination against women, there would presumably be a greater share of women looking for a job than men.", "fig, ax = plt.subplots()\nfig.set_size_inches(11.7, 8.27)\nax.set_title('Figure 2. Income Per Week From Main Job', weight='bold', fontsize = 17)\nsns.set_style(\"whitegrid\")\nsns.violinplot(x='Sex',y='Main Job Income/Wk', data = atus)\nplt.xlabel('Sex',weight='bold',fontsize=13)\nplt.ylabel('Main Job Income/Wk ($)',weight='bold', fontsize=13)", "Differences in Main Stream of Income \nFigure 2 clearly illustrates men earning more income than women. There's a sizable share of women earning less than 500/week, while there are very few making more than 1500/week. On the other hand, the men's income is a more evenly distributed, as opposed to being as bottom heavy as women's income. The interquartile range of men is about 1000 compared to about 600 for women. Furthermore, the figure clearly portrays men having a lot more of an income upside, as the upper quartile of women is about 1000, while the upper quartile of men is about 1500 (ie. displayed in the black lines within the axes objects). This difference in income is just as stark, when observing the top earners between men and women, as the top earner for men (about 2900) is about 30% more than his women counterpart. If nothing else, this figure reinforces the fact that men make more money than women, and their income is more widely distributed. 
The below figures will provide potential drivers for this inequality as it pertains to differences in time use between men and women.", "fig, ax = plt.subplots()\nfig.set_size_inches(11.7, 8.27)\nax.set_title('Figure 3. Hours Worked Per Week', weight='bold',fontsize = 17)\nsns.set_style('whitegrid')\nsns.boxplot(x='Sex', y='Hours Worked/Wk', data= atus)\nplt.xlabel('Sex',weight='bold',fontsize=13)\nplt.ylabel('Hours Worked/Wk',weight='bold', fontsize=13)", "Differences in Hours Worked\nOne obvious factor to investigate is the number of hours worked for both men and women. This will surely have an impact on the earnings for each sex. Figure 3 shows that males work considerably more hours than females. A clear indicator of this is the upper quartile for women being 40 hours/week is virtually equal to the lower quartile for men. It does not require statistical analysis to presume the more hours one works, the more income that person tends to earn. This perhaps explains, at least to some degree, the stark difference in incomes between men and women, shown in the Figure 2. However, the question remains what women are spending their time doing more than men if they are not working more hours than men. The implication is that women are enduring certain responsibilities (ie. more so than men) that take up their time, and this in turn has a negative impact on their income.", "fig, ax = plt.subplots()\nfig.set_size_inches(11.7, 8.27)\nax.set(xlim=(0, 1400))\nax.set_title('Figure 4. Mins/Day Providing Secondary Child Care (<13y/o)', weight='bold', fontsize = 17)\nsns.violinplot(data= atus, x='Secondary Child Care (mins)', y='Sex')\nplt.xlabel('Secondary Child Care (Mins/Day)',weight='bold',fontsize=13)\nplt.ylabel('Sex',weight='bold', fontsize=13)", "The Differences in the Time Spent Providing Child Care\nSecondary child care is referring to time spent looking after children, while taking on something else as a primary activity. In sum, it is keeping a watchful eye over children, without providing one's full and undivided attention. Harvard Economics Professor, Claudia Goldin postulated that women providing more family care is a potential reason for the pay gap. Moreover, she touched upon research that viably suggests that women value temporal flexibility more than men, while men value income more than women. Figure 4 displays that women provide secondary child care more than men, as over 25% provide more than 200 minutes/day of such care. The fat tail on blue object depicts that their is a great deal of women providing hundreds of minutes of child care each day. Resultantly, the women who have these responsibilities are presumably earning less income than men and women who do not.", "fig, ax = plt.subplots()\nfig.set_size_inches(11.27, 5.5)\nax.set(ylim=(0, 1400))\nax.set_title(\"Figure 5. Mins/Day Providing Elderly Care\", weight='bold',fontsize = 17)\nsns.set_style(\"whitegrid\")\nsns.swarmplot(x='Sex', y='Elderly Care (mins)', data= atus)\nplt.xlabel('Sex',weight='bold',fontsize=13)\nplt.ylabel('Elderly Care (Mins/Day)',weight='bold', fontsize=13)", "Differences in Time Spent Providing Elderly Care\nRelated to the Figure 4, Figure 5 depicts females providing considerably more eldercare (ie. unrelated to employment) than their male counterparts. Granted, there is 20% more female respondents in the survey which makes the size of the blue object more accentuated. 
Nevertheless, there is still about twice as many females providing elderly care than men when accounting for the difference in number of respondents. This data supports Goldin's postulate of the women earning less than men are more likely to provide family care than men.\nConclusion\nAlthough their is still much more analysis that can be done, this paper at least sheds light on the fact that the gender pay gap issue is much more convoluted and nuanced than typically given credit for. Far too often, blanketed statements are made regarding females getting paid less than men, without evaluating any specific underlying reasons. Instead, it is prevalent practice to use the loaded word, \"discrimination,\" as reasoning for the matter. In fact, Former Director of the U.S. State Department, Anne-Marie Slaughter, finds that when you do not account for women with caregiving responsibilities, women earn only 5% less than men. \nGoing forward, it would be worthwhile to further investigate the notion of women valuing temporal flexibility more than men, and men valuing income more than women. It would be interesting the further validate this assertion and learn if and how this disposition has changed over decades. If women are genetically predisposed to be more caregiving, then it may be a futile cause to completely close the gender pay gap given women would always tend to spend more time providing family care as opposed to earning income through working, compared to men. On the other hand, if women are at all pressured by social constructs to be more caregiving, then it would be worth thinking about potential solutions to eradicate this issue. In any case, it would be useful to further explore gender differences and inequities as a means to discover conclusive insights that can best mitigate any gender pay gap. \nReferences\nAgness, Karen. \"Don't Buy Into The Gender Pay Gap Myth.\" Forbes. Forbes Magazine, 12 Apr. 2016. Web. 11 May 2016.\nDubner, Stephen J. \"The True Story of the Gender Pay Gap - Freakonomics.\" Freakonomics. N.p., 7 Jan. 2016. Web. 11 May 2016." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
zhuanxuhit/deep-learning
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
mit
[ "Skip-gram word2vec\nIn this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like translations.\nReadings\nHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.\n\nA really good conceptual overview of word2vec from Chris McCormick \nFirst word2vec paper from Mikolov et al.\nNIPS paper with improvements for word2vec also from Mikolov et al.\nAn implementation of word2vec from Thushan Ganegedara\nTensorFlow word2vec tutorial\n\nWord embeddings\nWhen you're dealing with language and words, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as \"black\", \"white\", and \"red\" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.\n<img src=\"assets/word2vec_architectures.png\" width=\"500\">\nIn this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.\nFirst up, importing packages.", "import time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils", "Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.", "from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport zipfile\n\ndataset_folder_path = 'data'\ndataset_filename = 'text8.zip'\ndataset_name = 'Text8 Dataset'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(dataset_filename):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:\n urlretrieve(\n 'http://mattmahoney.net/dc/text8.zip',\n dataset_filename,\n pbar.hook)\n\nif not isdir(dataset_folder_path):\n with zipfile.ZipFile(dataset_filename) as zip_ref:\n zip_ref.extractall(dataset_folder_path)\n \nwith open('data/text8') as f:\n text = f.read()", "Preprocessing\nHere I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function coverts any punctuation into tokens, so a period is changed to &lt;PERIOD&gt;. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. 
If you want to write your own functions for this stuff, go for it.", "words = utils.preprocess(text)\nprint(words[:30])\n\nprint(\"Total words: {}\".format(len(words)))\nprint(\"Unique words: {}\".format(len(set(words))))", "And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word (\"the\") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.", "vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)\nint_words = [vocab_to_int[word] for word in words]", "Subsampling\nWords that show up often such as \"the\", \"of\", and \"for\" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by \n$$ P(w_i) = 1 - \\sqrt{\\frac{t}{f(w_i)}} $$\nwhere $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.\nI'm going to leave this up to you as an exercise. Check out my solution to see how I did it.\n\nExercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is that probability that a word is discarded. Assign the subsampled data to train_words.", "from collections import Counter\nimport random\n\nthreshold = 1e-5\nword_counts = Counter(int_words)\ntotal_count = len(int_words)\nfreqs = {word: count/total_count for word, count in word_counts.items()}\np_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}\ntrain_words = [word for word in int_words if p_drop[word] < random.random()]", "Making batches\nNow that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$. \nFrom Mikolov et al.: \n\"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels.\"\n\nExercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you chose a random number of words to from the window.", "def get_target(words, idx, window_size=5):\n ''' Get a list of words in a window around an index. '''\n \n R = np.random.randint(1, window_size+1)\n start = idx - R if (idx - R) > 0 else 0\n stop = idx + R\n target_words = set(words[start:idx] + words[idx+1:stop+1])\n \n return list(target_words)", "Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. 
This is a generator function by the way, helps save memory.", "def get_batches(words, batch_size, window_size=5):\n ''' Create a generator of word batches as a tuple (inputs, targets) '''\n \n n_batches = len(words)//batch_size\n \n # only full batches\n words = words[:n_batches*batch_size]\n \n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx+batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x]*len(batch_y))\n yield x, y\n ", "Building the graph\nFrom Chris McCormick's blog, we can see the general structure of our network.\n\nThe input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.\nThe idea here is to train the hidden layer weight matrix to find efficient representations for our words. This weight matrix is usually called the embedding matrix or embedding look-up table. We can discard the softmax layer becuase we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.\nI'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.\n\nExercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.", "train_graph = tf.Graph()\nwith train_graph.as_default():\n inputs = tf.placeholder(tf.int32, [None], name='inputs')\n labels = tf.placeholder(tf.int32, [None, None], name='labels')", "Embedding\nThe embedding matrix has a size of the number of words by the number of neurons in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \\times 300$. Remember that we're using one-hot encoded vectors for our inputs. When you do the matrix multiplication of the one-hot vector with the embedding matrix, you end up selecting only one row out of the entire matrix:\n\nYou don't actually need to do the matrix multiplication, you just need to select the row in the embedding matrix that corresponds to the input word. Then, the embedding matrix becomes a lookup table, you're looking up a vector the size of the hidden layer that represents the input word.\n<img src=\"assets/word2vec_weight_matrix_lookup_table.png\" width=500>\n\nExercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. 
For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform.", "n_vocab = len(int_to_vocab)\nn_embedding = 200 # Number of embedding features \nwith train_graph.as_default():\n embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs)", "Negative sampling\nFor every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called \"negative sampling\". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.\n\nExercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.", "# Number of negative labels to sample\nn_sampled = 100\nwith train_graph.as_default():\n softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))\n softmax_b = tf.Variable(tf.zeros(n_vocab))\n \n # Calculate the loss using negative sampling\n loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, \n labels, embed,\n n_sampled, n_vocab)\n \n cost = tf.reduce_mean(loss)\n optimizer = tf.train.AdamOptimizer().minimize(cost)", "Validation\nThis code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.", "with train_graph.as_default():\n ## From Thushan Ganegedara's implementation\n valid_size = 16 # Random set of words to evaluate similarity on.\n valid_window = 100\n # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent \n valid_examples = np.array(random.sample(range(valid_window), valid_size//2))\n valid_examples = np.append(valid_examples, \n random.sample(range(1000,1000+valid_window), valid_size//2))\n\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n \n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))\n normalized_embedding = embedding / norm\n valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)\n similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))\n\n# If the checkpoints directory doesn't exist:\n!mkdir checkpoints\n\nepochs = 10\nbatch_size = 1000\nwindow_size = 10\n\nwith train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n iteration = 1\n loss = 0\n sess.run(tf.global_variables_initializer())\n\n for e in range(1, epochs+1):\n batches = get_batches(train_words, batch_size, window_size)\n start = time.time()\n for x, y in batches:\n \n feed = {inputs: x,\n labels: np.array(y)[:, None]}\n train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)\n \n loss += train_loss\n \n if iteration % 100 == 0: \n end = time.time()\n print(\"Epoch {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Avg. 
Training loss: {:.4f}\".format(loss/100),\n \"{:.4f} sec/batch\".format((end-start)/100))\n loss = 0\n start = time.time()\n \n if iteration % 1000 == 0:\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = int_to_vocab[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = int_to_vocab[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n \n iteration += 1\n save_path = saver.save(sess, \"checkpoints/text8.ckpt\")\n embed_mat = sess.run(normalized_embedding)", "Restore the trained network if you need to:", "with train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n embed_mat = sess.run(embedding)", "Visualizing the word vectors\nBelow we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local stucture. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.", "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\nviz_words = 500\ntsne = TSNE()\nembed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])\n\nfig, ax = plt.subplots(figsize=(14, 14))\nfor idx in range(viz_words):\n plt.scatter(*embed_tsne[idx, :], color='steelblue')\n plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
frainfreeze/studying
home/python/learningPython5thED/Learning python 5th ed..ipynb
mit
[ "Test your knowledge: Part II exercises\n1. The basics\nRun each of the following expressions, and try to\nexplain what’s happening in each case. Note that the semicolon in some of these\nis being used as a statement separator, to squeeze multiple statements onto a single\nline: for example, X=1;X assigns and then prints a variable. Also remember that a comma between expressions usually builds a tuple, even if there are no enclosing parentheses: X,Y,Z\nis a three-item tuple, which Python prints back to you in parentheses.", "2 ** 16\n\n2 / 5, 2 / 5.0\n\n\"spam\" + \"eggs\"\n\nS = \"ham\"\n\"eggs \" + S\n\nS * 5\n\nS[:0]\n\n\"green %s and %s\" % (\"eggs\", S)\n\n'green {0} and {1}'.format('eggs', S)\n\n('x',)[0]\n\n('x', 'y')[1]\n\nL = [1,2,3] + [4,5,6]\nL, L[:], L[:0], L[-2], L[-2:]\n\n([1,2,3] + [4,5,6])[2:4]\n\n[L[2], L[3]]\n\nL.reverse(); L\n\nL.sort(); L\n\nL.index(4)\n\n{'a':1, 'b':2}['b']\n\nD = {'x':1, 'y':2, 'z':3}\nD['w'] = 0\nD\n\nD['x'] + D['w']\nD\n\nD[(1,2,3)] = 4\nD\n\nlist(D.keys()), list(D.values()), (1,2,3) in D\n\n[[]], [\"\",[],(),{},None]", "2. Indexing and slicing\nAt the interactive prompt, define a list named L that contains\nfour strings or numbers (e.g., L=[0,1,2,3] ). Then, experiment with the following\nboundary cases. You may never see these cases in real programs (especially not in\nthe bizarre ways they appear here!), but they are intended to make you think about\nthe underlying model, and some may be useful in less artificial forms—slicing out\nof bounds can help, for example, if a sequence is as long as you expect:\n- What happens when you try to index out of bounds (e.g., L[4] )?\n- What about slicing out of bounds (e.g., L[−1000:100] )?\n- Finally, how does Python handle it if you try to extract a sequence in reverse,\nwith the lower bound greater than the higher bound (e.g., L[3:1] )? Hint: try\nassigning to this slice ( L[3:1]=['?'] ), and see where the value is put. Do you\nthink this may be the same phenomenon you saw when slicing out of bounds?\n3. Indexing, slicing and del\nDefine another list L with four items, and assign an empty\nlist to one of its offsets (e.g., L[2]=[] ). What happens? Then, assign an empty list\nto a slice ( L[2:3]=[] ). What happens now? Recall that slice assignment deletes the\nslice and inserts the new value where it used to be.\nThe del statement deletes offsets, keys, attributes, and names. Use it on your list\nto delete an item (e.g., del L[0] ). What happens if you delete an entire slice ( del\nL[1:] )? What happens when you assign a nonsequence to a slice ( L[1:2]=1 )?\n4. Tuple assignment\nWhat do you think is happening to X and Y when you run following sequence?", "X = 'spam'\nY = 'eggs'\nX, Y = Y, X", "5. Dictionary keys.\nYou’ve learned that dictionaries aren’t accessed by offsets, so what’s going on here?", "D = {}\nD[1] = 'a'\nD[2] = 'b'\nD", "Does the following shed any light on the subject? (Hint: strings, integers, and tuples\nshare which type category?)", "D[(1, 2, 3)] = 'c'\nD", "6. Dictionary indexing.\nCreate a dictionary named D with three entries, for keys 'a' ,\n'b' , and 'c' . What happens if you try to index a nonexistent key ( D['d'] )? What\ndoes Python do if you try to assign to a nonexistent key 'd' (e.g., D['d']='spam' )?\nHow does this compare to out-of-bounds assignments and references for lists?\nDoes this sound like the rule for variable names?\n7. 
Generic operations.\nRun interactive tests to answer the following questions:\n- What happens when you try to use the + operator on different/mixed types\n(e.g., string + list, list + tuple)?\n- Does + work when one of the operands is a dictionary?\n- Does the append method work for both lists and strings? How about using the\nkeys method on lists? (Hint: what does append assume about its subject object?)\n- Finally, what type of object do you get back when you slice or concatenate two\nlists or two strings?\n8. String indexing.\nDefine a string S of four characters: S = \"spam\" . Then type the\nfollowing expression: S[0][0][0][0][0] . Any clue as to what’s happening this time?\n(Hint: recall that a string is a collection of characters, but Python characters are\none-character strings.) Does this indexing expression still work if you apply it to a\nlist such as ['s', 'p', 'a', 'm'] ? Why?\n9. Immutable types.\nDefine a string S of four characters again: S = \"spam\" . Write an\nassignment that changes the string to \"slam\" , using only slicing and concatenation.\nCould you perform the same operation using just indexing and concatenation?\nHow about index assignment?\n10. Nesting.\nWrite a data structure that represents your personal information: name\n(first, middle, last), age, job, address, email address, and phone number. You may\nbuild the data structure with any combination of built-in object types you like (lists,\ntuples, dictionaries, strings, numbers). Then, access the individual components of\nyour data structures by indexing. Do some structures make more sense than others\nfor this object?\n11. Files\nWrite a script that creates a new output file called myfile.txt and writes the\nstring \"Hello file world!\" into it. Then write another script that opens my-\nfile.txt and reads and prints its contents. Does the new file show up in the directory where you ran your\nscripts? What if you add a different directory path to the filename passed to open ?\nNote: file write methods do not add newline characters to your strings; add an\nexplicit \\n at the end of the string if you want to fully terminate the line in the file.", "!ls", "Test your knowledge: Part III exercises\n1. Coding basic loops\n\nWrite a for loop that prints the ASCII code of each character in a string named S. Use the built-in function ord(character) to convert each character to an\nASCII integer. This function technically returns a Unicode code point in\nPython 3.X, but if you restrict its content to ASCII characters, you’ll get back\nASCII codes. (Test it interactively to see how it works.)\nNext, change your loop to compute the sum of the ASCII codes of all the\ncharacters in a string.\nFinally, modify your code again to return a new list that contains the ASCII\ncodes of each character in the string. Does the expression map(ord, S) have a\nsimilar effect? How about [ord(c) for c in S] ? Why? (Hint: see Chapter 14.)\n\n2. Backslash characters\nWhat happens on your machine when you type the following\ncode interactively?", "for i in range(5):\n print('hello %d\\n\\a' % i, end=\"\")", "3. Sorting dictionaries.\nIn Chapter 8, we saw that dictionaries are unordered collections. Write a for loop that prints a dictionary’s items in sorted (ascending) order. (Hint: use the dictionary keys and list sort methods, or the newer sorted built-in function.)\n4. 
Program logic alternatives.\nConsider the following code, which uses a while loop and found flag to search a list of powers of 2 for the value of 2 raised to the fifth power (32).\n```python\nL = [1, 2, 4, 8, 16, 32, 64]\nX = 5\nfound = False\ni = 0\nwhile not found and i < len(L):\n if 2 ** X == L[i]:\n found = True\n else:\n i = i+1\nif found:\n print('at index', i)\nelse:\n print(X, 'not found') \n```\nAs is, the example doesn’t follow normal Python coding techniques. Follow the steps outlined here to improve it:\n- First, rewrite this code with a while loop else clause to eliminate the found flag and final if statement.\n- Next, rewrite the example to use a for loop with an else clause, to eliminate the explicit list-indexing logic. (Hint: to get the index of an item, use the list index method— L.index(X) returns the offset of the first X in list L .)\n- Next, remove the loop completely by rewriting the example with a simple in operator membership expression. (See Chapter 8 for more details, or type this to test: 2 in [1,2,3] .)\n- Finally, use a for loop and the list append method to generate the powers-of-2 list ( L ) instead of hardcoding a list literal.\nDeeper thoughts:\n- Do you think it would improve performance to move the 2 ** X expression outside the loops? How would you code that?\n- As we saw in exercise 1, Python includes a map(function, list) tool that can generate a powers-of-2 list, too: map(lambda x: 2 ** x, range(7)). Try typing this code interactively; we’ll meet lambda more formally in the next part of this book, especially in Chapter 19. Would a list comprehension help here (see Chapter 14)?\n5. Code maintenance.\nIf you haven’t already done so, experiment with making the code changes suggested in this chapter’s sidebar “Changing PyDoc’s Colors” on page 456. Much of the work of real software development is in changing existing code, so the sooner you begin doing so, the better. For reference, my edited copy of PyDoc is in the book’s examples package, named mypydoc.py; to see how it differs, you can run a file compare (fc on Windows) with the original pydoc.py in 3.3 (also included, lest it change radically in 3.4 as the sidebar describes). If PyDoc is more easily customized by the time you read these words, customize colors per its current convention instead; if this involves changing a CSS file, let’s\nhope the procedure will be well documented in Python’s manuals.\nTest Your Knowledge: Part IV Exercises\n1. The basics.\nAt the Python interactive prompt, write a function that prints its single\nargument to the screen and call it interactively, passing a variety of object types:\nstring, integer, list, dictionary. Then, try calling it without passing any argument.\nWhat happens? What happens when you pass two arguments?\n2. Arguments.\nWrite a function called adder in a Python module file. The function\nshould accept two arguments and return the sum (or concatenation) of the two.\nThen, add code at the bottom of the file to call the adder function with a variety of\nobject types (two strings, two lists, two floating points), and run this file as a script\nfrom the system command line. Do you have to print the call statement results to\nsee results on your screen?\n3. varargs.\nGeneralize the adder function you wrote in the last exercise to compute\nthe sum of an arbitrary number of arguments, and change the calls to pass more\nor fewer than two arguments. What type is the return value sum? 
(Hints: a slice\nsuch as S[:0] returns an empty sequence of the same type as S , and the type built-\nin function can test types; but see the manually coded min examples in Chapter 18 for a simpler approach.) What happens if you pass in arguments of different\ntypes? What about passing in dictionaries?\n4. Keywords.\nChange the adder function from exercise 2 to accept and sum/concatenate three arguments: def adder(good, bad, ugly). Now, provide default values\nfor each argument, and experiment with calling the function interactively. Try\npassing one, two, three, and four arguments. Then, try passing keyword arguments. Does the call adder(ugly=1, good=2) work? Why? Finally, generalize the\nnew adder to accept and sum/concatenate an arbitrary number of keyword arguments. This is similar to what you did in exercise 3, but you’ll need to iterate over\na dictionary, not a tuple. (Hint: the dict.keys method returns a list you can step\nthrough with a for or while , but be sure to wrap it in a list call to index it in 3.X;\ndict.values may help here too.)\n5. Dictionary tools.\nWrite a function called copyDict(dict) that copies its dictionary\nargument. It should return a new dictionary containing all the items in its argument. Use the dictionary keys method to iterate (or, in Python 2.2 and later, step\nover a dictionary’s keys without calling keys ). Copying sequences is easy ( X[:]\nmakes a top-level copy); does this work for dictionaries, too? As explained in this\nexercise’s solution, because dictionaries now come with similar tools, this and the\nnext exercise are just coding exercises but still serve as representative function\nexamples.\n6. Dictionary tools.\nWrite a function called addDict(dict1, dict2) that computes the\nunion of two dictionaries. It should return a new dictionary containing all the items\nin both its arguments (which are assumed to be dictionaries). If the same key appears in both arguments, feel free to pick a value from either. Test your function\nby writing it in a file and running the file as a script. What happens if you pass lists\ninstead of dictionaries? How could you generalize your function to handle this case,\ntoo? (Hint: see the type built-in function used earlier.) Does the order of the arguments passed in matter?\n7. More argument-matching examples.", "def f1(a, b): print(a, b) # Normal args\ndef f2(a, *b): print(a, b) # Positional varargs\ndef f3(a, **b): print(a, b) # Keyword varargs\ndef f4(a, *b, **c): print(a, b, c) # Mixed modes\ndef f5(a, b=2, c=3): print(a, b, c) # Defaults\ndef f6(a, b=2, *c): print(a, b, c) # Defaults and positional varargs", "Test the following calls interactively, and try to explain each result; in some\ncases, you’ll probably need to fall back on the matching algorithm shown in Chapter 18. Do you think mixing matching modes is a good idea in general? Can you think of cases where it would be useful?", "f1(1, 2)\n\nf1(b=2, a=1)\n\nf2(1, 2, 3)\n\nf3(1, x=2, y=3)\n\nf4(1, 2, 3, x=2, y=3)\n\nf5(1)\n\nf5(1, 4)\n\nf6(1)\n\nf6(1, 3, 4)", "8. Primes revisited.\nRecall the following code snippet from Chapter 13, which simplistically determines whether a positive integer is prime:\npython\nx = y // 2 # For some y &gt; 1\nwhile x &gt; 1:\n if y % x == 0: # Remainder\n print(y, 'has factor', x)\n break # Skip else\n x -= 1\nelse: # Normal exit\n print(y, 'is prime')\nPackage this code as a reusable function, add some calls to the function. 
While you’re at it, experiment with replacing the first line’s // operator with / to see how true division changes the / operator in Python 3.X and breaks this code (refer back to Chapter 5 if you need a reminder). What can you do about negatives, and the values 0 and 1 ? How about speeding this up? Your outputs should look something like this:\n13 is prime\n13.0 is prime\n15 has factor 5\n15.0 has factor 5.0\n9. Iterations and comprehensions.\nWrite code to build a new list containing the square roots of all the numbers in this list: [2, 4, 9, 16, 25]. Code this as a for loop first,\nthen as a map call, then as a list comprehension, and finally as a generator expression. Use the sqrt function in the built-in math module to do the calculation (i.e.,\nimport math and say math.sqrt(x) ). Of the four, which approach do you like best?\n10. Timing tools.\nIn Chapter 5, we saw three ways to compute square roots:\nmath.sqrt(X) , X ** .5 , and pow(X, .5) . If your programs run a lot of these, their\nrelative performance might become important. To see which is quickest, repurpose\nthe timerseqs.py script we wrote in this chapter to time each of these three tools.\nUse the bestof or bestoftotal functions in one of this chapter’s timer modules to\ntest (you can use either the original, the 3.X-only keyword-only variant, or the 2.X/\n3.X version, and may use Python’s timeit module as well). You might also want\nto repackage the testing code in this script for better reusability—by passing a test\nfunctions tuple to a general tester function, for example (for this exercise a copy-\nand-modify approach is fine). Which of the three square root tools seems to run\nfastest on your machine and Python in general? Finally, how might you go about\ninteractively timing the speed of dictionary comprehensions versus for loops?\n11. Recursive functions.\nWrite a simple recursion function named countdown that prints\nnumbers as it counts down to zero. For example, a call countdown(5) will print: 5\n4 3 2 1 stop. There’s no obvious reason to code this with an explicit stack or\nqueue, but what about a nonfunction approach? Would a generator make sense\nhere?\n12. Computing factorials.\nFinally, a computer science classic (but demonstrative nonetheless). We employed the notion of factorials in Chapter 20’s coverage of permutations: N! , computed as N*(N-1)*(N-2)*...1 . For instance, 6! is 6*5*4*\\3*2*1 , or\n720 . Code and time four functions that, for a call fact(N) , each return N! . Code these four functions (1) as a recursive countdown per Chapter 19; (2) using the\nfunctional reduce call per Chapter 19; (3) with a simple iterative counter loop per\nChapter 13; and (4) using the math.factorial library tool per Chapter 20. Use\nChapter 21’s timeit to time each of your functions. What conclusions can you\ndraw from your results?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
sdpython/ensae_teaching_cs
_doc/notebooks/exams/td_note_2015.ipynb
mit
[ "1A.e - TD noté, 5 décembre 2014\nParcours de chemins dans un graphe acyclique (arbre).", "from jyquickhelper import add_notebook_menu\nadd_notebook_menu()", "Après chaque question, on vérifie sur un petit exemple que cela fonctionne comme attendu.\nExercice 1\nCe premier exercice aborde la problème d'un parcours de graphe non récursif.\nQ1", "def adjacence(N):\n # on crée uen matrice vide\n mat = [ [ 0 for j in range(N) ] for i in range(N) ]\n for i in range(0,N-1):\n mat[i][i+1] = 1\n return mat\n\nmat = adjacence(7)\nmat", "Q2\nIl faut ajouter 5 arcs au hasard en évitant d'ajouter deux fois le même.", "import random\ndef ajoute_points(mat,nb=5):\n ajout = { }\n while len(ajout) < 5 :\n i,j = random.randint(0,len(mat)-1),random.randint(0,len(mat)-1)\n if i < j and (i,j) not in ajout:\n mat[i][j] = 1\n ajout[i,j] = 1\n\najoute_points(mat)\nmat", "Q3", "def successeurs(adj,i):\n ligne = adj[i]\n # dans l'expression suivante, \n # s est la valeur de la matrice (0 ou 1)\n # i l'indice\n return [ i for i,s in enumerate(ligne) if s == 1 ]\n\nsuccesseurs(mat, 1)", "Q4", "def successeurs_dico(adj):\n return { i:successeurs(adj, i) for i in range(len(adj)) }\n\ndico = successeurs_dico(mat)\ndico", "Q5", "def suites_chemin(chemin, dico):\n dernier = chemin[-1]\n res = [ ]\n for s in dico[dernier]:\n res.append ( chemin + [ s ] )\n return res\n\nsuites_chemin( [ 0, 1 ], dico)", "Q6", "def parcours(adj):\n dico = successeurs_dico(adj)\n chemins = [ [ 0 ]]\n resultat = [ ]\n while len(chemins) > 0 :\n chemins2 = []\n for chemin in chemins :\n res = suites_chemin(chemin, dico)\n if len(res) == 0:\n # chemin est un chemin qui ne peut être continué\n resultat.append ( chemin )\n else:\n chemins2.extend ( res ) \n chemins = chemins2\n return resultat\n\nparcours(mat)", "Q7\nLa différence entre un parcours en profondeur et un parcours en largeur tient au fait qu'on préfère d'abord explorer le successeur direct, puis le successeur direct plutôt que les voisins du successeurs directe. Dans le premier cas, on aboutit très vite à un chemin terminé. Dans le second cas, on obtient les chemins plutôt vers la fin de l'algorithme. Dans la version proposée par l'algorithme, c'est un parcours en largeur qui est implémenté.\nQ8\nLa matrice en question est la suivante (pour $N=7$) :", "def adjacence8(N):\n # on crée uen matrice vide\n mat = [ [ 0 for j in range(N) ] for i in range(N) ]\n for i in range(0,N-1):\n for j in range(i+1,N):\n mat[i][j] = 1\n return mat\n\nadj = adjacence8(7)\nadj\n\nche = parcours(adj)\nprint(\"nombre\",len(che))\nche", "On fait une petite boucle pour intuiter le résultat :", "for i in range(5,11):\n adj = adjacence8(i)\n che = parcours(adj)\n print(i, \"-->\",len(che))", "Cela ressemble beaucoup à des puissances de deux. Cela suggère un raisonnement par récurrence. Chaque noeud $i$ est connecté à tous les suivantes $i+1$, $i+2$... On remarque que tous les chemins se termine par le dernier noeud $n$. Lorsqu'on ajoute le noeud $n+1$ au graphe, il sera le successeur de tous les autres. Pour un chemin donné, on peut soit l'ajouter à la fin, soit remplacer le dernier noeud $n$ par $n-1$. C'est ainsi qu'on multiplie par deux le nombre de chemins. S'il y a $n$ noeuds, on obtient $2^{n-2}$.\nExercice 2\nOn suppose qu'on dispose d'un tableau de nombres non trié. Ecrire une fonction qui retourne les trois éléments minimaux.\nLa première option consiste à utiliser la fonction sort. 
Celle-ci a un coût de $O(n \\ln n)$ le programme est très simple.", "l = [ -1, 4, 6, 4, 1, 9, 5 ]\nl.sort()\nl[:3]", "Le problème qu'on cherche à résoudre est plus simple puisqu'il s'agit de ne garder que les trois premiers éléments. On n'a pas besoin de trier la fin de la liste. L'idée consiste à parcourir le tableau et à ne conserver que les trois premiers éléments. Si un élément est plus grand que le troisième élément, on ne s'en occupe pas.", "def garde_3_element(tab):\n meilleur = [ ]\n for t in tab:\n if len(meilleur) < 3 :\n meilleur.append(t)\n meilleur.sort()\n elif t < meilleur[2] :\n meilleur[2] = t\n meilleur.sort()\n return meilleur\n\ngarde_3_element(l)", "Même si on utilise un tri, le coût est en en $O(n)$ car le tri opère sur au plus trois éléments.\nExercice 3\nQ1", "def word2dict(mot):\n return { i: mot[:i] for i in range(len(mot)+1) }\n\nword2dict(\"mot\"), word2dict(\"python\")", "Q2", "def two_words2dict(d1,d2):\n return { (i,j): (d1[i],d2[j]) for i in d1 for j in d2 }\n\nmot1 = \"python\"\nmot2 = \"piton\"\nd1 = word2dict(mot1)\nd2 = word2dict(mot2)\nvertices = two_words2dict(d1,d2)\nvertices", "Q3\nIl y a autant d'éléments que $(len(mot1) +1)*(len(mot2)+1)$ puisqu'on fait une double boucle sur toutes les positions + 1 pour 0. Donc $(p+1)(q+1)$ si $p$ et $q$ sont les tailles des deux mots.", "len(vertices),(len(mot1)+1)*(len(mot2)+1)", "Q4", "def add_edge_hv(vertices):\n edges = { }\n for edge1 in vertices:\n i1,j1 = edge1\n for edge2 in vertices:\n i2,j2 = edge2\n if (i2-i1==1 and j1==j2) or (j2-j1==1 and i1==i2) :\n edges[ edge1,edge2 ] = 1\n return edges\n\nedges = add_edge_hv(vertices)\nedges", "Q5\nPour chaque noeud, on ajoute deux arcs excepté les noeuds qui correspond à la fin des mots. Donc $2(p+1)(q+1)-(p+1)-(q+1)=2pq+p+q$.", "len(edges), 2*len(mot1)*len(mot2)+len(mot1)+len(mot2)", "Q6\nOn s'inspire de la fonction précédente. Il serait plus efficace de les fusionner.", "def cout(m1,m2):\n c1 = m1[-1]\n c2 = m2[-1]\n if c1==c2 : return 0\n else : return 1\n\ndef ajoute_diagonale(edges, vertices):\n # edges = { } # on n'ajoute surtout pas cette ligne, sinon c'est comme si on effaçait tout ce que contient\n # edges\n for edge1 in vertices:\n i1,j1 = edge1\n for edge2 in vertices:\n i2,j2 = edge2\n if i2-i1==1 and j2-j1==1 :\n edges[ edge1,edge2 ] = cout (vertices [ edge2 ][0], vertices [ edge2 ][1] )\n\najoute_diagonale(edges, vertices)\nedges", "Q7\nL'algorithme du plus court chemin.", "def loop_on_edges(distance, edges):\n for edge,cout in edges.items() :\n v1,v2 = edge\n if v1 in distance and (v2 not in distance or distance[v2] > distance[v1] + cout) :\n distance[v2] = distance[v1] + cout", "Q8\nLa question était sans doute un peu mal posé car il est beaucoup plus facile pour la fonction loop_on_edges de savoir si le dictionnaire distance est modifié ou non. 
On la modifie pour qu'elle retourne le nombre de mises à jour.", "def loop_on_edges(distance, edges):\n misejour = 0\n for edge,cout in edges.items() :\n v1,v2 = edge\n if v1 in distance and (v2 not in distance or distance[v2] > distance[v1] + cout) :\n distance[v2] = distance[v1] + cout\n misejour += 1\n return misejour", "Puis l'algorithme final :", "def plus_court_chemin(edges):\n distance = { (0,0): 0 }\n m = 1\n while m > 0:\n m = loop_on_edges(distance, edges)\n return distance\n\nresultat = plus_court_chemin(edges)\nresultat", "Q9\nComme on a tout fait avec ces deux mots, il suffit de prendre la bonne valeur dans le tableau distance :", "print(mot1,mot2)\nresultat [ len(mot1), len(mot2) ]", "Exercice 4\nOn a un tableau d'entiers l = [1, 8, 5, 7, 3, 6, 9]. On veut placer les entiers pairs en premiers et les entiers impairs en derniers : 8, 6, 1, 5, 7, 3, 9. Ecrire une fonction qui fait cela.\nLe coût d'un tri est de $O(n \\ln n)$. On construit d'abord le couple (parité, élément) pour chaque élément puis on trie de table. C'est la solution la plus simple.", "l = [1, 8, 5, 7, 3, 6, 9]\nl2 = [ (i%2, i) for i in l]\nl2.sort()\nres = [ b for a,b in l2 ]\nres", "Dans cas précis, on ne souhaite pas trier sur les nombres mais sur leur parité. En quelque sorte, on ne s'intéresse pas de savoir dans quel ordre deux nombres pairs seront triés. Cela réduit le nombre d'opérations à effectuer. Une idée consiste à parcourir le tableau par les deux bouts et à échanger deux nombres dès que leur parité sont mal classées.", "def trie_parite(l):\n i = 0\n j = len(l)-1\n while i < j :\n while i < j and l[i]%2 == 0 : i += 1\n while i < j and l[j]%2 == 1 : j -= 1\n if i < j:\n ech = l[i]\n l[i] = l[j]\n l[j] = ech\n i += 1\n j -= 1\n \nl = l.copy()\ntrie_parite(l)\nl" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jbwhit/WSP-312-Tips-and-Tricks
notebooks/07-Some_basics.ipynb
mit
[ "from __future__ import absolute_import, division, print_function", "Github\nhttps://github.com/jbwhit/OSCON-2015/commit/6750b962606db27f69162b802b5de4f84ac916d5\nA few Python Basics", "# Create a [list] \ndays = ['Monday', # multiple lines \n 'Tuesday', # acceptable \n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday', \n ] # trailing comma is fine!\n\ndays\n\n# Simple for-loop\nfor day in days:\n print(day)\n\n# Double for-loop\nfor day in days:\n for letter in day:\n print(letter)\n\nprint(days)\n\nprint(*days)\n\n# Double for-loop\nfor day in days:\n for letter in day:\n print(letter)\n print()\n\nfor day in days:\n for letter in day:\n print(letter.lower())", "List Comprehensions", "length_of_days = [len(day) for day in days]\nlength_of_days\n\nletters = [letter for day in days\n for letter in day]\n\nprint(letters)\n\nletters = [letter for day in days for letter in day]\nprint(letters)\n\n[num for num in xrange(10) if num % 2]\n\n[num for num in xrange(10) if num % 2 else \"doesn't work\"]\n\n[num if num % 2 else \"works\" for num in xrange(10)]\n\n[num for num in xrange(10)]\n\nsorted_letters = sorted([x.lower() for x in letters])\nprint(sorted_letters)\n\nunique_sorted_letters = sorted(set(sorted_letters))\n\nprint(\"There are\", len(unique_sorted_letters), \"unique letters in the days of the week.\")\nprint(\"They are:\", ''.join(unique_sorted_letters))\n\nprint(\"They are:\", '; '.join(unique_sorted_letters))\n\ndef first_three(input_string):\n \"\"\"Takes an input string and returns the first 3 characters.\"\"\"\n return input_string[:3] \n\nimport numpy as np\n\n# tab\nnp.linspace()\n\n[first_three(day) for day in days]\n\ndef last_N(input_string, number=2):\n \"\"\"Takes an input string and returns the last N characters.\"\"\"\n return input_string[-number:] \n\n[last_N(day, 4) for day in days if len(day) > 6]\n\nfrom math import pi\n\nprint([str(round(pi, i)) for i in xrange(2, 9)])\n\nlist_of_lists = [[i, round(pi, i)] for i in xrange(2, 9)]\nprint(list_of_lists)\n\nfor sublist in list_of_lists:\n print(sublist)\n\n# Let this be a warning to you!\n\n# If you see python code like the following in your work:\n\nfor x in range(len(list_of_lists)):\n print(\"Decimals:\", list_of_lists[x][0], \"expression:\", list_of_lists[x][1])\n\nprint(list_of_lists)\n\n# Change it to look more like this: \n\nfor decimal, rounded_pi in list_of_lists:\n print(\"Decimals:\", decimal, \"expression:\", rounded_pi)\n \n\n\n# enumerate if you really need the index\n\nfor index, day in enumerate(days):\n print(index, day)\n", "Dictionaries\nPython dictionaries are awesome. They are hash tables and have a lot of neat CS properties. 
Learn and use them well.", "from IPython.display import IFrame, HTML\nHTML('<iframe src=https://en.wikipedia.org/wiki/Hash_table width=100% height=550></iframe>')\n\nfellows = [\"Jonathan\", \"Alice\", \"Bob\"]\nuniversities = [\"UCSD\", \"UCSD\", \"Vanderbilt\"]\n\nfor x, y in zip(fellows, universities):\n print(x, y)\n\n# Don't do this\n{x: y for x, y in zip(fellows, universities)}\n\n# Doesn't work like you might expect\n{zip(fellows, universities)}\n\ndict(zip(fellows, universities))\n\nfellows\n\nfellow_dict = {fellow.lower(): university \n for fellow, university in zip(fellows, universities)}\n\nfellow_dict\n\nfellow_dict['bob']\n\nrounded_pi = {i:round(pi, i) for i in xrange(2, 9)}\n\nrounded_pi[5]\n\nsum([i ** 2 for i in range(10)])\n\nsum(i ** 2 for i in range(10))\n\nhuh = (i ** 2 for i in range(10))\n\nhuh.next()", "Participate in StackOverflow\nAn example: http://stackoverflow.com/questions/6605006/convert-pdf-to-image-with-high-resolution" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hchauvet/beampy
doc-src/auto_tutorials/positioning_system.ipynb
gpl-3.0
[ "%matplotlib inline", "Beampy Positioning system\nBeampy has a positioning system that allows to make automatic, fixed or\nrelative positioning. The default behavior is set by the theme used in the\npresentation.\nThe default theme sets the coordinates to:\n\nx='center' which means that element is centered in the horizontal direction\nx element anchor is set to left, which means that the horizontal distance is\n computed between to left side of the slide and the left border of the element\n bounding-box.\ny='auto' which means that elements are equally spaced on the vertical\n direction.\ny element anchor is set to top, which means that the vertical distance is\n computed between the top of the slide and the top border of the element\n bounding-box.\nThe reference for computing coordinates as percent is the page or group width\n for both x and y.\n\nSlide coordinate system\nThe origin of the coordinate coordinate system is the upper-left corner of the\nslide or the current group. And is positive when moving toward the bottom-right\ncorner.", "from beampy import *\nfrom beampy.utils import bounding_box, draw_axes\n\ndoc = document(quiet=True)\n\nwith slide():\n draw_axes(show_ticks=True)\n t1 = text('This is the default theme behaviour')\n t2 = text('x are centered and y equally spaced')\n\n for t in [t1, t2]:\n t.add_border()\n\ndisplay_matplotlib(gcs())", "Automatic positioning\nBeampy as some simple automatic positioning, which are 'centering' the Beampy\nmodule with center, and equally spaced distribution of Beampy modules that\nhave auto as coordinates\nCentering\n+++++++++", "with slide():\n draw_axes()\n rectangle(x='center', y='center', width=400, height=200,\n color='lightgreen', edgecolor=None)\n text('x and y are centered for the text and the rectangle modules',\n x='center', y='center', width=350)\n\ndisplay_matplotlib(gcs())", "Auto\n++++\nEqually spaced vertically\n~~~~~~~~~~~~~~~~~~~~~~~~~", "with slide():\n draw_axes()\n for c in ['gold', 'crimson', 'orangered']:\n rectangle(x='center', y='auto', width=100, height=100,\n color=c, edgecolor=None)\n\ndisplay_matplotlib(gcs())", "Equally spaced horizontally\n~~~~~~~~~~~~~~~~~~~~~~~~~~~", "with slide():\n draw_axes()\n for c in ['gold', 'crimson', 'orangered']:\n rectangle(x='auto', y='center', width=100, height=100,\n color=c, edgecolor=None)\n\ndisplay_matplotlib(gcs())", "Equally spaced in xy directions\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "with slide():\n draw_axes()\n for c in ['gold', 'crimson', 'orangered']:\n rectangle(x='auto', y='auto', width=100, height=100,\n color=c, edgecolor=None)\n\ndisplay_matplotlib(gcs())", "Absolute positioning\nunits\n+++++\nAbsolute coordinates could be given as follow:\n\n(int or float) <= 1.0, the position is a percent of the slide or group width\n for x and y (by default, but could be changed).\n(int or float) > 1.0, the position is in pixels.\nGiven as a string, the position is in pixels or in the unit given just after,\n like '2cm'.\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>For `y` < 1.0, the default will be changed in future version to be percent\n of the height. 
To already change this in your slide you could add just\n after importing Beampy:\n\n >>> DEFAULT_Y['unit'] = 'height'</p></div>", "with slide():\n    draw_axes()\n    text('x and y relative to width', x=0.5, y=0.5)\n    text('x and y relative to width, with aspect ratio for y', x=0.5,\n         y=0.5*(3/4.), width=300)\n    text('x and y given in pixels', x=100, y=100)\n    text('x and y given in centimetres', x='2cm', y='5cm')\n\ndisplay_matplotlib(gcs())", "Anchors\n+++++++\nWe could also change the anchor of the Beampy module using the center,\nright, bottom functions in the coordinate.", "with slide():\n    draw_axes()\n\n    t1 = text('Top-left absolute positioning $$x=x^2$$', x=400, y=100)\n\n    t2 = text('Top-right absolute positioning $$x=x^2$$', x=right(400), y=200)\n\n    t3 = text('Middle-middle absolute positioning $$x=x^2$$', x=center(400), y=center(300))\n\n    t4 = text('Bottom-right absolute positioning $$x=x^2$$', x=right(0.5), y=bottom(0.6))\n\n    for t in [t1, t2, t3, t4]:\n        bounding_box(t)\n\ndisplay_matplotlib(gcs())", "Relative positioning\nWhen a Beampy module has been placed on a slide, we can position another\nelement relative to this first one. To do so, Beampy modules have methods to\nrefer to their anchors (module.left, module.right, module.top, module.bottom,\nmodule.center).", "with slide():\n    draw_axes()\n    texts_width = 200\n\n    r = rectangle(x='center', y='center', width=100, height=100,\n                  color='crimson', edgecolor=None)\n\n    t1 = text('Centered 10 px below the rectangle', x=r.center+center(0),\n              y=r.bottom+10, width=texts_width, align='center')\n\n    t2 = text('Centered 10 px above the rectangle', x=r.center+center(0),\n              y=r.top-bottom(10), width=texts_width, align='center')\n\n    t3 = text('10 px left of the rectangle', x=r.left-right(10),\n              y=r.center+center(10), width=texts_width, align='center')\n\n    t4 = text('10 px right of the rectangle', x=r.right+10,\n              y=r.center+center(10), width=texts_width, align='center')\n\n    for t in [t1, t2, t3, t4]:\n        bounding_box(t)\n\ndisplay_matplotlib(gcs())", "Another way to do relative positioning is to use a string as coordinate with\n'+' or '-' before the shift and the unit. This will place the new Beampy\nModule relative to the previous one.", "with slide():\n    draw_axes()\n\n    text('text x=20, y=0.5cm', x='20', y='0.5cm')\n    for i in range(2):\n        text('text x=-0, y=+0.5cm', x='-0', y='+0.5cm')\n\n    text('text x=25, y=0.3', x='25', y=0.3)\n    for i in range(2):\n        text('text x=+0, y=+0.5cm', x='+0', y='+0.5cm')\n\n    text('text x=25, y=0.5', x='25', y=0.5)\n    text('text x=+10, y=+0', x='+10', y='+0')\n    text('text x=+10, y=-0', x='+10', y='-0')\n\ndisplay_matplotlib(gcs())", "Coordinate as dictionary\nCoordinates could also be given as a dictionary. The dictionary keys are the\nfollowing:\n\nunit: ('px', 'pt', 'cm', 'width', 'height'), the unit of the shift value.\nshift: float value, the amount of shifting.\nreference: ('slide' or 'relative') 'relative' is used to make relative\n positioning.\nanchor: (top, bottom, left, right, middle) defines the anchor position on the\n module bounding-box.\nalign: (left, right or center for x) and (top, bottom or center for y) is used\n to set the origin of slide axes.", "with slide():\n    draw_axes()\n\n    t = text('centered text',\n             x={'anchor':'middle', 'shift':0.5},\n             y={'anchor':'middle', 'shift':0.5, 'unit':'height'})\n    bounding_box(t)\n\n    t = text('bottom right shift',\n             x={'anchor':'right', 'shift':30, 'align':'right'},\n             y={'anchor':'bottom', 'shift':30, 'align':'bottom'})\n    bounding_box(t)\n\ndisplay_matplotlib(gcs())" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Ric01/Uso-Google-Finance-Python3
Leer Precio Acciones Python 3.ipynb
gpl-3.0
[ "Tutorial: Uso de la libreria de Google Finance en Python para leer datos de acciones\nPaso 1: Importar las librerias necesarias", "from googlefinance import getQuotes \nimport time \nimport json \nimport os \nimport sys \nfrom IPython.display import clear_output", "Paso 2: Definir una funcion que imprime en formato JSON la informacion", "def buscar_accion(nombre_accion):\n clear_output()\n os.system('cls' if os.name=='nt' else 'clear') \n print(json.dumps(getQuotes(nombre_accion), indent=2)) ", "Paso 3: Buscar informacion de la accion de Google (GOOG)", "buscar_accion(\"AAPL\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
crystalzhaizhai/cs207_yi_zhai
lectures/L6/L6.ipynb
mit
[ "Lecture 6: Wednesday, September 20th 2017\nTowards Intermediate Python\nTopics:\n* Recap: How does this stuff really work?\n* Nested environments\n* Closures\n* Decorators\nNested Environments\nYou can nest the definitions of functions. When you do this, inner function definitions are not even evaluated until the outer function is called. These inner functions have access to the name bindings in the scope of the outer function. So below, in make_statement, both s and key will be defined. And in key, you have access to s. This sharing is called lexical scoping.", "def make_statement(s):\n def key(k):\n c=(s, k)\n return c\n return key\nk = make_statement('name: ')\n#we have captured the first element of the tuple as a \"kind of state\"\nname = k('Albert')\nprint(name)\nname2 = k('Emmy')\nprint(name2)", "We can make this a little bit more explicit. In the line k = make_statement('name: '), make_statement() has returned the inner function key and the inner function has been given the name k. Now, when we call k() the inner function returns the desired tuple.\nThe reason this works is that in addition to the environment in which a user-defined function is running, that function has access to a second environment: the environment in which the function was defined. Here, key has access to the environment of make_statement. In this sense the environment of make_statement is the parent of the environment of key.\nThis enables two things:\n\nNames inside the inner functions (or the outer ones for that matter) do not interfere with names in the global scope. Inside the outer and inner functions, the \"most lexically local\" names are the ones that matter\nAn inner function can access the environment of its enclosing (outer) function\n\nClosures\nSince the inner functions can \"capture\" information from an outer function's environment, the inner function is sometimes called a closure.\nNotice that s, once captured by the inner function, cannot now be changed: we have lost direct access to its manipulation. This process is called encapsulation, and is a cornerstone of object oriented programming.\n\nAugmenting Functions\nSince functions are first class, we might want to augment them to put out, for example, call information, time information, etc.\nExample 1\nIn the following, timer() accepts a function f as it's argument and returns an inner function called inner.\ninner accepts a variable argument list and wraps the function f with timers to time how long it takes f to execute.\nNote that f is passed a variable argument list (try to recall what Python does with that).", "# First we write our timer function\nimport time\ndef timer(f):\n def inner(*args):\n t0 = time.time()\n output = f(*args)\n elapsed = time.time() - t0\n print(\"Time Elapsed\", elapsed)\n return output\n return inner\n\n# Now we prepare to use our timer function\n\nimport numpy as np # Import numpy\n\n# User-defined functions\ndef allocate1(x, N):\n return [x]*N\n\ndef allocate2(x, N):\n ones = np.ones(N)\n return np.multiply(x, ones)\n\nx = 1.0\n\n# Time allocation with lists\nmy_alloc = timer(allocate1)\nl1 = my_alloc(x, 10000000)\n\n# Time allocation with numpy array\nmy_alloc2 = timer(allocate2)\nl2 = my_alloc2(x, 10000000)", "That seemed pretty useful. We might want to do such things a lot (and not just for timing purposes).\n\nLet's recap the pattern that was so useful.\nBasically, we wrote a nice function to \"decorate\" our function of interest. 
In this case, we wrote a timer function whose closure wrapped up any function we gave to it in a timing construct. In order to invoke our nice decorations, we had to pass a function to the timer function and get a new, decorated function back. Then we called the decorated function.\nSo the idea is as follows. We have a decorator (here called timer) that sweetens up some function (call it target). \npython\ndef target():\n    pass\ndecorated_target = decorator(target)\nBut Python provides what's called syntactic sugar. Instead of writing all of that, we can just write:\npython\n@decorator\ndef target():\n    pass\nNow target is decorated. Let's see how this all works.", "@timer\ndef allocate1(x, N):\n    return [x]*N\n\nx = 2.0\nallocate1(x, 10000000)", "Example 2\nWe'll just create a demo decorator here.", "def decorate(f):\n    print(\"Let's decorate!\")\n    d = 1.0\n    def wrapper(*args):\n        print(\"Entering function.\")\n        output = f(*args)\n        print(\"Exited function.\")\n        if output > d :\n            print(\"My d is bigger than yours.\")\n        elif output < d:\n            print(\"Your d is bigger than mine.\")\n        else:\n            print(\"Our ds are the same size.\")\n    return wrapper\n\n@decorate\ndef useful_f(a, b, c):\n    d1 = np.sqrt(a * a + b * b + c * c)\n    return d1\n\nd = useful_f(1.0, 2.0, 3.0)", "A key thing to remember is that a decorator is run RIGHT AFTER the function is defined, not when the function is called. Thus if you had the above decorator code in a module, it would print \"Let's decorate!\" when importing the module. Notice that the concept of a closure is used: the state d=1 is captured into the decorated function above." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
apache-2.0
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Vertex SDK: AutoML training image classification model for batch prediction\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb\">\n Open in Google Cloud Notebooks\n </a>\n </td>\n</table>\n<br/><br/><br/>\nOverview\nThis tutorial demonstrates how to use the Vertex SDK to create image classification models and do batch prediction using a Google Cloud AutoML model.\nDataset\nThe dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip.\nObjective\nIn this tutorial, you create an AutoML image classification model from a Python script, and then do a batch prediction using the Vertex SDK. You can alternatively create and deploy models using the gcloud command-line tool or online using the Cloud Console.\nThe steps performed include:\n\nCreate a Vertex Dataset resource.\nTrain the model.\nView the model evaluation.\nMake a batch prediction.\n\nThere is one key difference between using batch prediction and using online prediction:\n\n\nPrediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.\n\n\nBatch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.\n\n\nCosts\nThis tutorial uses billable components of Google Cloud:\n\nVertex AI\nCloud Storage\n\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nSet up your local development environment\nIf you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.\nOtherwise, make sure your environment meets this notebook's requirements. 
You need the following:\n\nThe Cloud Storage SDK\nGit\nPython 3\nvirtualenv\nJupyter notebook running in a virtual environment with Python 3\n\nThe Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:\n\n\nInstall and initialize the SDK.\n\n\nInstall Python 3.\n\n\nInstall virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment.\n\n\nTo install Jupyter, run pip3 install jupyter on the command-line in a terminal shell.\n\n\nTo launch Jupyter, run jupyter notebook on the command-line in a terminal shell.\n\n\nOpen this notebook in the Jupyter Notebook Dashboard.\n\n\nInstallation\nInstall the latest version of Vertex SDK for Python.", "import os\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = \"--user\"\nelse:\n USER_FLAG = \"\"\n\n! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG", "Install the latest GA version of google-cloud-storage library as well.", "! pip3 install -U google-cloud-storage $USER_FLAG\n\nif os.environ[\"IS_TESTING\"]:\n ! pip3 install --upgrade tensorflow $USER_FLAG", "Restart the kernel\nOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.", "import os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)", "Before you begin\nGPU runtime\nThis tutorial does not require a GPU runtime.\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\n\n\nSelect or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n\nMake sure that billing is enabled for your project.\n\n\nEnable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.\n\n\nIf you are running this notebook locally, you will need to install the Cloud SDK.\n\n\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n\nNote: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.", "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n\nif PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)\n\n! gcloud config set project $PROJECT_ID", "Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\nAmericas: us-central1\nEurope: europe-west4\nAsia Pacific: asia-east1\n\nYou may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.\nLearn more about Vertex AI regions", "REGION = \"us-central1\" # @param {type: \"string\"}", "Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. 
To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.", "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "Authenticate your Google Cloud account\nIf you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\nIn the Cloud Console, go to the Create service account key page.\nClick Create service account.\nIn the Service account name field, enter a name, and click Create.\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select Vertex Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\nClick Create. A JSON file that contains your key downloads to your local environment.\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\nimport os\nimport sys\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "Create a Cloud Storage bucket\nThe following steps are required, regardless of your notebook environment.\nWhen you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.", "BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}\n\nif BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP", "Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.", "! gsutil mb -l $REGION $BUCKET_NAME", "Finally, validate access to your Cloud Storage bucket by examining its contents:", "! 
gsutil ls -al $BUCKET_NAME", "Set up variables\nNext, set up some variables used throughout the tutorial.\nImport libraries and define constants", "import google.cloud.aiplatform as aip", "Initialize Vertex SDK for Python\nInitialize the Vertex SDK for Python for your project and corresponding bucket.", "aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)", "Tutorial\nNow you are ready to start creating your own AutoML image classification model.\nLocation of Cloud Storage training data.\nNow set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.", "IMPORT_FILE = (\n \"gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv\"\n)", "Quick peek at your data\nThis tutorial uses a version of the Flowers dataset that is stored in a public Cloud Storage bucket, using a CSV index file.\nStart by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.", "if \"IMPORT_FILES\" in globals():\n FILE = IMPORT_FILES[0]\nelse:\n FILE = IMPORT_FILE\n\ncount = ! gsutil cat $FILE | wc -l\nprint(\"Number of Examples\", int(count[0]))\n\nprint(\"First 10 rows\")\n! gsutil cat $FILE | head", "Create the Dataset\nNext, create the Dataset resource using the create method for the ImageDataset class, which takes the following parameters:\n\ndisplay_name: The human readable name for the Dataset resource.\ngcs_source: A list of one or more dataset index files to import the data items into the Dataset resource.\nimport_schema_uri: The data labeling schema for the data items.\n\nThis operation may take several minutes.", "dataset = aip.ImageDataset.create(\n display_name=\"Flowers\" + \"_\" + TIMESTAMP,\n gcs_source=[IMPORT_FILE],\n import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification,\n)\n\nprint(dataset.resource_name)", "Create and run training pipeline\nTo train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.\nCreate training pipeline\nAn AutoML training pipeline is created with the AutoMLImageTrainingJob class, with the following parameters:\n\ndisplay_name: The human readable name for the TrainingJob resource.\nprediction_type: The type task to train the model for.\nclassification: An image classification model.\nobject_detection: An image object detection model.\nmulti_label: If a classification task, whether single (False) or multi-labeled (True).\nmodel_type: The type of model for deployment.\nCLOUD: Deployment on Google Cloud\nCLOUD_HIGH_ACCURACY_1: Optimized for accuracy over latency for deployment on Google Cloud.\nCLOUD_LOW_LATENCY_: Optimized for latency over accuracy for deployment on Google Cloud.\nMOBILE_TF_VERSATILE_1: Deployment on an edge device.\nMOBILE_TF_HIGH_ACCURACY_1:Optimized for accuracy over latency for deployment on an edge device.\nMOBILE_TF_LOW_LATENCY_1: Optimized for latency over accuracy for deployment on an edge device.\nbase_model: (optional) Transfer learning from existing Model resource -- supported for image classification only.\n\nThe instantiated object is the DAG (directed acyclic graph) for the training job.", "dag = aip.AutoMLImageTrainingJob(\n display_name=\"flowers_\" + TIMESTAMP,\n prediction_type=\"classification\",\n multi_label=False,\n model_type=\"CLOUD\",\n base_model=None,\n)\n\nprint(dag)", "Run the training pipeline\nNext, you run the DAG to start the training job by invoking the method run, with the following 
parameters:\n\ndataset: The Dataset resource to train the model.\nmodel_display_name: The human readable name for the trained model.\ntraining_fraction_split: The percentage of the dataset to use for training.\ntest_fraction_split: The percentage of the dataset to use for test (holdout data).\nvalidation_fraction_split: The percentage of the dataset to use for validation.\nbudget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour).\ndisable_early_stopping: If True, training maybe completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.\n\nThe run method when completed returns the Model resource.\nThe execution of the training pipeline will take upto 20 minutes.", "model = dag.run(\n dataset=dataset,\n model_display_name=\"flowers_\" + TIMESTAMP,\n training_fraction_split=0.8,\n validation_fraction_split=0.1,\n test_fraction_split=0.1,\n budget_milli_node_hours=8000,\n disable_early_stopping=False,\n)", "Review model evaluation scores\nAfter your model has finished training, you can review the evaluation scores for it.\nFirst, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.", "# Get model resource ID\nmodels = aip.Model.list(filter=\"display_name=flowers_\" + TIMESTAMP)\n\n# Get a reference to the Model Service client\nclient_options = {\"api_endpoint\": f\"{REGION}-aiplatform.googleapis.com\"}\nmodel_service_client = aip.gapic.ModelServiceClient(client_options=client_options)\n\nmodel_evaluations = model_service_client.list_model_evaluations(\n parent=models[0].resource_name\n)\nmodel_evaluation = list(model_evaluations)[0]\nprint(model_evaluation)", "Send a batch prediction request\nSend a batch prediction to your deployed model.\nGet test item(s)\nNow do a batch prediction to your Vertex model. You will use arbitrary examples out of the dataset as a test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction.", "test_items = !gsutil cat $IMPORT_FILE | head -n2\nif len(str(test_items[0]).split(\",\")) == 3:\n _, test_item_1, test_label_1 = str(test_items[0]).split(\",\")\n _, test_item_2, test_label_2 = str(test_items[1]).split(\",\")\nelse:\n test_item_1, test_label_1 = str(test_items[0]).split(\",\")\n test_item_2, test_label_2 = str(test_items[1]).split(\",\")\n\nprint(test_item_1, test_label_1)\nprint(test_item_2, test_label_2)", "Copy test item(s)\nFor the batch prediction, copy the test items over to your Cloud Storage bucket.", "file_1 = test_item_1.split(\"/\")[-1]\nfile_2 = test_item_2.split(\"/\")[-1]\n\n! gsutil cp $test_item_1 $BUCKET_NAME/$file_1\n! gsutil cp $test_item_2 $BUCKET_NAME/$file_2\n\ntest_item_1 = BUCKET_NAME + \"/\" + file_1\ntest_item_2 = BUCKET_NAME + \"/\" + file_2", "Make the batch input file\nNow make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:\n\ncontent: The Cloud Storage path to the image.\nmime_type: The content type. 
In our example, it is a jpeg file.\n\nFor example:\n {'content': '[your-bucket]/file1.jpg', 'mime_type': 'jpeg'}", "import json\n\nimport tensorflow as tf\n\ngcs_input_uri = BUCKET_NAME + \"/test.jsonl\"\nwith tf.io.gfile.GFile(gcs_input_uri, \"w\") as f:\n data = {\"content\": test_item_1, \"mime_type\": \"image/jpeg\"}\n f.write(json.dumps(data) + \"\\n\")\n data = {\"content\": test_item_2, \"mime_type\": \"image/jpeg\"}\n f.write(json.dumps(data) + \"\\n\")\n\nprint(gcs_input_uri)\n! gsutil cat $gcs_input_uri", "Make the batch prediction request\nNow that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:\n\njob_display_name: The human readable name for the batch prediction job.\ngcs_source: A list of one or more batch request input files.\ngcs_destination_prefix: The Cloud Storage location for storing the batch prediction resuls.\nsync: If set to True, the call will block while waiting for the asynchronous batch job to complete.", "batch_predict_job = model.batch_predict(\n job_display_name=\"flowers_\" + TIMESTAMP,\n gcs_source=gcs_input_uri,\n gcs_destination_prefix=BUCKET_NAME,\n sync=False,\n)\n\nprint(batch_predict_job)", "Wait for completion of batch prediction job\nNext, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.", "batch_predict_job.wait()", "Get the predictions\nNext, get the results from the completed batch prediction job.\nThe results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. 
Each file contains one or more prediction requests in a JSON format:\n\ncontent: The prediction request.\nprediction: The prediction response.\nids: The internal assigned unique identifiers for each prediction request.\ndisplayNames: The class names for each class label.\nconfidences: The predicted confidence, between 0 and 1, per class label.", "import json\n\nimport tensorflow as tf\n\nbp_iter_outputs = batch_predict_job.iter_outputs()\n\nprediction_results = list()\nfor blob in bp_iter_outputs:\n if blob.name.split(\"/\")[-1].startswith(\"prediction\"):\n prediction_results.append(blob.name)\n\ntags = list()\nfor prediction_result in prediction_results:\n gfile_name = f\"gs://{bp_iter_outputs.bucket.name}/{prediction_result}\"\n with tf.io.gfile.GFile(name=gfile_name, mode=\"r\") as gfile:\n for line in gfile.readlines():\n line = json.loads(line)\n print(line)\n break", "Cleaning up\nTo clean up all Google Cloud resources used in this project, you can delete the Google Cloud\nproject you used for the tutorial.\nOtherwise, you can delete the individual resources you created in this tutorial:\n\nDataset\nPipeline\nModel\nEndpoint\nAutoML Training Job\nBatch Job\nCustom Job\nHyperparameter Tuning Job\nCloud Storage Bucket", "delete_all = True\n\nif delete_all:\n # Delete the dataset using the Vertex dataset object\n try:\n if \"dataset\" in globals():\n dataset.delete()\n except Exception as e:\n print(e)\n\n # Delete the model using the Vertex model object\n try:\n if \"model\" in globals():\n model.delete()\n except Exception as e:\n print(e)\n\n # Delete the endpoint using the Vertex endpoint object\n try:\n if \"endpoint\" in globals():\n endpoint.delete()\n except Exception as e:\n print(e)\n\n # Delete the AutoML or Pipeline trainig job\n try:\n if \"dag\" in globals():\n dag.delete()\n except Exception as e:\n print(e)\n\n # Delete the custom trainig job\n try:\n if \"job\" in globals():\n job.delete()\n except Exception as e:\n print(e)\n\n # Delete the batch prediction job using the Vertex batch prediction object\n try:\n if \"batch_predict_job\" in globals():\n batch_predict_job.delete()\n except Exception as e:\n print(e)\n\n # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object\n try:\n if \"hpt_job\" in globals():\n hpt_job.delete()\n except Exception as e:\n print(e)\n\n if \"BUCKET_NAME\" in globals():\n ! gsutil rm -r $BUCKET_NAME" ]
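The prediction-parsing loop above only prints each raw JSON line. As a minimal, hedged sketch of what a consumer of those lines might do next, the snippet below pulls the highest-confidence class out of one parsed result, assuming the displayNames/confidences fields listed above are present; the inline example record is invented for illustration, and in real output these fields may sit nested under the prediction key, so the lookup tolerates both layouts.

```python
import json

# A hypothetical parsed result line from the batch prediction output.
# The field names follow the list above; in real output displayNames and
# confidences may be nested under the "prediction" key, so the lookup
# below tolerates both layouts.
line = json.loads(
    '{"content": "gs://my-bucket/file1.jpg",'
    ' "prediction": {"displayNames": ["daisy", "tulips", "roses"],'
    ' "confidences": [0.91, 0.06, 0.03]}}'
)

pred = line.get("prediction", line)  # fall back to a flat layout if needed
names = pred["displayNames"]
confs = pred["confidences"]

# Pair each class label with its confidence and take the arg-max.
best_label, best_conf = max(zip(names, confs), key=lambda pair: pair[1])
print(f"{line['content']}: {best_label} ({best_conf:.2f})")
```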
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
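Staying with the Vertex AI notebook above: its batch-input cell writes exactly two JSONL rows by hand. The sketch below generalizes that to any list of Cloud Storage image URIs, reusing the same {content, mime_type} row layout and the tf.io.gfile.GFile writer shown in the notebook; the helper name write_batch_input and the placeholder bucket paths are my own, and an authenticated environment with access to the bucket is assumed.

```python
import json

import tensorflow as tf


def write_batch_input(gcs_uris, output_uri, mime_type="image/jpeg"):
    """Write one JSONL row per image, using the {content, mime_type} layout above."""
    with tf.io.gfile.GFile(output_uri, "w") as f:
        for uri in gcs_uris:
            f.write(json.dumps({"content": uri, "mime_type": mime_type}) + "\n")


# Hypothetical usage -- the bucket and file names below are placeholders.
test_uris = ["gs://your-bucket/file_1.jpg", "gs://your-bucket/file_2.jpg"]
write_batch_input(test_uris, "gs://your-bucket/test.jsonl")
```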
omoju/Fundamentals
CS/Part_1_Complexity_RunTimeAnalysis.ipynb
gpl-3.0
[ "from IPython.display import display\nfrom IPython.display import HTML\nimport IPython.core.display as di # Example: di.display_html('<h3>%s:</h3>' % str, raw=True)\n\n# This line will hide code by default when the notebook is exported as HTML\ndi.display_html('<script>jQuery(function() {if (jQuery(\"body.notebook_app\").length == 0) { jQuery(\".input_area\").toggle(); jQuery(\".prompt\").toggle();}});</script>', raw=True)\n\n# This line will add a button to toggle visibility of code blocks, for use with the HTML export version\ndi.display_html('''<button onclick=\"jQuery('.input_area').toggle(); jQuery('.prompt').toggle();\">Toggle code</button>''', raw=True)\n", "Runtime Analysis\nusing Finding the nth Fibonacci numbers as a computational object to think with", "%pylab inline\n\n# Import libraries\nfrom __future__ import absolute_import, division, print_function\n\nimport math\nfrom time import time\nimport matplotlib.pyplot as pyplt", "Fibonacci\nExcerpt from Algorithms by S. Dasgupta, C.H. Papadimitriou, and U.V. Vazirani \nFibonacci is most widely known for his famous sequence of numbers\n$0,1,1,2,3,5,8,13,21,34,...,$\neach the sum of its two immediate predecessors. More formally, the Fibonacci numbers $F_n$ are generated by the simple rule\n$F_n = \\begin{cases} \nF_n−1 + F_n−2, & \\mbox{if } n \\mbox{ is} > 1 \\ \n1, & \\mbox{if } n \\mbox{ is} = 1 \\ \n0, & \\mbox{if } n \\mbox{ is} = 0\n\\end{cases}$\nNo other sequence of numbers has been studied as extensively, or applied to more fields: biology, demography, art, architecture, music, to name just a few. And, together with the powers of 2, it is computer science’s favorite sequence.\nTree Recursion\nA very simple way to calculate the nth Fibonacci number is to use a recursive algorithm. Here is a recursive algorithm for computing the nth Fibonacci number.\npython\ndef fib(n):\n if n == 0 or n == 1:\n return n\n else:\n return fib(n-2) + fib(n-1)\nThis algorithm in particular is done using tree recursion.", "from IPython.display import YouTubeVideo\nYouTubeVideo('ls0GsJyLVLw')\n\ndef fib(n):\n if n == 0 or n == 1:\n return n\n else:\n return fib(n-2) + fib(n-1)\n \n\nfib(5)", "Whenever we have an algorithm, there are three questions we always ask about it:\n\nIs it correct?\nHow much time does it take, as a function of n? \nAnd can we do better?\n\n1. Correctness\nFor this question, the answer is yes because it is almost a line by line implementation of the definition of the Fibonacci sequence.\n2. Time complexity as a function of n\nLet $T(n)$ be the number of computer steps needed to compute $fib(n)$; what can we say about this function? For starters, if $n$ is less than 2, the procedure halts almost immediately, after just a couple of steps. Therefore,\n$$ T(n)≤2 \\, \\mbox{for} \\, n≤1. $$\nFor larger values of $n$, there are two recursive invocations of $fib$, taking time $T (n − 1)$ and $T(n−2)$, respectively, plus three computer steps (checks on the value of $n$ and a final addition).\nTherefore,\n$$ T(n) = T(n−1) + T(n−2)+3\\, \\mbox{for} \\,n>1. $$\nCompare this to the recurrence relation for $F_n$, we immediately see that $T(n) ≥ F_n$.\nThis is very bad news: the running time of the algorithm grows as fast as the Fibonacci numbers! $T(n)$ is exponential in $n$, which implies that the algorithm is impractically slow except for very small values of $n$.\nLet’s be a little more concrete about just how bad exponential time is. 
To compute $F_{200}$,\nthe $fib$ algorithm executes $T (200) ≥ F_{200} ≥ 2^{138}$ elementary computer steps. How long this actually takes depends, of course, on the computer used. At this time, the fastest computer in the world is the NEC Earth Simulator, which clocks 40 trillion steps per second. Even on this machine, $fib(200)$ would take at least $2^{92}$ seconds. This means that, if we start the computation today, it would still be going long after the sun turns into a red giant star.", "# This function provides a way to track function calls\n\ndef count(f):\n def counted(n):\n counted.call_count += 1\n return f(n)\n counted.call_count = 0\n return counted\n\nfib = count(fib)\n\n\nt0 = time()\n\nn = 5\nfib(n)\n\nprint ('This recursive implementation of fib(', n, ') took', round(time() - t0, 4), 'secs')\nprint ('And {0} calls to the function'.format(fib.call_count))\n\nt0 = time()\n\nn = 30\nfib(n)\n\nprint ('This recursive implementation of fib(', n, ') took', round(time() - t0, 4), 'secs')\nprint ('And {0} calls to the function'.format(fib.call_count))", "3. Can we do better?\nA polynomial algorithm for $fib$\nLet’s try to understand why $fib$ is so slow. fib.call_count shows the count of recursive invocations triggered by a single call to $fib(5)$, which is 15. If you sketched it out, you will notice that many computations are repeated!\nA more sensible scheme would store the intermediate results—the values $F_0 , F_1 , . . . , F_{n−1}$ as soon as they become known. \nLets do exactly that through memoization. Note that you can also do this by writing a polynomial algorithm.\nMemoization\nTree-recursive computational processes can often be made more efficient through memoization, a powerful technique for increasing the efficiency of recursive functions that repeat computation. A memoized function will store the return value for any arguments it has previously received. A second call to fib(30) would not re-compute the return value recursively, but instead return the existing one that has already been constructed.\nMemoization can be expressed naturally as a higher-order function, which can also be used as a decorator. The definition below creates a cache of previously computed results, indexed by the arguments from which they were computed. The use of a dictionary requires that the argument to the memoized function be immutable.", "def memo(f):\n cache = {}\n def memoized(n):\n if n not in cache:\n cache[n] = f(n) # Make a mapping between the key \"n\" and the return value of f(n)\n return cache[n]\n return memoized\n\nfib = memo(fib)\n\nt0 = time()\n\nn = 400\nfib(n)\n\nprint ('This memoized implementation of fib(', n, ') took', round(time() - t0, 4), 'secs')\n\nt0 = time()\n\nn = 300\nfib(n)\n\nprint ('This memoized implementation of fib(', n, ') took', round(time() - t0, 4), 'secs')\n\n# Here is the polynomial algorithm for fibonacci sequence\ndef fib2(n):\n if n == 0:\n return 0\n \n f = [0] * (n+1) # create an array f[0 . . . n]\n f[0], f[1] = 0, 1\n \n for i in range(2, n+1):\n f[i] = f[i-1] + f[i-2]\n \n return f[n] \n\nfib2 = count(fib2)\n\nt0 = time()\n\nn = 3000\nfib2(n)\n\nprint ('This polynomial implementation of fib2(', n, ') took', round(time() - t0, 4), 'secs')\n\nfib2.call_count", "How long does $fib2$ take? \n- The inner loop consists of a single computer step and is executed $n − 1$ times. \n- Therefore the number of computer steps used by $fib2$ is linear in $n$. \nFrom exponential we are down to polynomial, a huge breakthrough in running time. 
It is now perfectly reasonable to compute $F_{200}$ or even $F_{200,000}$", "fib2(200)", "Instead of reporting that an algorithm takes, say, $ 5n^3 + 4n + 3$ steps on an input of size $n$, it is much simpler to leave out lower-order terms such as $4n$ and $3$ (which become insignificant as $n$ grows), and even the detail of the coefficient $5$ in the leading term (computers will be five times faster in a few years anyway), and just say that the algorithm takes time $O(n^3)$ (pronounced “big oh of $n^3$”).\nIt is time to define this notation precisely. In what follows, think of $f(n)$ and $g(n)$ as the running times of two algorithms on inputs of size $n$.\n\nLet $f(n)$ and $g(n)$ be functions from positive integers to positive reals. We say $f = O(g)$ (which means that “$f$ grows no faster than $g$”) if there is a constant $c > 0$ such that \n${f(n) ≤ c · g(n)}$.\n\nSaying $f = O(g)$ is a very loose analog of “$f ≤ g$.” It differs from the usual notion of ≤ because of the constant c, so that for instance $10n = O(n)$. This constant also allows us to disregard what happens for small values of $n$. \nExample:\nFor example, suppose we are choosing between two algorithms for a particular computational task. One takes $f_1(n) = n^2$ steps, while the other takes $f_2(n) = 2n + 20$ steps. Which is better? Well, this depends on the value of $n$. For $n ≤ 5$, $f_1(n)$ is smaller; thereafter, $f_2$ is the clear winner. In this case, $f_2$ scales much better as $n$ grows, and therefore it is superior.", "t = arange(0, 15, 1)\nf1 = t * t \nf2 = 2*t + 20\n\npyplt.title('Exponential time vs Linear time')\nplot(t, f1, t, f2)\npyplt.annotate('$n^2$', xy=(8, 1), xytext=(10, 108))\npyplt.annotate('$2n + 20$', xy=(5, 1), xytext=(10, 45))\npyplt.xlabel('n')\npyplt.ylabel('Run time')\npyplt.grid(True)\n\n\n", "Is there a faster way to compute the nth Fibonacci number than by fib2? One idea\ninvolves matrices.\nWe start by writing the equations $F_1$ = $F_1$ and $F_2$ = $F_0$ + $F_1$ in matrix notation:\n$$\n\\begin{bmatrix} F_1\\F_2 \\end{bmatrix} = \\begin{bmatrix} 0&1\\ 1&1 \\end{bmatrix} \\cdot \\begin{bmatrix} F_0\\F_1 \\end{bmatrix}\n$$\nsimilarly,\n$$\n \\begin{bmatrix} F_2\\F_3 \\end{bmatrix} = \\begin{bmatrix} 0&1\\ 1&1 \\end{bmatrix} \\cdot \\begin{bmatrix} F_1\\F_2 \\end{bmatrix} = \\begin{bmatrix} 0&1\\ 1&1 \\end{bmatrix}^2 \\cdot \\begin{bmatrix} F_0\\F_1 \\end{bmatrix}\n$$\nand in general \n$$\n\\begin{bmatrix} F_n\\F_{n+1} \\end{bmatrix} = \\begin{bmatrix} 0&1\\ 1&1 \\end{bmatrix}^n \\cdot \\begin{bmatrix} F_0\\F_1 \\end{bmatrix}\n$$\nSo, in order to compute $F_n$, it suffices to raise this 2 × 2 matrix, call it $X$, to the nth power.\nThus the number of arithmetic operations needed by our matrix-based algorithm, call it fib3, is\njust $O(log n)$, as compared to $O(n)$ for fib2. Have we broken another exponential barrier? \nThe catch is that our new algorithm involves multiplication, not just addition; and multiplications of large numbers are slower than additions. We have already seen that, when the complexity of arithmetic operations is taken into account, the running time of fib2 becomes $O(n^2)$.\nIn conclusion, whether fib3 is faster than fib2 depends on whether we can multiply n-bit integers faster than $O(n^2)$." ]
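The closing paragraph above describes fib3, which raises the 2×2 matrix [[0, 1], [1, 1]] to the nth power, but the notebook never shows its code. Here is a minimal sketch of one way to write it with repeated squaring, so only O(log n) matrix multiplications are needed; the helper names and the use of plain Python integers (to avoid overflow for large n) are my choices, not the notebook's.

```python
def fib3(n):
    """nth Fibonacci number via fast exponentiation of X = [[0, 1], [1, 1]]."""
    def mat_mult(a, b):
        # 2x2 matrix product with plain Python ints (arbitrary precision).
        return [[a[0][0] * b[0][0] + a[0][1] * b[1][0],
                 a[0][0] * b[0][1] + a[0][1] * b[1][1]],
                [a[1][0] * b[0][0] + a[1][1] * b[1][0],
                 a[1][0] * b[0][1] + a[1][1] * b[1][1]]]

    def mat_pow(m, p):
        # Repeated squaring: O(log p) matrix multiplications.
        result = [[1, 0], [0, 1]]  # 2x2 identity
        while p > 0:
            if p % 2 == 1:
                result = mat_mult(result, m)
            m = mat_mult(m, m)
            p //= 2
        return result

    if n == 0:
        return 0
    x = mat_pow([[0, 1], [1, 1]], n)
    # [F_n, F_{n+1}]^T = X^n . [F_0, F_1]^T with F_0 = 0 and F_1 = 1,
    # so F_n is the top-right entry of X^n.
    return x[0][1]


assert [fib3(i) for i in range(10)] == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
```

Each arithmetic operation here acts on n-bit integers, which is why the text's caveat about the cost of multiplying large numbers still applies.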
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
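One more note on the memoization section of the runtime-analysis notebook above: its hand-rolled memo decorator caches results in a dict keyed by the argument. The Python standard library ships an equivalent decorator, functools.lru_cache, and a sketch of the same recursive fib wrapped with it is shown below; the choice of maxsize=None (an unbounded cache) mirrors the dict-based version.

```python
from functools import lru_cache


@lru_cache(maxsize=None)  # unbounded cache, like the dict inside memo()
def fib(n):
    if n == 0 or n == 1:
        return n
    return fib(n - 2) + fib(n - 1)


# Each distinct n is now computed only once, so the exponential call tree
# collapses to a linear number of evaluations.
print(fib(300))
```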
YuriyGuts/kaggle-quora-question-pairs
notebooks/unused/feature-oofp-nn-lstm-with-activations.ipynb
mit
[ "Feature: Out-Of-Fold Predictions and Feature Layer Activations from an LSTM\nIn addition to the output of the final network layer, the model will also output the activations of the intermediate feature layer.\nTo achieve this, we'll create a multi-output network (target output + activations output), and supply dummy ground truth and a dummy loss function to the second output.\nImports\nThis utility package imports numpy, pandas, matplotlib and a helper kg module into the root namespace.", "from pygoose import *\n\nimport gc\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import *\n\nfrom keras import backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import *\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint", "Config\nAutomatically discover the paths to various data folders and compose the project structure.", "project = kg.Project.discover()", "Identifier for storing these features on disk and referring to them later.", "feature_list_id = 'oofp_nn_lstm_with_activations'", "Make subsequent NN runs reproducible.", "RANDOM_SEED = 42\n\nnp.random.seed(RANDOM_SEED)", "Read data\nWord embedding lookup matrix.", "embedding_matrix = kg.io.load(project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle')", "Padded sequences of word indices for every question.", "X_train_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle')\nX_train_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle')\n\nX_test_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle')\nX_test_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle')\n\ny_train = kg.io.load(project.features_dir + 'y_train.pickle')", "Word embedding properties.", "EMBEDDING_DIM = embedding_matrix.shape[-1]\nVOCAB_LENGTH = embedding_matrix.shape[0]\nMAX_SEQUENCE_LENGTH = X_train_q1.shape[-1]\n\nprint(EMBEDDING_DIM, VOCAB_LENGTH, MAX_SEQUENCE_LENGTH)", "Define models", "def zero_loss(y_true, y_pred):\n return K.zeros((1,))\n\ndef create_model_question_branch():\n input_q = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n \n embedding_q = Embedding(\n VOCAB_LENGTH,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=False,\n )(input_q)\n\n timedist_q = TimeDistributed(Dense(\n EMBEDDING_DIM,\n activation='relu',\n ))(embedding_q)\n\n lambda_q = Lambda(\n lambda x: K.max(x, axis=1),\n output_shape=(EMBEDDING_DIM, )\n )(timedist_q)\n \n output_q = lambda_q\n return input_q, output_q\n\ndef create_model(params): \n embedding_layer = Embedding(\n VOCAB_LENGTH,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=False,\n )\n lstm_layer = LSTM(\n params['num_lstm'],\n dropout=params['lstm_dropout_rate'],\n recurrent_dropout=params['lstm_dropout_rate'],\n )\n\n input_q1 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n embedded_sequences_1 = embedding_layer(input_q1)\n x1 = lstm_layer(embedded_sequences_1)\n\n input_q2 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n embedded_sequences_2 = embedding_layer(input_q2)\n y1 = lstm_layer(embedded_sequences_2)\n\n features = Concatenate(name='feature_output')([x1, y1])\n dropout_feat = Dropout(params['dense_dropout_rate'])(features)\n bn_feat = BatchNormalization()(dropout_feat)\n\n dense_1 = Dense(params['num_dense'], activation='relu')(bn_feat)\n dropout_1 = 
Dropout(params['dense_dropout_rate'])(dense_1)\n bn_1 = BatchNormalization()(dropout_1)\n\n output = Dense(1, activation='sigmoid', name='target_output')(bn_1)\n\n model = Model(\n inputs=[input_q1, input_q2],\n outputs=[output, features],\n )\n \n model.compile(\n loss={'target_output': 'binary_crossentropy', 'feature_output': zero_loss},\n loss_weights={'target_output': 1.0, 'feature_output': 0.0},\n optimizer='nadam',\n metrics=None,\n )\n\n return model\n\ndef predict(model, X_q1, X_q2):\n \"\"\"\n Mirror the pairs, compute two separate predictions, and average them.\n \"\"\"\n \n y1 = model.predict([X_q1, X_q2], batch_size=1024, verbose=1).reshape(-1) \n y2 = model.predict([X_q2, X_q1], batch_size=1024, verbose=1).reshape(-1) \n return (y1 + y2) / 2", "Partition the data", "NUM_FOLDS = 5\n\nkfold = StratifiedKFold(\n n_splits=NUM_FOLDS,\n shuffle=True,\n random_state=RANDOM_SEED\n)", "Define hyperparameters", "BATCH_SIZE = 2048\n\nMAX_EPOCHS = 200", "Best values picked by Bayesian optimization.", "model_params = {\n 'dense_dropout_rate': 0.075,\n 'lstm_dropout_rate': 0.332,\n 'num_dense': 130,\n 'num_lstm': 300,\n}\n\nfeature_output_size = model_params['num_lstm'] * 2", "Create placeholders for out-of-fold predictions.", "y_train_oofp = np.zeros_like(y_train, dtype='float32')\ny_train_oofp_features = np.zeros((len(y_train), feature_output_size), dtype='float32')\n\ny_test_oofp = np.zeros((len(X_test_q1), NUM_FOLDS), dtype='float32')\ny_test_oofp_features = np.zeros((len(X_test_q1), feature_output_size), dtype='float32')", "The path where the best weights of the current model will be saved.", "model_checkpoint_path = project.temp_dir + 'fold-checkpoint-' + feature_list_id + '.h5'", "Fit the folds and compute out-of-fold predictions", "%%time\n\n# Iterate through folds.\nfor fold_num, (ix_train, ix_val) in enumerate(kfold.split(X_train_q1, y_train)):\n \n # Augment the training set by mirroring the pairs.\n X_fold_train_q1 = np.vstack([X_train_q1[ix_train], X_train_q2[ix_train]])\n X_fold_train_q2 = np.vstack([X_train_q2[ix_train], X_train_q1[ix_train]])\n\n X_fold_val_q1 = np.vstack([X_train_q1[ix_val], X_train_q2[ix_val]])\n X_fold_val_q2 = np.vstack([X_train_q2[ix_val], X_train_q1[ix_val]])\n\n # Ground truth should also be \"mirrored\".\n y_fold_train = np.concatenate([y_train[ix_train], y_train[ix_train]])\n y_fold_val = np.concatenate([y_train[ix_val], y_train[ix_val]])\n \n print()\n print(f'Fitting fold {fold_num + 1} of {kfold.n_splits}')\n print()\n \n # Compile a new model.\n model = create_model(model_params)\n\n # Train.\n model.fit(\n # Create dummy ground truth values for the activation outputs.\n [X_fold_train_q1, X_fold_train_q2],\n [y_fold_train, np.zeros((len(y_fold_train), feature_output_size))],\n \n validation_data=(\n [X_fold_val_q1, X_fold_val_q2],\n [y_fold_val, np.zeros((len(y_fold_val), feature_output_size))],\n ),\n\n batch_size=BATCH_SIZE,\n epochs=MAX_EPOCHS,\n verbose=1,\n \n callbacks=[\n # Stop training when the validation loss stops improving.\n EarlyStopping(\n monitor='val_loss',\n min_delta=0.001,\n patience=3,\n verbose=1,\n mode='auto',\n ),\n # Save the weights of the best epoch.\n ModelCheckpoint(\n model_checkpoint_path,\n monitor='val_loss',\n save_best_only=True,\n verbose=2,\n ),\n ],\n )\n \n # Restore the best epoch.\n model.load_weights(model_checkpoint_path)\n \n # Compute out-of-fold predictions.\n y_train_oofp[ix_val] = predict(model, X_train_q1[ix_val], X_train_q2[ix_val])\n y_test_oofp[:, fold_num] = predict(model, X_test_q1, 
X_test_q2)\n \n # Clear GPU memory.\n K.clear_session()\n del X_fold_train_q1, X_fold_train_q2\n del X_fold_val_q1, X_fold_val_q2\n del model\n gc.collect()\n\ncv_score = log_loss(y_train, y_train_oofp)\nprint('CV score:', cv_score)", "Save features", "feature_names = [feature_list_id]\n\nfeatures_train = y_train_oofp.reshape((-1, 1))\n\nfeatures_test = np.mean(y_test_oofp, axis=1).reshape((-1, 1))\n\nproject.save_features(features_train, features_test, feature_names, feature_list_id)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
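The fold loop in the LSTM notebook above implements out-of-fold prediction around a fairly involved Keras model. To make the OOF scheme itself easier to see in isolation, here is a compact sketch of the same pattern with scikit-learn's StratifiedKFold and a simple LogisticRegression standing in for the network; the synthetic data and the stand-in classifier are mine and are not part of the original pipeline.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold

# Toy stand-ins for the question-pair features and labels.
X, y = make_classification(n_samples=1000, n_features=20, random_state=42)

oof = np.zeros(len(y))  # out-of-fold predictions for the training set
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

for ix_train, ix_val in kfold.split(X, y):
    clf = LogisticRegression(max_iter=1000)
    clf.fit(X[ix_train], y[ix_train])
    # Each training row is predicted by a model that never saw it during fitting.
    oof[ix_val] = clf.predict_proba(X[ix_val])[:, 1]

print("CV log loss:", log_loss(y, oof))
```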
machow/siuba
docs/draft-old-pages/intro_sql_interm.ipynb
mit
[ "import matplotlib.cbook\n\nimport warnings\nimport plotnine\nwarnings.filterwarnings(module='plotnine*', action='ignore')\nwarnings.filterwarnings(module='matplotlib*', action='ignore')\n\n%matplotlib inline", "Querying SQL (advanced)\nNOTE: THIS DOC IS CURRENTLY IN OUTLINE FORM\nIn this tutorial, we'll use a dataset of television ratings.\n\ncopying data in, and getting a table from SQL\nfiltering out rows, and aggregating data\nlooking at shifts in ratings between seasons\nchecking for abnormalities in the data\n\nSetting up", "import pandas as pd\nfrom siuba.tests.helpers import copy_to_sql\nfrom siuba import *\nfrom siuba.dply.vector import lag, desc, row_number\nfrom siuba.dply.string import str_c\nfrom siuba.sql import LazyTbl\n\ndata_url = \"https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-01-08/IMDb_Economist_tv_ratings.csv\"\ntv_ratings = pd.read_csv(data_url, parse_dates = [\"date\"])\n\n\ndb_uri = \"postgresql://{user}:{password}@localhost:5433/{db}\".format(\n user = \"postgres\",\n password = \"\",\n db = \"postgres\"\n )\n\n# create tv_ratings table\ntbl_ratings = copy_to_sql(tv_ratings, \"tv_ratings\", db_uri)\n\n# can also access an existing table\ntbl_ratings = LazyTbl(db_uri, \"tv_ratings\")\n\ntbl_ratings\n", "Inspecting a single show", "buffy = (tbl_ratings\n >> filter(_.title == \"Buffy the Vampire Slayer\")\n >> collect()\n )\n\nbuffy\n\nbuffy >> summarize(avg_rating = _.av_rating.mean())", "Average rating per show, along with dates", "avg_ratings = (tbl_ratings \n >> group_by(_.title)\n >> summarize(\n avg_rating = _.av_rating.mean(),\n date_range = str_c(_.date.dt.year.max(), \" - \", _.date.dt.year.min())\n )\n )\n\navg_ratings", "Biggest changes in ratings between two seasons", "top_4_shifts = (tbl_ratings\n >> group_by(_.title)\n >> arrange(_.seasonNumber)\n >> mutate(rating_shift = _.av_rating - lag(_.av_rating))\n >> summarize(\n max_shift = _.rating_shift.max()\n )\n >> arrange(-_.max_shift)\n >> head(4)\n )\n\ntop_4_shifts\n\nbig_shift_series = (top_4_shifts\n >> select(_.title)\n >> inner_join(_, tbl_ratings, \"title\")\n >> collect()\n )\n\nfrom plotnine import *\n\n(big_shift_series\n >> ggplot(aes(\"seasonNumber\", \"av_rating\"))\n + geom_point()\n + geom_line()\n + facet_wrap(\"~ title\")\n + labs(\n title = \"Seasons with Biggest Shifts in Ratings\",\n y = \"Average rating\",\n x = \"Season\"\n )\n )", "Do we have full data for each season?", "mismatches = (tbl_ratings\n >> arrange(_.title, _.seasonNumber)\n >> group_by(_.title)\n >> mutate(\n row = row_number(_),\n mismatch = _.row != _.seasonNumber\n )\n >> filter(_.mismatch.any())\n >> ungroup()\n )\n\n\nmismatches\n\nmismatches >> distinct(_.title) >> count() >> collect()" ]
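For readers more familiar with pandas than siuba, here is a sketch of the "average rating per show" pipeline written directly against the tv_ratings DataFrame; the column names (title, av_rating, date) and the CSV URL come from the notebook, while the intermediate year_max/year_min columns are my own way of expressing the same date_range string.

```python
import pandas as pd

data_url = ("https://raw.githubusercontent.com/rfordatascience/tidytuesday/"
            "master/data/2019/2019-01-08/IMDb_Economist_tv_ratings.csv")
tv_ratings = pd.read_csv(data_url, parse_dates=["date"])

# Plain-pandas version of the avg_ratings pipeline above.
avg_ratings_pd = (
    tv_ratings
    .groupby("title")
    .agg(
        avg_rating=("av_rating", "mean"),
        year_max=("date", lambda s: s.dt.year.max()),
        year_min=("date", lambda s: s.dt.year.min()),
    )
    .assign(date_range=lambda d: d["year_max"].astype(str) + " - " + d["year_min"].astype(str))
    .drop(columns=["year_max", "year_min"])
    .reset_index()
)

print(avg_ratings_pd.head())
```

The siuba version has the advantage that the same pipeline can run lazily against the Postgres table; the pandas version only works after the data has been pulled into memory.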
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
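Still on the siuba notebook above: its "biggest changes in ratings" query leans on lag(av_rating) within each title. The plain-pandas analogue uses a per-group shift, sketched below under the same assumption that the CSV has title, seasonNumber and av_rating columns; the variable names are mine.

```python
import pandas as pd

data_url = ("https://raw.githubusercontent.com/rfordatascience/tidytuesday/"
            "master/data/2019/2019-01-08/IMDb_Economist_tv_ratings.csv")
tv = pd.read_csv(data_url, parse_dates=["date"])

top_shifts = (
    tv.sort_values(["title", "seasonNumber"])
      # Previous season's rating within the same show -- the pandas analogue of lag().
      .assign(rating_shift=lambda d: d["av_rating"] - d.groupby("title")["av_rating"].shift())
      .groupby("title")["rating_shift"]
      .max()
      .nlargest(4)
)
print(top_shifts)
```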
jpilgram/phys202-2015-work
assignments/assignment10/ODEsEx03.ipynb
mit
[ "Ordinary Differential Equations Exercise 3\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy.integrate import odeint\nfrom IPython.html.widgets import interact, fixed", "Damped, driven nonlinear pendulum\nThe equations of motion for a simple pendulum of mass $m$, length $l$ are:\n$$\n\\frac{d^2\\theta}{dt^2} = \\frac{-g}{\\ell}\\sin\\theta\n$$\nWhen a damping and periodic driving force are added the resulting system has much richer and interesting dynamics:\n$$\n\\frac{d^2\\theta}{dt^2} = \\frac{-g}{\\ell}\\sin\\theta - a \\omega - b \\sin(\\omega_0 t)\n$$\nIn this equation:\n\n$a$ governs the strength of the damping.\n$b$ governs the strength of the driving force.\n$\\omega_0$ is the angular frequency of the driving force.\n\nWhen $a=0$ and $b=0$, the energy/mass is conserved:\n$$E/m =g\\ell(1-\\cos(\\theta)) + \\frac{1}{2}\\ell^2\\omega^2$$\nBasic setup\nHere are the basic parameters we are going to use for this exercise:", "g = 9.81 # m/s^2\nl = 0.5 # length of pendulum, in meters\ntmax = 50. # seconds\nt = np.linspace(0, tmax, int(100*tmax))", "Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\\vec{y}(t) = (\\theta(t),\\omega(t))$.", "#I worked with James A and Hunter T.\ndef derivs(y, t, a, b, omega0):\n \"\"\"Compute the derivatives of the damped, driven pendulum.\n \n Parameters\n ----------\n y : ndarray\n The solution vector at the current time t[i]: [theta[i],omega[i]].\n t : float\n The current time t[i].\n a, b, omega0: float\n The parameters in the differential equation.\n \n Returns\n -------\n dy : ndarray\n The vector of derviatives at t[i]: [dtheta[i],domega[i]].\n \"\"\"\n # YOUR CODE HERE\n #raise NotImplementedError()\n theta = y[0]\n omega = y[1]\n dtheta =omega\n dw = -(g/l)*np.sin(theta)-a*omega-b*np.sin(omega0*t)\n return [dtheta, dw]\n\nassert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])\n\ndef energy(y):\n \"\"\"Compute the energy for the state array y.\n \n The state array y can have two forms:\n \n 1. It could be an ndim=1 array of np.array([theta,omega]) at a single time.\n 2. It could be an ndim=2 array where each row is the [theta,omega] at single\n time.\n \n Parameters\n ----------\n y : ndarray, list, tuple\n A solution vector\n \n Returns\n -------\n E/m : float (ndim=1) or ndarray (ndim=2)\n The energy per mass.\n \"\"\"\n # YOUR CODE HERE\n #raise NotImplementedError()\n if y.ndim==1:\n theta = y[0]\n omega = y[1]\n if y.ndim==2:\n theta = y[:,0]\n omega = y[:,1]\n E = g*l*(1-np.cos(theta))+0.5*l**2*omega**2\n return (E)\n\nassert np.allclose(energy(np.array([np.pi,0])),g)\nassert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))", "Simple pendulum\nUse the above functions to integrate the simple pendulum for the case where it starts at rest pointing vertically upwards. In this case, it should remain at rest with constant energy.\n\nIntegrate the equations of motion.\nPlot $E/m$ versus time.\nPlot $\\theta(t)$ and $\\omega(t)$ versus time.\nTune the atol and rtol arguments of odeint until $E/m$, $\\theta(t)$ and $\\omega(t)$ are constant.\n\nAnytime you have a differential equation with a a conserved quantity, it is critical to make sure the numerical solutions conserve that quantity as well. This also gives you an opportunity to find other bugs in your code. 
The default error tolerances (atol and rtol) used by odeint are not sufficiently small for this problem. Start by trying atol=1e-3, rtol=1e-2 and then decrease each by an order of magnitude until your solutions are stable.", "# YOUR CODE HERE\n#raise NotImplementedError()\ny0 = [np.pi,0]\nsolution = odeint(derivs, y0, t, args = (0,0,0), atol = 1e-5, rtol = 1e-4) \n\n# YOUR CODE HERE\n#raise NotImplementedError()\nplt.plot(t,energy(solution), label=\"$Energy/mass$\")\nplt.title('Simple Pendulum Engery')\nplt.xlabel('time')\nplt.ylabel('$Engery/Mass$')\nplt.ylim(9.2,10.2);\n\n# YOUR CODE HERE\n#raise NotImplementedError()\ntheta= solution[:,0]\nomega = solution[:,1]\nplt.plot(t ,theta, label = \"$\\Theta (t)$\")\nplt.plot(t, omega, label = \"$\\omega (t)$\")\nplt.ylim(-0.5,5)\nplt.legend()\nplt.title('Simple Pendulum $\\Theta (t)$ and $\\omega (t)$')\nplt.xlabel('Time');\n\nassert True # leave this to grade the two plots and their tuning of atol, rtol.", "Damped pendulum\nWrite a plot_pendulum function that integrates the damped, driven pendulum differential equation for a particular set of parameters $[a,b,\\omega_0]$.\n\nUse the initial conditions $\\theta(0)=-\\pi + 0.1$ and $\\omega=0$.\nDecrease your atol and rtol even futher and make sure your solutions have converged.\nMake a parametric plot of $[\\theta(t),\\omega(t)]$ versus time.\nUse the plot limits $\\theta \\in [-2 \\pi,2 \\pi]$ and $\\theta \\in [-10,10]$\nLabel your axes and customize your plot to make it beautiful and effective.", "def plot_pendulum(a=0.0, b=0.0, omega0=0.0):\n \"\"\"Integrate the damped, driven pendulum and make a phase plot of the solution.\"\"\"\n # YOUR CODE HERE\n #raise NotImplementedError()\n y0 =[-np.pi+0.1,0]\n solution = odeint(derivs, y0, t, args = (a,b,omega0), atol = 1e-5, rtol = 1e-4)\n theta=solution[:,0]\n omega=solution[:,1]\n plt.plot(theta, omega, color=\"k\")\n plt.title('Damped and Driven Pendulum Motion')\n plt.xlabel('$\\Theta (t)$')\n plt.ylabel('$\\omega (t)$')\n plt.xlim(-2*np.pi, 2*np.pi)\n plt.ylim(-10,10);", "Here is an example of the output of your plot_pendulum function that should show a decaying spiral.", "plot_pendulum(0.5, 0.0, 0.0)", "Use interact to explore the plot_pendulum function with:\n\na: a float slider over the interval $[0.0,1.0]$ with steps of $0.1$.\nb: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.\nomega0: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.", "# YOUR CODE HERE\n#raise NotImplementedError()\ninteract(plot_pendulum, a=(0.0,1.0,0.1), b=(0.0,10.0,0.1), omega0 = (0.0,10.0,0.1));", "Use your interactive plot to explore the behavior of the damped, driven pendulum by varying the values of $a$, $b$ and $\\omega_0$.\n\nFirst start by increasing $a$ with $b=0$ and $\\omega_0=0$.\nThen fix $a$ at a non-zero value and start to increase $b$ and $\\omega_0$.\n\nDescribe the different classes of behaviors you observe below.\nIncreasing a (with b=0 and $\\omega_0=0$) decreases the number of spirals and makes the densest part of the spirals more central,this means that a increases damping so it spirals to 0 faster with higher a. b is the amplitude of the driving force, as you increase b the spirals start overlapping with eachother. Once b gets high enough, the driving force overcomes the pendulum motion and the graph no longer looks like pendelum motion. $\\omega_0$ is angular fequency of the driving force. As $\\omega_0$ increases, the spot at which the spirals bunch together moves, starting from the middle." ]
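The passage above asks you to tighten atol and rtol until E/m, θ(t) and ω(t) are stable. One way to make that concrete is to sweep a few tolerance pairs and report the worst-case energy drift for the undamped, undriven pendulum. The sketch below does that, restating derivs and energy so it stands alone, and it deliberately starts at θ = π − 0.1 (rather than exactly π, which sits at rest) so the pendulum actually moves and the drift check is meaningful; the tolerance values chosen are illustrative.

```python
import numpy as np
from scipy.integrate import odeint

g, l = 9.81, 0.5
t = np.linspace(0, 50.0, 5000)


def derivs(y, t, a, b, omega0):
    theta, omega = y
    return [omega, -(g / l) * np.sin(theta) - a * omega - b * np.sin(omega0 * t)]


def energy(theta, omega):
    # E/m = g*l*(1 - cos(theta)) + 0.5*l^2*omega^2
    return g * l * (1 - np.cos(theta)) + 0.5 * l**2 * omega**2


y0 = [np.pi - 0.1, 0.0]  # start near, but not exactly at, the inverted position
for atol, rtol in [(1e-3, 1e-2), (1e-5, 1e-4), (1e-8, 1e-7)]:
    sol = odeint(derivs, y0, t, args=(0.0, 0.0, 0.0), atol=atol, rtol=rtol)
    E = energy(sol[:, 0], sol[:, 1])
    drift = np.max(np.abs(E - E[0]))
    print(f"atol={atol:g}, rtol={rtol:g}: max |E/m - E(0)/m| = {drift:.2e}")
```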
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
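A further sanity check that fits the pendulum notebook above: in the small-angle limit the free pendulum should oscillate with period T ≈ 2π√(l/g). The sketch below integrates a small-amplitude swing and estimates the period from upward zero crossings of θ(t); the crossing-detection helper is my own construction, not something from the assignment.

```python
import numpy as np
from scipy.integrate import odeint

g, l = 9.81, 0.5
t = np.linspace(0, 10.0, 10000)


def derivs(y, t):
    theta, omega = y
    return [omega, -(g / l) * np.sin(theta)]


sol = odeint(derivs, [0.05, 0.0], t)  # 0.05 rad: well inside the small-angle regime
theta = sol[:, 0]

# Indices where theta crosses zero going upward; successive upward crossings
# are one full period apart.
upward = np.where(np.diff(np.sign(theta)) > 0)[0]
measured_T = np.mean(np.diff(t[upward]))

print("measured period  :", measured_T)
print("2*pi*sqrt(l/g)   :", 2 * np.pi * np.sqrt(l / g))
```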
mne-tools/mne-tools.github.io
0.18/_downloads/7df5cd97aa959dd7e2627aba5e552081/plot_forward.ipynb
bsd-3-clause
[ "%matplotlib inline", "Head model and forward computation\nThe aim of this tutorial is to be a getting started for forward\ncomputation.\nFor more extensive details and presentation of the general\nconcepts for forward modeling. See ch_forward.", "import os.path as op\nimport mne\nfrom mne.datasets import sample\ndata_path = sample.data_path()\n\n# the raw file containing the channel location + types\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\n# The paths to Freesurfer reconstructions\nsubjects_dir = data_path + '/subjects'\nsubject = 'sample'", "Computing the forward operator\nTo compute a forward operator we need:\n\na -trans.fif file that contains the coregistration info.\na source space\nthe :term:BEM surfaces\n\nCompute and visualize BEM surfaces\nThe :term:BEM surfaces are the triangulations of the interfaces between\ndifferent tissues needed for forward computation. These surfaces are for\nexample the inner skull surface, the outer skull surface and the outer skin\nsurface, a.k.a. scalp surface.\nComputing the BEM surfaces requires FreeSurfer and makes use of either of\nthe two following command line tools:\n\ngen_mne_watershed_bem\ngen_mne_flash_bem\n\nOr by calling in a Python script one of the functions\n:func:mne.bem.make_watershed_bem or :func:mne.bem.make_flash_bem.\nHere we'll assume it's already computed. It takes a few minutes per subject.\nFor EEG we use 3 layers (inner skull, outer skull, and skin) while for\nMEG 1 layer (inner skull) is enough.\nLet's look at these surfaces. The function :func:mne.viz.plot_bem\nassumes that you have the the bem folder of your subject FreeSurfer\nreconstruction the necessary files.", "mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', orientation='coronal')", "Visualization the coregistration\nThe coregistration is operation that allows to position the head and the\nsensors in a common coordinate system. In the MNE software the transformation\nto align the head and the sensors in stored in a so-called trans file.\nIt is a FIF file that ends with -trans.fif. It can be obtained with\n:func:mne.gui.coregistration (or its convenient command line\nequivalent gen_mne_coreg), or mrilab if you're using a Neuromag\nsystem.\nFor the Python version see :func:mne.gui.coregistration\nHere we assume the coregistration is done, so we just visually check the\nalignment with the following code.", "# The transformation file obtained by coregistration\ntrans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'\n\ninfo = mne.io.read_info(raw_fname)\n# Here we look at the dense head, which isn't used for BEM computations but\n# is useful for coregistration.\nmne.viz.plot_alignment(info, trans, subject=subject, dig=True,\n meg=['helmet', 'sensors'], subjects_dir=subjects_dir,\n surfaces='head-dense')", "Compute Source Space\nThe source space defines the position and orientation of the candidate source\nlocations. 
There are two types of source spaces:\n\n\nsource-based source space when the candidates are confined to a\n surface.\n\n\nvolumetric or discrete source space when the candidates are discrete,\n arbitrarily located source points bounded by the surface.\n\n\nSource-based source space is computed using\n:func:mne.setup_source_space, while volumetric source space is computed\nusing :func:mne.setup_volume_source_space.\nWe will now compute a source-based source space with an OCT-6 resolution.\nSee setting_up_source_space for details on source space definition\nand spacing parameter.", "src = mne.setup_source_space(subject, spacing='oct6',\n subjects_dir=subjects_dir, add_dist=False)\nprint(src)", "The surface based source space src contains two parts, one for the left\nhemisphere (4098 locations) and one for the right hemisphere\n(4098 locations). Sources can be visualized on top of the BEM surfaces\nin purple.", "mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', src=src, orientation='coronal')", "To compute a volume based source space defined with a grid of candidate\ndipoles inside a sphere of radius 90mm centered at (0.0, 0.0, 40.0)\nyou can use the following code.\nObviously here, the sphere is not perfect. It is not restricted to the\nbrain and it can miss some parts of the cortex.", "sphere = (0.0, 0.0, 40.0, 90.0)\nvol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,\n sphere=sphere)\nprint(vol_src)\n\nmne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', src=vol_src, orientation='coronal')", "To compute a volume based source space defined with a grid of candidate\ndipoles inside the brain (requires the :term:BEM surfaces) you can use the\nfollowing.", "surface = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')\nvol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,\n surface=surface)\nprint(vol_src)\n\nmne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', src=vol_src, orientation='coronal')", "With the surface-based source space only sources that lie in the plotted MRI\nslices are shown. Let's write a few lines of mayavi to see all sources in 3D.", "import numpy as np # noqa\nfrom mayavi import mlab # noqa\nfrom surfer import Brain # noqa\n\nbrain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)\nsurf = brain.geo['lh']\n\nvertidx = np.where(src[0]['inuse'])[0]\n\nmlab.points3d(surf.x[vertidx], surf.y[vertidx],\n surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)", "Compute forward solution\nWe can now compute the forward solution.\nTo reduce computation we'll just compute a single layer BEM (just inner\nskull) that can then be used for MEG (not EEG).\nWe specify if we want a one-layer or a three-layer BEM using the\nconductivity parameter.\nThe BEM solution requires a BEM model which describes the geometry\nof the head the conductivities of the different tissues.", "conductivity = (0.3,) # for single layer\n# conductivity = (0.3, 0.006, 0.3) # for three layers\nmodel = mne.make_bem_model(subject='sample', ico=4,\n conductivity=conductivity,\n subjects_dir=subjects_dir)\nbem = mne.make_bem_solution(model)", "Note that the :term:BEM does not involve any use of the trans file. 
The BEM\nonly depends on the head geometry and conductivities.\nIt is therefore independent from the MEG data and the head position.\nLet's now compute the forward operator, commonly referred to as the\ngain or leadfield matrix.\nSee :func:mne.make_forward_solution for details on parameters meaning.", "fwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,\n meg=True, eeg=False, mindist=5.0, n_jobs=2)\nprint(fwd)", "We can explore the content of fwd to access the numpy array that contains\nthe gain matrix.", "leadfield = fwd['sol']['data']\nprint(\"Leadfield size : %d sensors x %d dipoles\" % leadfield.shape)", "To extract the numpy array containing the forward operator corresponding to\nthe source space fwd['src'] with cortical orientation constraint\nwe can use the following:", "fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,\n use_cps=True)\nleadfield = fwd_fixed['sol']['data']\nprint(\"Leadfield size : %d sensors x %d dipoles\" % leadfield.shape)", "This is equivalent to the following code that explicitly applies the\nforward operator to a source estimate composed of the identity operator:", "n_dipoles = leadfield.shape[1]\nvertices = [src_hemi['vertno'] for src_hemi in fwd_fixed['src']]\nstc = mne.SourceEstimate(1e-9 * np.eye(n_dipoles), vertices, tmin=0., tstep=1)\nleadfield = mne.apply_forward(fwd_fixed, stc, info).data / 1e-9", "To save to disk a forward solution you can use\n:func:mne.write_forward_solution and to read it back from disk\n:func:mne.read_forward_solution. Don't forget that FIF files containing\nforward solution should end with -fwd.fif.\nTo get a fixed-orientation forward solution, use\n:func:mne.convert_forward_solution to convert the free-orientation\nsolution to (surface-oriented) fixed orientation.\nExercise\nBy looking at\nsphx_glr_auto_examples_forward_plot_forward_sensitivity_maps.py\nplot the sensitivity maps for EEG and compare it with the MEG, can you\njustify the claims that:\n\nMEG is not sensitive to radial sources\nEEG is more sensitive to deep sources\n\nHow will the MEG sensitivity maps and histograms change if you use a free\ninstead if a fixed/surface oriented orientation?\nTry this changing the mode parameter in :func:mne.sensitivity_map\naccordingly. Why don't we see any dipoles on the gyri?" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
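The exercise at the end of the forward-modeling notebook above points at mne.sensitivity_map. As a hedged starting point, the sketch below shows how that call is commonly made for the gradiometer and magnetometer channels of the MEG-only forward solution computed earlier; it assumes fwd from the notebook is in scope, and the mode strings ('fixed' here, 'free' for comparison) are exactly the parameters the exercise asks you to vary — treat the keyword values as something to confirm against the MNE documentation rather than as the notebook's own solution.

```python
import mne

# Assumes `fwd` from the notebook is in scope (MEG-only forward solution).
# mode='fixed' uses the surface-oriented gain; compare with mode='free'
# to explore the question posed in the exercise.
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')

print('gradiometer sensitivity min/max:', grad_map.data.min(), grad_map.data.max())
print('magnetometer sensitivity min/max:', mag_map.data.min(), mag_map.data.max())
```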
magenta/magenta-demos
jupyter-notebooks/Sketch_RNN_TF_To_JS_Tutorial.ipynb
apache-2.0
[ "In this notebook, I will show how to train the TensorFlow version of Sketch-RNN on a new dataset, and convert the weights of the TF model to a JSON format that is usable by Sketch-RNN-JS so that interactive web demos can be built.\nFor the purpose of this tutorial, I will be training on the dataset file called kanji.rdp25.npz which is available inside the repo https://github.com/hardmaru/sketch-rnn-datasets/ under the kanji subdirectory. If you have a custom dataset, you will need to convert it over to an .npz file using the stroke-3 format as done for these datasets. Please study the README.md in Sketch-RNN to understand how the file format that Sketch-RNN can work with work, in the section called \"Creating Your Own Dataset\".\nAfter cloning the TensorFlow repo for the Sketch-RNN model, below is the command that I ran to train the TensorFlow model:\npython sketch_rnn_train.py --data_dir=kanji --hparams=data_set=['kanji.rdp25.npz'],num_steps=200000,conditional=0,dec_rnn_size=1024\nI store the kanji.rdp25.npz inside the subdirectory called kanji but you can use whatever you want. The important thing to note here is that I'm trainining a decoder-only model by setting conditional=0 and I'm training a 1 layer LSTM with hidden size of 1024, which should be good enough for most datasets in the order of 10K size. Using 200K steps should take around half a day on a single P100 GPU, so it should cost around USD 10 dollars using the current prices for Google Cloud Platform to train this model.\nAfter the model is trained, I run the remaining commands for this IPython notebook to generate a file call custom.gen.json, which can be used in the Sketch-RNN-JS repo for interactive work:\nhttps://github.com/tensorflow/magenta-demos/tree/master/sketch-rnn-js\nThis json format created will also work for future TensorFlow.js and ML5.js versions of sketch-RNN.", "# import the required libraries\nimport numpy as np\nimport time\nimport random\n\nimport codecs\nimport collections\nimport os\nimport math\nimport json\nimport tensorflow as tf\nfrom six.moves import xrange\n\n# libraries required for visualisation:\nfrom IPython.display import SVG, display\nimport svgwrite # conda install -c omnia svgwrite=1.1.6\nimport PIL\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n# set numpy output to something sensible\nnp.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)\n\ntf.logging.info(\"TensorFlow Version: %s\", tf.__version__)\n\n\n# import our command line tools\n'''\nfrom magenta.models.sketch_rnn.sketch_rnn_train import *\nfrom magenta.models.sketch_rnn.model import *\nfrom magenta.models.sketch_rnn.utils import *\nfrom magenta.models.sketch_rnn.rnn import *\n'''\n\n# If code is modified to remove magenta dependencies:\nfrom sketch_rnn_train import *\nfrom model import *\nfrom utils import *\nfrom rnn import *\n\n# little function that displays vector images and saves them to .svg\ndef draw_strokes(data, factor=0.2, svg_filename = '/tmp/sketch_rnn/svg/sample.svg'):\n tf.gfile.MakeDirs(os.path.dirname(svg_filename))\n min_x, max_x, min_y, max_y = get_bounds(data, factor)\n dims = (50 + max_x - min_x, 50 + max_y - min_y)\n dwg = svgwrite.Drawing(svg_filename, size=dims)\n dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))\n lift_pen = 1\n abs_x = 25 - min_x \n abs_y = 25 - min_y\n p = \"M%s,%s \" % (abs_x, abs_y)\n command = \"m\"\n for i in xrange(len(data)):\n if (lift_pen == 1):\n command = \"m\"\n elif (command != \"l\"):\n command = \"l\"\n else:\n command = \"\"\n x 
= float(data[i,0])/factor\n y = float(data[i,1])/factor\n lift_pen = data[i, 2]\n p += command+str(x)+\",\"+str(y)+\" \"\n the_color = \"black\"\n stroke_width = 1\n dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill(\"none\"))\n dwg.save()\n display(SVG(dwg.tostring()))\n\n# generate a 2D grid of many vector drawings\ndef make_grid_svg(s_list, grid_space=10.0, grid_space_x=16.0):\n def get_start_and_end(x):\n x = np.array(x)\n x = x[:, 0:2]\n x_start = x[0]\n x_end = x.sum(axis=0)\n x = x.cumsum(axis=0)\n x_max = x.max(axis=0)\n x_min = x.min(axis=0)\n center_loc = (x_max+x_min)*0.5\n return x_start-center_loc, x_end\n x_pos = 0.0\n y_pos = 0.0\n result = [[x_pos, y_pos, 1]]\n for sample in s_list:\n s = sample[0]\n grid_loc = sample[1]\n grid_y = grid_loc[0]*grid_space+grid_space*0.5\n grid_x = grid_loc[1]*grid_space_x+grid_space_x*0.5\n start_loc, delta_pos = get_start_and_end(s)\n\n loc_x = start_loc[0]\n loc_y = start_loc[1]\n new_x_pos = grid_x+loc_x\n new_y_pos = grid_y+loc_y\n result.append([new_x_pos-x_pos, new_y_pos-y_pos, 0])\n\n result += s.tolist()\n result[-1][2] = 1\n x_pos = new_x_pos+delta_pos[0]\n y_pos = new_y_pos+delta_pos[1]\n return np.array(result)", "define the path of the model you want to load, and also the path of the dataset", "# you may need to change these to link to where your data and checkpoints are actually stored!\n# in the default config, model_dir is likely to be /tmp/sketch_rnn/models\ndata_dir = './kanji'\nmodel_dir = './log'\n\n[train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = load_env(data_dir, model_dir)\n\n[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)\n\n# construct the sketch-rnn model here:\nreset_graph()\nmodel = Model(hps_model)\neval_model = Model(eval_hps_model, reuse=True)\nsample_model = Model(sample_hps_model, reuse=True)\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\n\ndef decode(z_input=None, draw_mode=True, temperature=0.1, factor=0.2):\n z = None\n if z_input is not None:\n z = [z_input]\n sample_strokes, m = sample(sess, sample_model, seq_len=eval_model.hps.max_seq_len, temperature=temperature, z=z)\n strokes = to_normal_strokes(sample_strokes)\n if draw_mode:\n draw_strokes(strokes, factor)\n return strokes\n\n# loads the weights from checkpoint into our model\nload_checkpoint(sess, model_dir)\n\n# randomly unconditionally generate 10 examples\nN = 10\nreconstructions = []\nfor i in range(N):\n reconstructions.append([decode(temperature=0.5, draw_mode=False), [0, i]])", "Let's see if our model kind of works by sampling from it:", "stroke_grid = make_grid_svg(reconstructions)\ndraw_strokes(stroke_grid)\n\ndef get_model_params():\n # get trainable params.\n model_names = []\n model_params = []\n model_shapes = []\n with sess.as_default():\n t_vars = tf.trainable_variables()\n for var in t_vars:\n param_name = var.name\n p = sess.run(var)\n model_names.append(param_name)\n params = p\n model_params.append(params)\n model_shapes.append(p.shape)\n return model_params, model_shapes, model_names\n\ndef quantize_params(params, max_weight=10.0, factor=32767):\n result = []\n max_weight = np.abs(max_weight)\n for p in params:\n r = np.array(p)\n r /= max_weight\n r[r>1.0] = 1.0\n r[r<-1.0] = -1.0\n result.append(np.round(r*factor).flatten().astype(np.int).tolist())\n return result\n\nmodel_params, model_shapes, model_names = get_model_params()\n\nmodel_names\n\n# scale factor converts \"model-coordinates\" to \"pixel coordinates\" for your JS canvas 
demo later on.\n# the larger it is, the larger your drawings (in pixel space) will be.\n# I recommend setting this to 100.0 and iterating the value in the json file later on when you build the JS part.\nscale_factor = 200.0\nmetainfo = {\"mode\":2,\"version\":6,\"max_seq_len\":train_set.max_seq_length,\"name\":\"custom\",\"scale_factor\":scale_factor}\n\nmodel_params_quantized = quantize_params(model_params)\n\nmodel_blob = [metainfo, model_shapes, model_params_quantized]\n\nwith open(\"custom.gen.full.json\", 'w') as outfile:\n json.dump(model_blob, outfile, separators=(',', ':'))", "After you dump the custom.gen.full.json, you should save the below code as compress_model.json, and run:\nnode compress_model.js custom.gen.full.json custom.gen.json\nTo get to the final file you can use for Sketch-RNN-JS\nBelow is the entire code for compress_model.js which will be run using node:\n```\n/\ncompress_model.js\nCompress JSON model to b64 encoded version to save bandwidth. only works for decoder-only sketch-rnn model.\n/\nconst assert = require('assert');\nconst fs = require('fs');\n/*\n * deals with decompressing b64 models to float arrays.\n /\nfunction btoa(s) {\n return Buffer.from(s, 'binary').toString('base64');\n}\nfunction string_to_uint8array(b64encoded) {\n var u8 = new Uint8Array(atob(b64encoded).split(\"\").map(function(c) {\n return c.charCodeAt(0); }));\n return u8;\n}\nfunction uintarray_to_string(u8) {\n var s = \"\";\n for (var i = 0, len = u8.length; i < len; i++) {\n s += String.fromCharCode(u8[i]);\n }\n var b64encoded = btoa(s);\n return b64encoded;\n};\nfunction string_to_array(s) {\n var u = string_to_uint8array(s);\n var result = new Int16Array(u.buffer);\n return result;\n};\nfunction array_to_string(a) {\n var u = new Uint8Array(a.buffer);\n var result = uintarray_to_string(u);\n return result;\n};\nvar args = process.argv.slice(2);\ntry {\n assert.strictEqual(args.length, 2);\n} catch (err) {\n console.log(\"Usage: node compress_model.js orig_full_model.json compressed_model.json\")\n process.exit(1);\n}\nvar orig_file = args[0];\nvar target_file = args[1];\nvar orig_model = JSON.parse(fs.readFileSync(orig_file, 'ascii'));\nvar model_weights = orig_model[2];\nvar compressed_weights = [];\nfor (var i=0;i<model_weights.length;i++) {\n compressed_weights.push(array_to_string(new Int16Array(model_weights[i])));\n}\nvar target_model = [orig_model[0], orig_model[1], compressed_weights];\nfs.writeFileSync(target_file, JSON.stringify(target_model), 'ascii');\n```" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
outlace/Machine-Learning-Experiments
VariableOutput.ipynb
mit
[ "A recursive neural network that decides how many times to run itself\nProduces variable-length outputs for static-length inputs.", "import numpy as np\n\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([[0],[0,0],[0,0,0],[0,0,0,0]])\n\ndef sigmoid(x):\n return np.matrix(1.0 / (1.0 + np.exp(-x)))\n\ndef relu(x):\n alpha = 0.01\n return np.maximum(x, (alpha * x))\n\n#initialize random weights\nnumIn, numHid, numOut = 2, 3, 2\ntheta1 = np.array( 0.5 * np.sqrt ( 6 / ( numIn + numHid) ) * np.random.randn( numIn + 1, numHid ), dtype=\"float32\" )\ntheta2 = np.array( 0.5 * np.sqrt ( 6 / ( numHid + numOut ) ) * np.random.randn( numHid + 1, numOut ), dtype=\"float32\" )\n\ntheta = np.append(theta1.flatten(), theta2.flatten()) #unroll vectors in a one long vector\n\ndef nn(x, theta):\n i = 0\n theta1 = np.array(theta[:9]).reshape(3,3)\n theta2 = np.array(theta[9:]).reshape(4,2)\n #print(theta1.shape)\n #print(theta2.shape)\n outputs = []\n def comp(x):\n #print(x)\n a1 = np.array(np.concatenate((x.reshape(1,2), np.ones((1,1))), axis=1))\n z2 = a1 @ theta1\n a2 = np.concatenate((relu(z2), np.ones((1,1))), axis=1)\n z3 = a2 @ theta2\n a3 = sigmoid(z3)\n return a3\n \n a3 = comp(x)\n outputs.append(a3[0,1])\n while a3[0,0] > 0.5 and i < 3: #prevent an infinite loop; constrain output length\n i += 1\n input = np.array([[a3[0,1],0]])\n a3 = comp(input)\n outputs.append(a3[0,1])\n return np.array(outputs)", "The neural network accepts an input vector of length 2. It has 2 output nodes. One node is used to control whether or not to recursively run itself, the other is the real data output. We simply threshold > 0.5 to trigger a recursive call to itself.", "###example output with random initial weights\nprint( nn(X[0], theta) )\nprint( nn(X[1], theta) ) \nprint( nn(X[2], theta) ) \nprint( nn(X[3], theta) ) ", "Cost Function\nArbitrarily assign a high cost to mismatches in the length of the output, then also assess MSE", "def costFunction(X, Y, theta):\n cost = 0\n for i in range(len(X)):\n y = Y[i]\n m = float(len(X[i]))\n hThetaX = nn(X[i], theta)\n if len(y) != len(hThetaX):\n cost += 3\n else:\n cost += (1/m) * np.sum(np.abs(y - hThetaX)**2)\n \n return cost", "Genetic Algorithm to Solve Weights:", "import random as rn, numpy as np\n# [Initial population size, mutation rate (=1%), num generations (30), solution length (13), # winners/per gen]\ninitPop, mutRate, numGen, solLen, numWin = 100, 0.01, 500, 17, 20\n#initialize current population to random values within range\ncurPop = np.random.choice(np.arange(-15,15,step=0.01),size=(initPop, solLen),replace=False)\nnextPop = np.zeros((curPop.shape[0], curPop.shape[1]))\nfitVec = np.zeros((initPop, 2)) #1st col is indices, 2nd col is cost\nfor i in range(numGen): #iterate through num generations\n #Create vector of all errors from cost function for each solution\n\tfitVec = np.array([np.array([x, np.sum(costFunction(X, y, curPop[x].T))]) for x in range(initPop)])\n\t#plt.pyplot.scatter(i,np.sum(fitVec[:,1]))\n\twinners = np.zeros((numWin, solLen))\n\tfor n in range(len(winners)): #for n in range(10)\n\t\tselected = np.random.choice(range(len(fitVec)), numWin/2, replace=False)\n\t\twnr = np.argmin(fitVec[selected,1])\n\t\twinners[n] = curPop[int(fitVec[selected[wnr]][0])]\n\tnextPop[:len(winners)] = winners #populate new gen with winners\n\tduplicWin = np.zeros((((initPop - len(winners))),winners.shape[1]))\n\tfor x in range(winners.shape[1]): #for each col in winners (3 cols)\n #Duplicate winners (20x3 matrix) 3 times to create 80x3 matrix, then 
shuffle columns\n\t\tnumDups = ((initPop - len(winners))/len(winners)) #num times to duplicate to fill rest of nextPop\n\t\tduplicWin[:, x] = np.repeat(winners[:, x], numDups, axis=0)#duplicate each col\n\t\tduplicWin[:, x] = np.random.permutation(duplicWin[:, x]) #shuffle each col (\"crossover\")\n #Populate the rest of the generation with offspring of mating pairs\n\tnextPop[len(winners):] = np.matrix(duplicWin)\n #Create a mutation matrix, mostly 1s, but some elements are random numbers from a normal distribution\n\tmutMatrix = [np.float(np.random.normal(0,2,1)) if rn.random() < mutRate else 1 for x in range(nextPop.size)]\n #randomly mutate part of the population by multiplying nextPop by our mutation matrix\n\tnextPop = np.multiply(nextPop, np.matrix(mutMatrix).reshape(nextPop.shape))\n\tcurPop = nextPop\nbest_soln = curPop[np.argmin(fitVec[:,1])]\nprint(\"Best Sol'n:\\n%s\\nCost:%s\" % (best_soln,np.sum(costFunction(X, y, best_soln.T))))\n\n#Demonstrate variable output after training\nprint( np.round(nn(X[0], best_soln.reshape(17,1)), 2) )\nprint( np.round(nn(X[1], best_soln.reshape(17,1)), 2) )\nprint( np.round(nn(X[2], best_soln.reshape(17,1)), 2) )\nprint( np.round(nn(X[3], best_soln.reshape(17,1)), 2) )" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
machow/siuba
docs/backends.ipynb
mit
[ "import pandas as pd\n\npd.set_option(\"display.max_rows\", 5)", "Backends\nQuick examples\npandas (fast grouped) _", "# pandas fast grouped implementation ----\nfrom siuba.data import cars\nfrom siuba import _\nfrom siuba.experimental.pd_groups import fast_mutate, fast_filter, fast_summarize\n\nfast_mutate(\n cars.groupby('cyl'),\n avg_mpg = _.mpg.mean(), # aggregation\n hp_per_mpg = _.hp / _.mpg, # elementwise \n demeaned = _.hp - _.hp.mean(), # elementwise + agg\n)", "SQL _", "from siuba import _, mutate, group_by, summarize, show_query\nfrom siuba.sql import LazyTbl\nfrom sqlalchemy import create_engine\n\n# create sqlite db, add pandas DataFrame to it\nengine = create_engine(\"sqlite:///:memory:\")\ncars.to_sql(\"cars\", engine, if_exists=\"replace\")\n\n# define query\nq = (LazyTbl(engine, \"cars\")\n >> group_by(_.cyl)\n >> summarize(avg_mpg=_.mpg.mean())\n)\n\nq\n\nres = show_query(q)", "Supported methods\nThe table below shows the pandas methods supported by different backends. Note that the regular, ungrouped backend supports all methods, and the fast grouped implementation supports most methods a person could use without having to call the (slow) DataFrame.apply method.\n\n🚧This table is displayed a bit funky, but will be cleaned up!\n\npandas (ungrouped)\nIn general, ungrouped pandas DataFrames do not require any translation.\nOn this kind of data, verbs like mutate are just alternative implementations of methods like DataFrame.assign.", "from siuba import _, mutate\n\ndf = pd.DataFrame({\n 'g': ['a', 'a', 'b'], \n 'x': [1,2,3],\n })\n\ndf.assign(y = lambda _: _.x + 1)\n\nmutate(df, y = _.x + 1)", "Siuba verbs also work on grouped DataFrames, but are not always fast. They are the potentially slow, reference implementation.", "mutate(\n df.groupby('g'),\n y = _.x + 1,\n z = _.x - _.x.mean()\n)", "pandas (fast grouped)\nNote that you could easily enable these fast methods by default, by aliasing them at import.\npython\nfrom siuba.experimental.pd_groups import fast_mutate as mutate\nArchitecture (1)\nCurrently, the fast grouped implementation puts all the logic in the verbs. That is, fast_mutate dispatches for DataFrameGroupBy a function that handles all the necessary translation of lazy expressions.\nSee TODO link this ADR for more details.\nSQL\nArchitecture (2)\nThe SQL implementation consists largely of the following:\n\nLazyTbl - a class that holds a sqlalchemy connection, table name, and list of select statements.\nVerbs that dispatch on LazyTbl - eg. mutate takes a LazyTbl, and returns a LazyTbl that has a new select statement corresponding to that mutate.\nCallListeners for (1) translating lazy expressions to SQL specific functions, and (2) adding grouping information to OVER clauses.\n\nSee TODO link this ADR for more details." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.18/_downloads/c92aa91c680730c756234cdbc466c558/plot_introduction.ipynb
bsd-3-clause
[ "%matplotlib inline", "Overview of MEG/EEG analysis with MNE-Python\nThis tutorial covers the basic EEG/MEG pipeline for event-related analysis:\nloading data, epoching, averaging, plotting, and estimating cortical activity\nfrom sensor data. It introduces the core MNE-Python data structures\n:class:~mne.io.Raw, :class:~mne.Epochs, :class:~mne.Evoked, and\n:class:~mne.SourceEstimate, and covers a lot of ground fairly quickly (at the\nexpense of depth). Subsequent tutorials address each of these topics in greater\ndetail.\n :depth: 1\nWe begin by importing the necessary Python modules:", "import os\nimport numpy as np\nimport mne", "Loading data\n^^^^^^^^^^^^\nMNE-Python data structures are based around the FIF file format from\nNeuromag, but there are reader functions for a wide variety of other\ndata formats &lt;data-formats&gt;. MNE-Python also has interfaces to a\nvariety of :doc:publicly available datasets &lt;../../manual/datasets_index&gt;,\nwhich MNE-Python can download and manage for you.\nWe'll start this tutorial by loading one of the example datasets (called\n\"sample-dataset\"), which contains EEG and MEG data from one subject\nperforming an audiovisual experiment, along with structural MRI scans for\nthat subject. The :func:mne.datasets.sample.data_path function will\nautomatically download the dataset if it isn't found in one of the expected\nlocations, then return the directory path to the dataset (see the\ndocumentation of :func:~mne.datasets.sample.data_path for a list of places\nit checks before downloading). Note also that for this tutorial to run\nsmoothly on our servers, we're using a filtered and downsampled version of\nthe data (:file:sample_audvis_filt-0-40_raw.fif), but an unfiltered version\n(:file:sample_audvis_raw.fif) is also included in the sample dataset and\ncould be substituted here when running the tutorial locally.", "sample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_filt-0-40_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file)", "By default, :func:~mne.io.read_raw_fif displays some information about the\nfile it's loading; for example, here it tells us that there are four\n\"projection items\" in the file along with the recorded data; those are\n:term:SSP projectors &lt;projector&gt; calculated to remove environmental noise\nfrom the MEG signals, plus a projector to mean-reference the EEG channels;\nthese are discussed\nin a later tutorial. In addition to the information displayed during loading,\nyou can get a glimpse of the basic details of a :class:~mne.io.Raw object\nby printing it; even more is available by printing its info attribute\n(a :class:dictionary-like object &lt;mne.Info&gt; that is preserved across\n:class:~mne.io.Raw, :class:~mne.Epochs, and :class:~mne.Evoked\nobjects). The info data structure keeps track of channel locations,\napplied filters, projectors, etc. Notice especially the chs entry,\nshowing that MNE-Python detects different sensor types and handles each\nappropriately.\n.. TODO edit prev. paragraph when projectors tutorial is added: ...those are\n discussed in the tutorial projectors-tutorial. (or whatever link)", "print(raw)\nprint(raw.info)", ":class:~mne.io.Raw objects also have several built-in plotting methods;\nhere we show the power spectral density (PSD) for each sensor type with\n:meth:~mne.io.Raw.plot_psd, as well as a plot of the raw sensor traces with\n:meth:~mne.io.Raw.plot. 
In the PSD plot, we'll only plot frequencies below\n50 Hz (since our data are low-pass filtered at 40 Hz). In interactive Python\nsessions, :meth:~mne.io.Raw.plot is interactive and allows scrolling,\nscaling, bad channel marking, annotation, projector toggling, etc.", "raw.plot_psd(fmax=50)\nraw.plot(duration=5, n_channels=30)", "Preprocessing\n^^^^^^^^^^^^^\nMNE-Python supports a variety of preprocessing approaches and techniques\n(maxwell filtering, signal-space projection, independent components analysis,\nfiltering, downsampling, etc); see the full list of capabilities in the\n:mod:mne.preprocessing and :mod:mne.filter submodules. Here we'll clean\nup our data by performing independent components analysis\n(:class:~mne.preprocessing.ICA); for brevity we'll skip the steps that\nhelped us determined which components best capture the artifacts (see\n:doc:../preprocessing/plot_artifacts_correction_ica for a detailed\nwalk-through of that process).", "# set up and fit the ICA\nica = mne.preprocessing.ICA(n_components=20, random_state=97, max_iter=800)\nica.fit(raw)\nica.exclude = [1, 2] # details on how we picked these are omitted here\nica.plot_properties(raw, picks=ica.exclude)", "Once we're confident about which component(s) we want to remove, we pass them\nas the exclude parameter and then apply the ICA to the raw signal. The\n:meth:~mne.preprocessing.ICA.apply method requires the raw data to be\nloaded into memory (by default it's only read from disk as-needed), so we'll\nuse :meth:~mne.io.Raw.load_data first. We'll also make a copy of the\n:class:~mne.io.Raw object so we can compare the signal before and after\nartifact removal side-by-side:", "orig_raw = raw.copy()\nraw.load_data()\nica.apply(raw)\n\n# show some frontal channels to clearly illustrate the artifact removal\nchs = ['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0211', 'MEG 0221', 'MEG 0231',\n 'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 1511', 'MEG 1521', 'MEG 1531',\n 'EEG 001', 'EEG 002', 'EEG 003', 'EEG 004', 'EEG 005', 'EEG 006',\n 'EEG 007', 'EEG 008']\nchan_idxs = [raw.ch_names.index(ch) for ch in chs]\norig_raw.plot(order=chan_idxs, start=12, duration=4)\nraw.plot(order=chan_idxs, start=12, duration=4)", "Detecting experimental events\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nThe sample dataset includes several :term:\"STIM\" channels &lt;stim channel&gt;\nthat recorded electrical\nsignals sent from the stimulus delivery computer (as brief DC shifts /\nsquarewave pulses). These pulses (often called \"triggers\") are used in this\ndataset to mark experimental events: stimulus onset, stimulus type, and\nparticipant response (button press). The individual STIM channels are\ncombined onto a single channel, in such a way that voltage\nlevels on that channel can be unambiguously decoded as a particular event\ntype. On older Neuromag systems (such as that used to record the sample data)\nthis summation channel was called STI 014, so we can pass that channel\nname to the :func:mne.find_events function to recover the timing and\nidentity of the stimulus events.", "events = mne.find_events(raw, stim_channel='STI 014')\nprint(events[:5]) # show the first 5", "The resulting events array is an ordinary 3-column :class:NumPy array\n&lt;numpy.ndarray&gt;, with sample number in the first column and integer event ID\nin the last column; the middle column is usually ignored. Rather than keeping\ntrack of integer event IDs, we can provide an event dictionary that maps\nthe integer IDs to experimental conditions or events. 
In this dataset, the\nmapping looks like this:\n+----------+----------------------------------------------------------+\n| Event ID | Condition |\n+==========+==========================================================+\n| 1 | auditory stimulus (tone) to the left ear |\n+----------+----------------------------------------------------------+\n| 2 | auditory stimulus (tone) to the right ear |\n+----------+----------------------------------------------------------+\n| 3 | visual stimulus (checkerboard) to the left visual field |\n+----------+----------------------------------------------------------+\n| 4 | visual stimulus (checkerboard) to the right visual field |\n+----------+----------------------------------------------------------+\n| 5 | smiley face (catch trial) |\n+----------+----------------------------------------------------------+\n| 32 | subject button press |\n+----------+----------------------------------------------------------+", "event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,\n 'visual/right': 4, 'smiley': 5, 'buttonpress': 32}", "Event dictionaries like this one are used when extracting epochs from\ncontinuous data; the / character in the dictionary keys allows pooling\nacross conditions by requesting partial condition descriptors (i.e.,\nrequesting 'auditory' will select all epochs with Event IDs 1 and 2;\nrequesting 'left' will select all epochs with Event IDs 1 and 3). An\nexample of this is shown in the next section. There is also a convenient\n:func:~mne.viz.plot_events function for visualizing the distribution of\nevents across the duration of the recording (to make sure event detection\nworked as expected). Here we'll also make use of the :class:~mne.Info\nattribute to get the sampling frequency of the recording (so our x-axis will\nbe in seconds instead of in samples).", "fig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw.info['sfreq'])\nfig.subplots_adjust(right=0.7) # make room for the legend", "For paradigms that are not event-related (e.g., analysis of resting-state\ndata), you can extract regularly spaced (possibly overlapping) spans of data\nby creating events using :func:mne.make_fixed_length_events and then\nproceeding with epoching as described in the next section.\nEpoching continuous data\n^^^^^^^^^^^^^^^^^^^^^^^^\nThe :class:~mne.io.Raw object and the events array are the bare minimum\nneeded to create an :class:~mne.Epochs object, which we create with the\n:class:mne.Epochs class constructor. Here we'll also specify some data\nquality constraints: we'll reject any epoch where peak-to-peak signal\namplitude is beyond reasonable limits for that channel type. This is done\nwith a rejection dictionary; you may include or omit thresholds for any of\nthe channel types present in your data. The values given here are reasonable\nfor this particular dataset, but may need to be adapted for different\nhardware or recording conditions. For a more automated approach, consider\nusing the autoreject package_.", "reject_criteria = dict(mag=4000e-15, # 4000 fT\n grad=4000e-13, # 4000 fT/cm\n eeg=150e-6, # 150 μV\n eog=250e-6) # 250 μV", "We'll also pass the event dictionary as the event_id parameter (so we can\nwork with easy-to-pool event labels instead of the integer event IDs), and\nspecify tmin and tmax (the time relative to each event at which to\nstart and end each epoch). 
As mentioned above, by default\n:class:~mne.io.Raw and :class:~mne.Epochs data aren't loaded into memory\n(they're accessed from disk only when needed), but here we'll force loading\ninto memory using the preload=True parameter so that we can see the\nresults of the rejection criteria being applied:", "epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.2, tmax=0.5,\n reject=reject_criteria, preload=True)", "Next we'll pool across left/right stimulus presentations so we can compare\nauditory versus visual responses. To avoid biasing our signals to the\nleft or right, we'll use :meth:~mne.Epochs.equalize_event_counts first to\nrandomly sample epochs from each condition to match the number of epochs\npresent in the condition with the fewest good epochs.", "conds_we_care_about = ['auditory/left', 'auditory/right',\n 'visual/left', 'visual/right']\nepochs.equalize_event_counts(conds_we_care_about) # this operates in-place\naud_epochs = epochs['auditory']\nvis_epochs = epochs['visual']\ndel raw, epochs # free up memory", "Like :class:~mne.io.Raw objects, :class:~mne.Epochs objects also have a\nnumber of built-in plotting methods. One is :meth:~mne.Epochs.plot_image,\nwhich shows each epoch as one row of an image map, with color representing\nsignal magnitude; the average evoked response and the sensor location are\nshown below the image:", "aud_epochs.plot_image(picks=['MEG 1332', 'EEG 021'])", "<div class=\"alert alert-info\"><h4>Note</h4><p>Both :class:`~mne.io.Raw` and :class:`~mne.Epochs` objects have\n :meth:`~mne.Epochs.get_data` methods that return the underlying data\n as a :class:`NumPy array <numpy.ndarray>`. Both methods have a ``picks``\n parameter for subselecting which channel(s) to return; ``raw.get_data()``\n has additional parameters for restricting the time domain. The resulting\n matrices have dimension ``(n_channels, n_times)`` for\n :class:`~mne.io.Raw` and ``(n_epochs, n_channels, n_times)`` for\n :class:`~mne.Epochs`.</p></div>\n\nTime-frequency analysis\n^^^^^^^^^^^^^^^^^^^^^^^\nThe :mod:mne.time_frequency submodule provides implementations of several\nalgorithms to compute time-frequency representations, power spectral density,\nand cross-spectral density. Here, for example, we'll compute for the auditory\nepochs the induced power at different frequencies and times, using Morlet\nwavelets. On this dataset the result is not especially informative (it just\nshows the evoked \"auditory N100\" response); see here\n&lt;inter-trial-coherence&gt; for a more extended example on a dataset with richer\nfrequency content.", "frequencies = np.arange(7, 30, 3)\npower = mne.time_frequency.tfr_morlet(aud_epochs, n_cycles=2, return_itc=False,\n freqs=frequencies, decim=3)\npower.plot(['MEG 1332'])", "Estimating evoked responses\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\nNow that we have our conditions in aud_epochs and vis_epochs, we can\nget an estimate of evoked responses to auditory versus visual stimuli by\naveraging together the epochs in each condition. 
This is as simple as calling\nthe :meth:~mne.Epochs.average method on the :class:~mne.Epochs object,\nand then using a function from the :mod:mne.viz module to compare the\nglobal field power for each sensor type of the two :class:~mne.Evoked\nobjects:", "aud_evoked = aud_epochs.average()\nvis_evoked = vis_epochs.average()\n\nmne.viz.plot_compare_evokeds(dict(auditory=aud_evoked, visual=vis_evoked),\n show_legend='upper left',\n show_sensors='upper right')", "We can also get a more detailed view of each :class:~mne.Evoked object\nusing other plotting methods such as :meth:~mne.Evoked.plot_joint or\n:meth:~mne.Evoked.plot_topomap. Here we'll examine just the EEG channels,\nand see the classic auditory evoked N100-P200 pattern over dorso-frontal\nelectrodes, then plot scalp topographies at some additional arbitrary times:", "# sphinx_gallery_thumbnail_number = 13\naud_evoked.plot_joint(picks='eeg')\naud_evoked.plot_topomap(times=[0., 0.08, 0.1, 0.12, 0.2], ch_type='eeg')", "Evoked objects can also be combined to show contrasts between conditions,\nusing the :func:mne.combine_evoked function. A simple difference can be\ngenerated by negating one of the :class:~mne.Evoked objects passed into the\nfunction. We'll then plot the difference wave at each sensor using\n:meth:~mne.Evoked.plot_topo:", "evoked_diff = mne.combine_evoked([aud_evoked, -vis_evoked], weights='equal')\nevoked_diff.pick_types('mag').plot_topo(color='r', legend=False)", "Inverse modeling\n^^^^^^^^^^^^^^^^\nFinally, we can estimate the origins of the evoked activity by projecting the\nsensor data into this subject's :term:source space (a set of points either\non the cortical surface or within the cortical volume of that subject, as\nestimated by structural MRI scans). MNE-Python supports lots of ways of doing\nthis (dynamic statistical parametric mapping, dipole fitting, beamformers,\netc.); here we'll use minimum-norm estimation (MNE) to generate a continuous\nmap of activation constrained to the cortical surface. MNE uses a linear\n:term:inverse operator to project EEG+MEG sensor measurements into the\nsource space. The inverse operator is computed from the\n:term:forward solution for this subject and an estimate of the\ncovariance of sensor measurements &lt;tut_compute_covariance&gt;. For this\ntutorial we'll skip those computational steps and load a pre-computed inverse\noperator from disk (it's included with the sample data\n&lt;sample-dataset&gt;). Because this \"inverse problem\" is underdetermined (there\nis no unique solution), here we further constrain the solution by providing a\nregularization parameter specifying the relative smoothness of the current\nestimates in terms of a signal-to-noise ratio (where \"noise\" here is akin to\nbaseline activity level across all of cortex).", "# load inverse operator\ninverse_operator_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis-meg-oct-6-meg-inv.fif')\ninv_operator = mne.minimum_norm.read_inverse_operator(inverse_operator_file)\n# set signal-to-noise ratio (SNR) to compute regularization parameter (λ²)\nsnr = 3.\nlambda2 = 1. 
/ snr ** 2\n# generate the source time course (STC)\nstc = mne.minimum_norm.apply_inverse(vis_evoked, inv_operator,\n lambda2=lambda2,\n method='MNE') # or dSPM, sLORETA, eLORETA", "Finally, in order to plot the source estimate on the subject's cortical\nsurface we'll also need the path to the sample subject's structural MRI files\n(the subjects_dir):", "# path to subjects' MRI files\nsubjects_dir = os.path.join(sample_data_folder, 'subjects')\n# plot\nstc.plot(initial_time=0.1, hemi='split', views=['lat', 'med'],\n subjects_dir=subjects_dir)", "The remaining tutorials have much more detail on each of these topics (as\nwell as many other capabilities of MNE-Python not mentioned here:\nconnectivity analysis, encoding/decoding models, lots more visualization\noptions, etc). Read on to learn more!\n.. LINKS" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
VandyAstroML/Vanderbilt_Computational_Bootcamp
notebooks/Week_05/05_Numpy_Matplotlib.ipynb
mit
[ "Week 5 - Numpy & Matplotlib\nToday's Agenda\n\nNumpy\nMatplotlib\n\nNumpy - Numerical Python\nFrom their website (http://www.numpy.org/):\n\nNumPy is the fundamental package for scientific computing with Python. \n* a powerful N-dimensional array object\n* sophisticated (broadcasting) functions\n* tools for integrating C/C++ and Fortran code\n* useful linear algebra, Fourier transform, and random number capabilities\n\nYou can import \"numpy\" as", "import numpy as np", "Numpy arrays\nIn standard Python, data is stored as lists, and multidimensional data as lists of lists. In numpy, however, we can now work with arrays. To get these arrays, we can use np.asarray to convert a list into an array. Below we take a quick look at how a list behaves differently from an array.", "# We first create an array `x`\nstart = 1\nstop = 11\nstep = 1\n\nx = np.arange(start, stop, step)\n\nprint(x)", "We can also manipulate the array. For example, we can:\n\nMultiply by two:", "x * 2", "Take the square of all the values in the array:", "x ** 2", "Or even do some math on it:", "(x**2) + (5*x) + (x / 3)", "If we want to set up an array in numpy, we can use range to make a list and then convert it to an array, but we can also just create an array directly in numpy. np.arange will do this with integers, and np.linspace will do this with floats, and allows for non-integer steps.", "print(np.arange(10))\n\nprint(np.linspace(1,10,10))", "Last week we had to use a function or a loop to carry out math on a list. However with numpy we can do this a lot simpler by making sure we're working with an array, and carrying out the mathematical operations on that array.", "x=np.arange(10)\nprint(x)\n\nprint(x**2)", "In numpy, we also have more options for quickly (and without much code) examining the contents of an array. One of the most helpful tools for this is np.where. np.where uses a conditional statement on the array and returns an array that contains indices of all the values that were true for the conditional statement. We can then call the original array and use the new array to get all the values that were true for the conditional statement.\nThere are also functions like max and min that will give the maximum and minimum, respectively.", "# Defining starting and ending values of the array, as well as the number of elements in the array.\nstart = 0\nstop = 100\nn_elements = 201\n\nx = np.linspace(start, stop, n_elements)\n\nprint(x)", "And we can select only those values that are divisible by 5:", "# This function returns the indices that match the criteria of `x % 5 == 0`:\nx_5 = np.where(x%5 == 0)\n\nprint(x_5)\n\n# And one can use those indices to *only* select those values:\nprint(x[x_5])", "Or similarly:", "x[x%5 == 0]", "And you can find the max and min values of the array:", "print('The minimum of `x` is `{0}`'.format(x.min()))\n\nprint('The maximum of `x` is `{0}`'.format(x.max()))", "Numpy also provides some tools for loading and saving data, loadtxt and savetxt. Here I'm using a function called transpose so that instead of each array being a row, they each get treated as a column.\nWhen we load the information again, it's now a 2D array. 
We can select parts of those arrays just as we could for 1D arrays.", "start = 0\nstop = 100\nn_elem = 501\n\nx = np.linspace(start, stop, n_elem)\n\n# We can now create another array from `x`:\ny = (.1*x)**2 - (5*x) + 3\n\n# And finally, we can dump `x` and `y` to a file:\nnp.savetxt('myfile.txt', np.transpose([x,y]))\n\n# We can also load the data from `myfile.txt` and display it:\ndata = np.loadtxt('myfile.txt')\nprint('2D-array from file `myfile.txt`:\\n\\n', data, '\\n')\n\n# You can also select certain elements of the 2D-array\nprint('Selecting certain elements from `data`:\\n\\n', data[:3,:], '\\n')", "Resources\n\nScientific Lectures on Python - Numpy: iPython Notebook\nData Science iPython Notebooks - Numpy: iPython Notebook\n\nMatplotlib\nMatplotlib is a Python 2D plotting library which\n* produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms\n* Quick way to visualize data from Python\n* Main plotting utility in Python\nFrom their website (http://matplotlib.org/):\n\nMatplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shell, the jupyter notebook, web application servers, and four graphical user interface toolkits.\n\nA great starting point to figuring out how to make a particular figure is to start from the Matplotlib gallery and look for what you want to make.", "## Importing modules\n%matplotlib inline\n\n# Importing LaTeX\nfrom matplotlib import rc\nrc('text', usetex=True)\n\n# Importing matplotlib and other modules\nimport matplotlib.pyplot as plt\nimport numpy as np", "We can now load in the data from myfile.txt", "data = np.loadtxt('myfile.txt')", "The simplest figure is to simply make a plot. We can have multiple figures, but for now, just one. The plt.plot function will connect the points, but if we want a scatter plot, then plt.scatter will work.", "plt.figure(1, figsize=(8,8))\nplt.plot(data[:,0],data[:,1])\nplt.show()", "You can also pass the *data.T value instead:", "plt.figure(1, figsize=(8,8))\nplt.plot(*data.T)\nplt.show()", "We can take that same figure and add on the needed labels and titles.", "# Creating figure\nplt.figure(figsize=(8,8))\nplt.plot(*data.T)\nplt.title(r'$y = (0.1x)^{2} - 5x + 3$', fontsize=20)\nplt.xlabel('x value', fontsize=20)\nplt.ylabel('y value', fontsize=20)\nplt.show()", "There's a large number of options available for plotting, so try using the initial code below, combined with the information here to try out a few of the following things: changing the line width, changing the line color", "plt.figure(figsize=(8,8))\nplt.plot(data[:,0],data[:,1])\nplt.title(r'$y = (0.1x)^{2} - 5x + 3$', fontsize=20)\nplt.xlabel('x value', fontsize=20)\nplt.ylabel('y value', fontsize=20)\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google/earthengine-api
python/examples/ipynb/UNET_regression_demo.ipynb
apache-2.0
[ "#@title Copyright 2020 Google LLC. { display-mode: \"form\" }\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "<table class=\"ee-notebook-buttons\" align=\"left\"><td>\n<a target=\"_blank\" href=\"http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/UNET_regression_demo.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a>\n</td><td>\n<a target=\"_blank\" href=\"https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/UNET_regression_demo.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td></table>\n\nIntroduction\nThis is an Earth Engine <> TensorFlow demonstration notebook. Suppose you want to predict a continuous output (regression) from a stack of continuous inputs. In this example, the output is impervious surface area from NLCD and the input is a Landsat 8 composite. The model is a fully convolutional neural network (FCNN), specifically U-net. This notebook shows:\n\nExporting training/testing patches from Earth Engine, suitable for training an FCNN model.\nPreprocessing.\nTraining and validating an FCNN model.\nMaking predictions with the trained model and importing them to Earth Engine.\n\nSetup software libraries\nAuthenticate and import as necessary.", "# Cloud authentication.\nfrom google.colab import auth\nauth.authenticate_user()\n\n# Import, authenticate and initialize the Earth Engine library.\nimport ee\nee.Authenticate()\nee.Initialize()\n\n# Tensorflow setup.\nimport tensorflow as tf\nprint(tf.__version__)\n\n# Folium setup.\nimport folium\nprint(folium.__version__)", "Variables\nDeclare the variables that will be in use throughout the notebook.\nSpecify your Cloud Storage Bucket\nYou must have write access to a bucket to run this demo! To run it read-only, use the demo bucket below, but note that writes to this bucket will not work.", "# INSERT YOUR BUCKET HERE:\nBUCKET = 'your-bucket-name'", "Set other global variables", "# Specify names locations for outputs in Cloud Storage. 
\nFOLDER = 'fcnn-demo'\nTRAINING_BASE = 'training_patches'\nEVAL_BASE = 'eval_patches'\n\n# Specify inputs (Landsat bands) to the model and the response variable.\nopticalBands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7']\nthermalBands = ['B10', 'B11']\nBANDS = opticalBands + thermalBands\nRESPONSE = 'impervious'\nFEATURES = BANDS + [RESPONSE]\n\n# Specify the size and shape of patches expected by the model.\nKERNEL_SIZE = 256\nKERNEL_SHAPE = [KERNEL_SIZE, KERNEL_SIZE]\nCOLUMNS = [\n tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES\n]\nFEATURES_DICT = dict(zip(FEATURES, COLUMNS))\n\n# Sizes of the training and evaluation datasets.\nTRAIN_SIZE = 16000\nEVAL_SIZE = 8000\n\n# Specify model training parameters.\nBATCH_SIZE = 16\nEPOCHS = 10\nBUFFER_SIZE = 2000\nOPTIMIZER = 'SGD'\nLOSS = 'MeanSquaredError'\nMETRICS = ['RootMeanSquaredError']", "Imagery\nGather and setup the imagery to use for inputs (predictors). This is a three-year, cloud-free, Landsat 8 composite. Display it in the notebook for a sanity check.", "# Use Landsat 8 surface reflectance data.\nl8sr = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')\n\n# Cloud masking function.\ndef maskL8sr(image):\n cloudShadowBitMask = ee.Number(2).pow(3).int()\n cloudsBitMask = ee.Number(2).pow(5).int()\n qa = image.select('pixel_qa')\n mask1 = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And(\n qa.bitwiseAnd(cloudsBitMask).eq(0))\n mask2 = image.mask().reduce('min')\n mask3 = image.select(opticalBands).gt(0).And(\n image.select(opticalBands).lt(10000)).reduce('min')\n mask = mask1.And(mask2).And(mask3)\n return image.select(opticalBands).divide(10000).addBands(\n image.select(thermalBands).divide(10).clamp(273.15, 373.15)\n .subtract(273.15).divide(100)).updateMask(mask)\n\n# The image input data is a cloud-masked median composite.\nimage = l8sr.filterDate('2015-01-01', '2017-12-31').map(maskL8sr).median()\n\n# Use folium to visualize the imagery.\nmapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3})\nmap = folium.Map(location=[38., -122.5])\nfolium.TileLayer(\n tiles=mapid['tile_fetcher'].url_format,\n attr='Map Data &copy; <a href=\"https://earthengine.google.com/\">Google Earth Engine</a>',\n overlay=True,\n name='median composite',\n ).add_to(map)\n\nmapid = image.getMapId({'bands': ['B10'], 'min': 0, 'max': 0.5})\nfolium.TileLayer(\n tiles=mapid['tile_fetcher'].url_format,\n attr='Map Data &copy; <a href=\"https://earthengine.google.com/\">Google Earth Engine</a>',\n overlay=True,\n name='thermal',\n ).add_to(map)\nmap.add_child(folium.LayerControl())\nmap", "Prepare the response (what we want to predict). This is impervious surface area (in fraction of a pixel) from the 2016 NLCD dataset. Display to check.", "nlcd = ee.Image('USGS/NLCD/NLCD2016').select('impervious')\nnlcd = nlcd.divide(100).float()\n\nmapid = nlcd.getMapId({'min': 0, 'max': 1})\nmap = folium.Map(location=[38., -122.5])\nfolium.TileLayer(\n tiles=mapid['tile_fetcher'].url_format,\n attr='Map Data &copy; <a href=\"https://earthengine.google.com/\">Google Earth Engine</a>',\n overlay=True,\n name='nlcd impervious',\n ).add_to(map)\nmap.add_child(folium.LayerControl())\nmap", "Stack the 2D images (Landsat composite and NLCD impervious surface) to create a single image from which samples can be taken. Convert the image into an array image in which each pixel stores 256x256 patches of pixels for each band. 
This is a key step that bears emphasis: to export training patches, convert a multi-band image to an array image using neighborhoodToArray(), then sample the image at points.", "featureStack = ee.Image.cat([\n image.select(BANDS),\n nlcd.select(RESPONSE)\n]).float()\n\nlist = ee.List.repeat(1, KERNEL_SIZE)\nlists = ee.List.repeat(list, KERNEL_SIZE)\nkernel = ee.Kernel.fixed(KERNEL_SIZE, KERNEL_SIZE, lists)\n\narrays = featureStack.neighborhoodToArray(kernel)", "Use some pre-made geometries to sample the stack in strategic locations. Specifically, these are hand-made polygons in which to take the 256x256 samples. Display the sampling polygons on a map, red for training polygons, blue for evaluation.", "trainingPolys = ee.FeatureCollection('projects/google/DemoTrainingGeometries')\nevalPolys = ee.FeatureCollection('projects/google/DemoEvalGeometries')\n\npolyImage = ee.Image(0).byte().paint(trainingPolys, 1).paint(evalPolys, 2)\npolyImage = polyImage.updateMask(polyImage)\n\nmapid = polyImage.getMapId({'min': 1, 'max': 2, 'palette': ['red', 'blue']})\nmap = folium.Map(location=[38., -100.], zoom_start=5)\nfolium.TileLayer(\n tiles=mapid['tile_fetcher'].url_format,\n attr='Map Data &copy; <a href=\"https://earthengine.google.com/\">Google Earth Engine</a>',\n overlay=True,\n name='training polygons',\n ).add_to(map)\nmap.add_child(folium.LayerControl())\nmap", "Sampling\nThe mapped data look reasonable so take a sample from each polygon and merge the results into a single export. The key step is sampling the array image at points, to get all the pixels in a 256x256 neighborhood at each point. It's worth noting that to build the training and testing data for the FCNN, you export a single TFRecord file that contains patches of pixel values in each record. You do NOT need to export each training/testing patch to a different image. Since each record potentially contains a lot of data (especially with big patches or many input bands), some manual sharding of the computation is necessary to avoid the computed value too large error. 
Specifically, the following code takes multiple (smaller) samples within each geometry, merging the results to get a single export.", "# Convert the feature collections to lists for iteration.\ntrainingPolysList = trainingPolys.toList(trainingPolys.size())\nevalPolysList = evalPolys.toList(evalPolys.size())\n\n# These numbers determined experimentally.\nn = 200 # Number of shards in each polygon.\nN = 2000 # Total sample size in each polygon.\n\n# Export all the training data (in many pieces), with one task \n# per geometry.\nfor g in range(trainingPolys.size().getInfo()):\n geomSample = ee.FeatureCollection([])\n for i in range(n):\n sample = arrays.sample(\n region = ee.Feature(trainingPolysList.get(g)).geometry(), \n scale = 30,\n numPixels = N / n, # Size of the shard.\n seed = i,\n tileScale = 8\n )\n geomSample = geomSample.merge(sample)\n\n desc = TRAINING_BASE + '_g' + str(g)\n task = ee.batch.Export.table.toCloudStorage(\n collection = geomSample,\n description = desc,\n bucket = BUCKET,\n fileNamePrefix = FOLDER + '/' + desc,\n fileFormat = 'TFRecord',\n selectors = BANDS + [RESPONSE]\n )\n task.start()\n\n# Export all the evaluation data.\nfor g in range(evalPolys.size().getInfo()):\n geomSample = ee.FeatureCollection([])\n for i in range(n):\n sample = arrays.sample(\n region = ee.Feature(evalPolysList.get(g)).geometry(), \n scale = 30,\n numPixels = N / n,\n seed = i,\n tileScale = 8\n )\n geomSample = geomSample.merge(sample)\n\n desc = EVAL_BASE + '_g' + str(g)\n task = ee.batch.Export.table.toCloudStorage(\n collection = geomSample,\n description = desc,\n bucket = BUCKET,\n fileNamePrefix = FOLDER + '/' + desc,\n fileFormat = 'TFRecord',\n selectors = BANDS + [RESPONSE]\n )\n task.start()", "Training data\nLoad the data exported from Earth Engine into a tf.data.Dataset. The following are helper functions for that.", "def parse_tfrecord(example_proto):\n \"\"\"The parsing function.\n Read a serialized example into the structure defined by FEATURES_DICT.\n Args:\n example_proto: a serialized Example.\n Returns:\n A dictionary of tensors, keyed by feature name.\n \"\"\"\n return tf.io.parse_single_example(example_proto, FEATURES_DICT)\n\n\ndef to_tuple(inputs):\n \"\"\"Function to convert a dictionary of tensors to a tuple of (inputs, outputs).\n Turn the tensors returned by parse_tfrecord into a stack in HWC shape.\n Args:\n inputs: A dictionary of tensors, keyed by feature name.\n Returns:\n A tuple of (inputs, outputs).\n \"\"\"\n inputsList = [inputs.get(key) for key in FEATURES]\n stacked = tf.stack(inputsList, axis=0)\n # Convert from CHW to HWC\n stacked = tf.transpose(stacked, [1, 2, 0])\n return stacked[:,:,:len(BANDS)], stacked[:,:,len(BANDS):]\n\n\ndef get_dataset(pattern):\n \"\"\"Function to read, parse and format to tuple a set of input tfrecord files.\n Get all the files matching the pattern, parse and convert to tuple.\n Args:\n pattern: A file pattern to match in a Cloud Storage bucket.\n Returns:\n A tf.data.Dataset\n \"\"\"\n glob = tf.io.gfile.glob(pattern)\n dataset = tf.data.TFRecordDataset(glob, compression_type='GZIP')\n dataset = dataset.map(parse_tfrecord, num_parallel_calls=5)\n dataset = dataset.map(to_tuple, num_parallel_calls=5)\n return dataset", "Use the helpers to read in the training dataset. 
Print the first record to check.", "def get_training_dataset():\n\t\"\"\"Get the preprocessed training dataset\n Returns: \n A tf.data.Dataset of training data.\n \"\"\"\n\tglob = 'gs://' + BUCKET + '/' + FOLDER + '/' + TRAINING_BASE + '*'\n\tdataset = get_dataset(glob)\n\tdataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()\n\treturn dataset\n\ntraining = get_training_dataset()\n\nprint(iter(training.take(1)).next())", "Evaluation data\nNow do the same thing to get an evaluation dataset. Note that unlike the training dataset, the evaluation dataset has a batch size of 1, is not repeated and is not shuffled.", "def get_eval_dataset():\n\t\"\"\"Get the preprocessed evaluation dataset\n Returns: \n A tf.data.Dataset of evaluation data.\n \"\"\"\n\tglob = 'gs://' + BUCKET + '/' + FOLDER + '/' + EVAL_BASE + '*'\n\tdataset = get_dataset(glob)\n\tdataset = dataset.batch(1).repeat()\n\treturn dataset\n\nevaluation = get_eval_dataset()", "Model\nHere we use the Keras implementation of the U-Net model. The U-Net model takes 256x256 pixel patches as input and outputs per-pixel class probability, label or a continuous output. We can implement the model essentially unmodified, but will use mean squared error loss on the sigmoidal output since we are treating this as a regression problem, rather than a classification problem. Since impervious surface fraction is constrained to [0,1], with many values close to zero or one, a saturating activation function is suitable here.", "from tensorflow.python.keras import layers\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import metrics\nfrom tensorflow.python.keras import optimizers\n\ndef conv_block(input_tensor, num_filters):\n\tencoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)\n\tencoder = layers.BatchNormalization()(encoder)\n\tencoder = layers.Activation('relu')(encoder)\n\tencoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)\n\tencoder = layers.BatchNormalization()(encoder)\n\tencoder = layers.Activation('relu')(encoder)\n\treturn encoder\n\ndef encoder_block(input_tensor, num_filters):\n\tencoder = conv_block(input_tensor, num_filters)\n\tencoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)\n\treturn encoder_pool, encoder\n\ndef decoder_block(input_tensor, concat_tensor, num_filters):\n\tdecoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)\n\tdecoder = layers.concatenate([concat_tensor, decoder], axis=-1)\n\tdecoder = layers.BatchNormalization()(decoder)\n\tdecoder = layers.Activation('relu')(decoder)\n\tdecoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)\n\tdecoder = layers.BatchNormalization()(decoder)\n\tdecoder = layers.Activation('relu')(decoder)\n\tdecoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)\n\tdecoder = layers.BatchNormalization()(decoder)\n\tdecoder = layers.Activation('relu')(decoder)\n\treturn decoder\n\ndef get_model():\n\tinputs = layers.Input(shape=[None, None, len(BANDS)]) # 256\n\tencoder0_pool, encoder0 = encoder_block(inputs, 32) # 128\n\tencoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64\n\tencoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32\n\tencoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16\n\tencoder4_pool, encoder4 = encoder_block(encoder3_pool, 512) # 8\n\tcenter = conv_block(encoder4_pool, 1024) # center\n\tdecoder4 = decoder_block(center, encoder4, 512) # 
16\n\tdecoder3 = decoder_block(decoder4, encoder3, 256) # 32\n\tdecoder2 = decoder_block(decoder3, encoder2, 128) # 64\n\tdecoder1 = decoder_block(decoder2, encoder1, 64) # 128\n\tdecoder0 = decoder_block(decoder1, encoder0, 32) # 256\n\toutputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)\n\n\tmodel = models.Model(inputs=[inputs], outputs=[outputs])\n\n\tmodel.compile(\n\t\toptimizer=optimizers.get(OPTIMIZER), \n\t\tloss=losses.get(LOSS),\n\t\tmetrics=[metrics.get(metric) for metric in METRICS])\n\n\treturn model", "Training the model\nYou train a Keras model by calling .fit() on it. Here we're going to train for 10 epochs, which is suitable for demonstration purposes. For production use, you probably want to optimize this parameter, for example through hyperparamter tuning.", "m = get_model()\n\nm.fit(\n x=training, \n epochs=EPOCHS, \n steps_per_epoch=int(TRAIN_SIZE / BATCH_SIZE), \n validation_data=evaluation,\n validation_steps=EVAL_SIZE)", "Note that the notebook VM is sometimes not heavy-duty enough to get through a whole training job, especially if you have a large buffer size or a large number of epochs. You can still use this notebook for training, but may need to set up an alternative VM (learn more) for production use. Alternatively, you can package your code for running large training jobs on Google's AI Platform as described here. The following code loads a pre-trained model, which you can use for predictions right away.", "# Load a trained model. 50 epochs. 25 hours. Final RMSE ~0.08.\nMODEL_DIR = 'gs://ee-docs-demos/fcnn-demo/trainer/model'\nm = tf.keras.models.load_model(MODEL_DIR)\nm.summary()", "Prediction\nThe prediction pipeline is:\n\nExport imagery on which to do predictions from Earth Engine in TFRecord format to a Cloud Storge bucket.\nUse the trained model to make the predictions.\nWrite the predictions to a TFRecord file in a Cloud Storage.\nUpload the predictions TFRecord file to Earth Engine.\n\nThe following functions handle this process. It's useful to separate the export from the predictions so that you can experiment with different models without running the export every time.", "def doExport(out_image_base, kernel_buffer, region):\n \"\"\"Run the image export task. 
Block until complete.\n \"\"\"\n task = ee.batch.Export.image.toCloudStorage(\n image = image.select(BANDS),\n description = out_image_base,\n bucket = BUCKET,\n fileNamePrefix = FOLDER + '/' + out_image_base,\n region = region.getInfo()['coordinates'],\n scale = 30,\n fileFormat = 'TFRecord',\n maxPixels = 1e10,\n formatOptions = {\n 'patchDimensions': KERNEL_SHAPE,\n 'kernelSize': kernel_buffer,\n 'compressed': True,\n 'maxFileSize': 104857600\n }\n )\n task.start()\n\n # Block until the task completes.\n print('Running image export to Cloud Storage...')\n import time\n while task.active():\n time.sleep(30)\n\n # Error condition\n if task.status()['state'] != 'COMPLETED':\n print('Error with image export.')\n else:\n print('Image export completed.')\n\ndef doPrediction(out_image_base, user_folder, kernel_buffer, region):\n \"\"\"Perform inference on exported imagery, upload to Earth Engine.\n \"\"\"\n\n print('Looking for TFRecord files...')\n\n # Get a list of all the files in the output bucket.\n filesList = !gsutil ls 'gs://'{BUCKET}'/'{FOLDER}\n\n # Get only the files generated by the image export.\n exportFilesList = [s for s in filesList if out_image_base in s]\n\n # Get the list of image files and the JSON mixer file.\n imageFilesList = []\n jsonFile = None\n for f in exportFilesList:\n if f.endswith('.tfrecord.gz'):\n imageFilesList.append(f)\n elif f.endswith('.json'):\n jsonFile = f\n\n # Make sure the files are in the right order.\n imageFilesList.sort()\n\n from pprint import pprint\n pprint(imageFilesList)\n print(jsonFile)\n\n import json\n # Load the contents of the mixer file to a JSON object.\n jsonText = !gsutil cat {jsonFile}\n # Get a single string w/ newlines from the IPython.utils.text.SList\n mixer = json.loads(jsonText.nlstr)\n pprint(mixer)\n patches = mixer['totalPatches']\n\n # Get set up for prediction.\n x_buffer = int(kernel_buffer[0] / 2)\n y_buffer = int(kernel_buffer[1] / 2)\n\n buffered_shape = [\n KERNEL_SHAPE[0] + kernel_buffer[0],\n KERNEL_SHAPE[1] + kernel_buffer[1]]\n\n imageColumns = [\n tf.io.FixedLenFeature(shape=buffered_shape, dtype=tf.float32) \n for k in BANDS\n ]\n\n imageFeaturesDict = dict(zip(BANDS, imageColumns))\n\n def parse_image(example_proto):\n return tf.io.parse_single_example(example_proto, imageFeaturesDict)\n\n def toTupleImage(inputs):\n inputsList = [inputs.get(key) for key in BANDS]\n stacked = tf.stack(inputsList, axis=0)\n stacked = tf.transpose(stacked, [1, 2, 0])\n return stacked\n\n # Create a dataset from the TFRecord file(s) in Cloud Storage.\n imageDataset = tf.data.TFRecordDataset(imageFilesList, compression_type='GZIP')\n imageDataset = imageDataset.map(parse_image, num_parallel_calls=5)\n imageDataset = imageDataset.map(toTupleImage).batch(1)\n\n # Perform inference.\n print('Running predictions...')\n predictions = m.predict(imageDataset, steps=patches, verbose=1)\n # print(predictions[0])\n\n print('Writing predictions...')\n out_image_file = 'gs://' + BUCKET + '/' + FOLDER + '/' + out_image_base + '.TFRecord'\n writer = tf.io.TFRecordWriter(out_image_file)\n patches = 0\n for predictionPatch in predictions:\n print('Writing patch ' + str(patches) + '...')\n predictionPatch = predictionPatch[\n x_buffer:x_buffer+KERNEL_SIZE, y_buffer:y_buffer+KERNEL_SIZE]\n\n # Create an example.\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'impervious': tf.train.Feature(\n float_list=tf.train.FloatList(\n value=predictionPatch.flatten()))\n }\n )\n )\n # Write the example.\n 
writer.write(example.SerializeToString())\n patches += 1\n\n writer.close()\n\n # Start the upload.\n out_image_asset = user_folder + '/' + out_image_base\n !earthengine upload image --asset_id={out_image_asset} {out_image_file} {jsonFile}", "Now there's all the code needed to run the prediction pipeline, all that remains is to specify the output region in which to do the prediction, the names of the output files, where to put them, and the shape of the outputs. In terms of the shape, the model is trained on 256x256 patches, but can work (in theory) on any patch that's big enough with even dimensions (reference). Because of tile boundary artifacts, give the model slightly larger patches for prediction, then clip out the middle 256x256 patch. This is controlled with a kernel buffer, half the size of which will extend beyond the kernel buffer. For example, specifying a 128x128 kernel will append 64 pixels on each side of the patch, to ensure that the pixels in the output are taken from inputs completely covered by the kernel.", "# Output assets folder: YOUR FOLDER\nuser_folder = 'users/username' # INSERT YOUR FOLDER HERE.\n\n# Base file name to use for TFRecord files and assets.\nbj_image_base = 'FCNN_demo_beijing_384_'\n# Half this will extend on the sides of each patch.\nbj_kernel_buffer = [128, 128]\n# Beijing\nbj_region = ee.Geometry.Polygon(\n [[[115.9662455210937, 40.121362012835235],\n [115.9662455210937, 39.64293313749715],\n [117.01818643906245, 39.64293313749715],\n [117.01818643906245, 40.121362012835235]]], None, False)\n\n# Run the export.\ndoExport(bj_image_base, bj_kernel_buffer, bj_region)\n\n# Run the prediction.\ndoPrediction(bj_image_base, user_folder, bj_kernel_buffer, bj_region)", "Display the output\nOne the data has been exported, the model has made predictions and the predictions have been written to a file, and the image imported to Earth Engine, it's possible to display the resultant Earth Engine asset. Here, display the impervious area predictions over Beijing, China.", "out_image = ee.Image(user_folder + '/' + bj_image_base)\nmapid = out_image.getMapId({'min': 0, 'max': 1})\nmap = folium.Map(location=[39.898, 116.5097])\nfolium.TileLayer(\n tiles=mapid['tile_fetcher'].url_format,\n attr='Map Data &copy; <a href=\"https://earthengine.google.com/\">Google Earth Engine</a>',\n overlay=True,\n name='predicted impervious',\n ).add_to(map)\nmap.add_child(folium.LayerControl())\nmap" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/test-institute-2/cmip6/models/sandbox-3/atmos.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Atmos\nMIP Era: CMIP6\nInstitute: TEST-INSTITUTE-2\nSource ID: SANDBOX-3\nTopic: Atmos\nSub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. \nProperties: 156 (127 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:45\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'test-institute-2', 'sandbox-3', 'atmos')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Overview\n2. Key Properties --&gt; Resolution\n3. Key Properties --&gt; Timestepping\n4. Key Properties --&gt; Orography\n5. Grid --&gt; Discretisation\n6. Grid --&gt; Discretisation --&gt; Horizontal\n7. Grid --&gt; Discretisation --&gt; Vertical\n8. Dynamical Core\n9. Dynamical Core --&gt; Top Boundary\n10. Dynamical Core --&gt; Lateral Boundary\n11. Dynamical Core --&gt; Diffusion Horizontal\n12. Dynamical Core --&gt; Advection Tracers\n13. Dynamical Core --&gt; Advection Momentum\n14. Radiation\n15. Radiation --&gt; Shortwave Radiation\n16. Radiation --&gt; Shortwave GHG\n17. Radiation --&gt; Shortwave Cloud Ice\n18. Radiation --&gt; Shortwave Cloud Liquid\n19. Radiation --&gt; Shortwave Cloud Inhomogeneity\n20. Radiation --&gt; Shortwave Aerosols\n21. Radiation --&gt; Shortwave Gases\n22. Radiation --&gt; Longwave Radiation\n23. Radiation --&gt; Longwave GHG\n24. Radiation --&gt; Longwave Cloud Ice\n25. Radiation --&gt; Longwave Cloud Liquid\n26. Radiation --&gt; Longwave Cloud Inhomogeneity\n27. Radiation --&gt; Longwave Aerosols\n28. Radiation --&gt; Longwave Gases\n29. Turbulence Convection\n30. Turbulence Convection --&gt; Boundary Layer Turbulence\n31. Turbulence Convection --&gt; Deep Convection\n32. Turbulence Convection --&gt; Shallow Convection\n33. Microphysics Precipitation\n34. Microphysics Precipitation --&gt; Large Scale Precipitation\n35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics\n36. Cloud Scheme\n37. Cloud Scheme --&gt; Optical Cloud Properties\n38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution\n39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution\n40. Observation Simulation\n41. Observation Simulation --&gt; Isscp Attributes\n42. Observation Simulation --&gt; Cosp Attributes\n43. Observation Simulation --&gt; Radar Inputs\n44. Observation Simulation --&gt; Lidar Inputs\n45. Gravity Waves\n46. Gravity Waves --&gt; Orographic Gravity Waves\n47. Gravity Waves --&gt; Non Orographic Gravity Waves\n48. Solar\n49. Solar --&gt; Solar Pathways\n50. Solar --&gt; Solar Constant\n51. Solar --&gt; Orbital Parameters\n52. Solar --&gt; Insolation Ozone\n53. Volcanos\n54. Volcanos --&gt; Volcanoes Treatment \n1. Key Properties --&gt; Overview\nTop level key properties\n1.1. 
Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Family\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of atmospheric model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"AGCM\" \n# \"ARCM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBasic approximations made in the atmosphere.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"primitive equations\" \n# \"non-hydrostatic\" \n# \"anelastic\" \n# \"Boussinesq\" \n# \"hydrostatic\" \n# \"quasi-hydrostatic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Resolution\nCharacteristics of the model resolution\n2.1. Horizontal Resolution Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Range Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.4. Number Of Vertical Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels resolved on the computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "2.5. 
High Top\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.high_top') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestepping\nCharacteristics of the atmosphere model time stepping\n3.1. Timestep Dynamics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the dynamics, e.g. 30 min.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. Timestep Shortwave Radiative Transfer\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for the shortwave radiative transfer, e.g. 1.5 hours.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.3. Timestep Longwave Radiative Transfer\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for the longwave radiative transfer, e.g. 3 hours.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Orography\nCharacteristics of the model orography\n4.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime adaptation of the orography.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"modified\" \n# TODO - please enter value(s)\n", "4.2. Changes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nIf the orography type is modified describe the time adaptation changes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.changes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"related to ice sheets\" \n# \"related to tectonics\" \n# \"modified mean\" \n# \"modified variance if taken into account in model (cf gravity waves)\" \n# TODO - please enter value(s)\n", "5. Grid --&gt; Discretisation\nAtmosphere grid discretisation\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of grid discretisation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid --&gt; Discretisation --&gt; Horizontal\nAtmosphere discretisation in the horizontal\n6.1. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation type", "# PROPERTY ID - DO NOT EDIT ! 
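# --- Illustrative usage (sketch only, not a recorded TEST-INSTITUTE-2 value) --- 
# A single-valued ENUM property (Cardinality 1.1) is filled by passing one of the 
# valid choices listed in this cell to DOC.set_value, for example: 
# DOC.set_value("spectral") 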
\nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spectral\" \n# \"fixed grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"finite elements\" \n# \"finite volumes\" \n# \"finite difference\" \n# \"centered finite difference\" \n# TODO - please enter value(s)\n", "6.3. Scheme Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation function order", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"second\" \n# \"third\" \n# \"fourth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.4. Horizontal Pole\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal discretisation pole singularity treatment", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"filter\" \n# \"pole rotation\" \n# \"artificial island\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.5. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal grid type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gaussian\" \n# \"Latitude-Longitude\" \n# \"Cubed-Sphere\" \n# \"Icosahedral\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7. Grid --&gt; Discretisation --&gt; Vertical\nAtmosphere discretisation in the vertical\n7.1. Coordinate Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType of vertical coordinate system", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"isobaric\" \n# \"sigma\" \n# \"hybrid sigma-pressure\" \n# \"hybrid pressure\" \n# \"vertically lagrangian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Dynamical Core\nCharacteristics of the dynamical core\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of atmosphere dynamical core", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the dynamical core of the model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.dynamical_core.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Timestepping Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestepping framework type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Adams-Bashforth\" \n# \"explicit\" \n# \"implicit\" \n# \"semi-implicit\" \n# \"leap frog\" \n# \"multi-step\" \n# \"Runge Kutta fifth order\" \n# \"Runge Kutta second order\" \n# \"Runge Kutta third order\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of the model prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface pressure\" \n# \"wind components\" \n# \"divergence/curl\" \n# \"temperature\" \n# \"potential temperature\" \n# \"total water\" \n# \"water vapour\" \n# \"water liquid\" \n# \"water ice\" \n# \"total water moments\" \n# \"clouds\" \n# \"radiation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9. Dynamical Core --&gt; Top Boundary\nType of boundary layer at the top of the model\n9.1. Top Boundary Condition\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop boundary condition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Top Heat\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop boundary heat treatment", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Top Wind\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop boundary wind treatment", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Dynamical Core --&gt; Lateral Boundary\nType of lateral boundary condition (if the model is a regional model)\n10.1. Condition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nType of lateral boundary condition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11. Dynamical Core --&gt; Diffusion Horizontal\nHorizontal diffusion scheme\n11.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal diffusion scheme name", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.2. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal diffusion scheme method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"iterated Laplacian\" \n# \"bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Dynamical Core --&gt; Advection Tracers\nTracer advection scheme\n12.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTracer advection scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heun\" \n# \"Roe and VanLeer\" \n# \"Roe and Superbee\" \n# \"Prather\" \n# \"UTOPIA\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.2. Scheme Characteristics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTracer advection scheme characteristics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Eulerian\" \n# \"modified Euler\" \n# \"Lagrangian\" \n# \"semi-Lagrangian\" \n# \"cubic semi-Lagrangian\" \n# \"quintic semi-Lagrangian\" \n# \"mass-conserving\" \n# \"finite volume\" \n# \"flux-corrected\" \n# \"linear\" \n# \"quadratic\" \n# \"quartic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.3. Conserved Quantities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTracer advection scheme conserved quantities", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"dry mass\" \n# \"tracer mass\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.4. Conservation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracer advection scheme conservation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Priestley algorithm\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Dynamical Core --&gt; Advection Momentum\nMomentum advection scheme\n13.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nMomentum advection schemes name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"VanLeer\" \n# \"Janjic\" \n# \"SUPG (Streamline Upwind Petrov-Galerkin)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Scheme Characteristics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMomentum advection scheme characteristics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"2nd order\" \n# \"4th order\" \n# \"cell-centred\" \n# \"staggered grid\" \n# \"semi-staggered grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Scheme Staggering Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMomentum advection scheme staggering type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa D-grid\" \n# \"Arakawa E-grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Conserved Quantities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMomentum advection scheme conserved quantities", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Angular momentum\" \n# \"Horizontal momentum\" \n# \"Enstrophy\" \n# \"Mass\" \n# \"Total energy\" \n# \"Vorticity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Conservation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMomentum advection scheme conservation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Radiation\nCharacteristics of the atmosphere radiation process\n14.1. Aerosols\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAerosols whose radiative effect is taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.aerosols') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sulphate\" \n# \"nitrate\" \n# \"sea salt\" \n# \"dust\" \n# \"ice\" \n# \"organic\" \n# \"BC (black carbon / soot)\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"polar stratospheric ice\" \n# \"NAT (nitric acid trihydrate)\" \n# \"NAD (nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particle)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15. Radiation --&gt; Shortwave Radiation\nProperties of the shortwave radiation scheme\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of shortwave radiation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. 
Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Spectral Integration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nShortwave radiation scheme spectral integration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Transport Calculation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nShortwave radiation transport calculation methods", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.5. Spectral Intervals\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nShortwave radiation scheme number of spectral intervals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Radiation --&gt; Shortwave GHG\nRepresentation of greenhouse gases in the shortwave radiation scheme\n16.1. Greenhouse Gas Complexity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nComplexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. ODS\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOzone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.3. 
Other Flourinated Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17. Radiation --&gt; Shortwave Cloud Ice\nShortwave radiative properties of ice crystals in clouds\n17.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud ice crystals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud ice crystals in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18. Radiation --&gt; Shortwave Cloud Liquid\nShortwave radiative properties of liquid droplets in clouds\n18.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud liquid droplets", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. 
Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19. Radiation --&gt; Shortwave Cloud Inhomogeneity\nCloud inhomogeneity in the shortwave radiation scheme\n19.1. Cloud Inhomogeneity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Radiation --&gt; Shortwave Aerosols\nShortwave radiative properties of aerosols\n20.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with aerosols", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of aerosols in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to aerosols in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! 
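# --- Illustrative usage (sketch only; the repeated-call pattern is an assumption) --- 
# For a multi-valued ENUM property (Cardinality 1.N), each selected choice is 
# presumably recorded with its own DOC.set_value call, for example: 
# DOC.set_value("Mie theory") 
# DOC.set_value("geometric optics") 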
\nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21. Radiation --&gt; Shortwave Gases\nShortwave radiative properties of gases\n21.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with gases", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Radiation --&gt; Longwave Radiation\nProperties of the longwave radiation scheme\n22.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of longwave radiation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.2. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the longwave radiation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Spectral Integration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLongwave radiation scheme spectral integration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.4. Transport Calculation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nLongwave radiation transport calculation methods", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.5. Spectral Intervals\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLongwave radiation scheme number of spectral intervals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "23. Radiation --&gt; Longwave GHG\nRepresentation of greenhouse gases in the longwave radiation scheme\n23.1. 
Greenhouse Gas Complexity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nComplexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. ODS\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOzone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.3. Other Flourinated Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther flourinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24. Radiation --&gt; Longwave Cloud Ice\nLongwave radiative properties of ice crystals in clouds\n24.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with cloud ice crystals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.2. Physical Reprenstation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud ice crystals in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25. Radiation --&gt; Longwave Cloud Liquid\nLongwave radiative properties of liquid droplets in clouds\n25.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with cloud liquid droplets", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26. Radiation --&gt; Longwave Cloud Inhomogeneity\nCloud inhomogeneity in the longwave radiation scheme\n26.1. Cloud Inhomogeneity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27. Radiation --&gt; Longwave Aerosols\nLongwave radiative properties of aerosols\n27.1. 
General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with aerosols", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of aerosols in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to aerosols in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "28. Radiation --&gt; Longwave Gases\nLongwave radiative properties of gases\n28.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with gases", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "29. Turbulence Convection\nAtmosphere Convective Turbulence and Clouds\n29.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of atmosphere convection and turbulence", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30. Turbulence Convection --&gt; Boundary Layer Turbulence\nProperties of the boundary layer turbulence scheme\n30.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBoundary layer turbulence scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Mellor-Yamada\" \n# \"Holtslag-Boville\" \n# \"EDMF\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.2. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBoundary layer turbulence scheme type", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TKE prognostic\" \n# \"TKE diagnostic\" \n# \"TKE coupled with water\" \n# \"vertical profile of Kz\" \n# \"non-local diffusion\" \n# \"Monin-Obukhov similarity\" \n# \"Coastal Buddy Scheme\" \n# \"Coupled with convection\" \n# \"Coupled with gravity waves\" \n# \"Depth capped at cloud base\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.3. Closure Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBoundary layer turbulence scheme closure order", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Counter Gradient\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nUses boundary layer turbulence scheme counter gradient", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "31. Turbulence Convection --&gt; Deep Convection\nProperties of the deep convection scheme\n31.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDeep convection scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31.2. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDeep convection scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"adjustment\" \n# \"plume ensemble\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.3. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDeep convection scheme method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CAPE\" \n# \"bulk\" \n# \"ensemble\" \n# \"CAPE/WFN based\" \n# \"TKE/CIN based\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.4. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of deep convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vertical momentum transport\" \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"updrafts\" \n# \"downdrafts\" \n# \"radiative effect of anvils\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.5. 
Microphysics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nMicrophysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32. Turbulence Convection --&gt; Shallow Convection\nProperties of the shallow convection scheme\n32.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nShallow convection scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.2. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nshallow convection scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"cumulus-capped boundary layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.3. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nshallow convection scheme method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"same as deep (unified)\" \n# \"included in boundary layer turbulence\" \n# \"separate diagnosis\" \n# TODO - please enter value(s)\n", "32.4. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of shallow convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.5. Microphysics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nMicrophysics scheme for shallow convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33. Microphysics Precipitation\nLarge Scale Cloud Microphysics and Precipitation\n33.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of large scale cloud microphysics and precipitation", "# PROPERTY ID - DO NOT EDIT ! 
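# --- Illustrative usage (sketch only, not a real model description) --- 
# A free-text STRING property takes an ordinary Python string, for example: 
# DOC.set_value("Single-moment bulk microphysics with prognostic cloud water and cloud ice") 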
\nDOC.set_id('cmip6.atmos.microphysics_precipitation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34. Microphysics Precipitation --&gt; Large Scale Precipitation\nProperties of the large scale precipitation scheme\n34.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name of the large scale precipitation parameterisation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34.2. Hydrometeors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrecipitating hydrometeors taken into account in the large scale precipitation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"liquid rain\" \n# \"snow\" \n# \"hail\" \n# \"graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics\nProperties of the large scale cloud microphysics scheme\n35.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name of the microphysics parameterisation scheme used for large scale clouds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nLarge scale cloud microphysics processes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mixed phase\" \n# \"cloud droplets\" \n# \"cloud ice\" \n# \"ice nucleation\" \n# \"water vapour deposition\" \n# \"effect of raindrops\" \n# \"effect of snow\" \n# \"effect of graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36. Cloud Scheme\nCharacteristics of the cloud scheme\n36.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of the atmosphere cloud scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.2. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the cloud scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.3. Atmos Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nAtmosphere components that are linked to the cloud scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"atmosphere_radiation\" \n# \"atmosphere_microphysics_precipitation\" \n# \"atmosphere_turbulence_convection\" \n# \"atmosphere_gravity_waves\" \n# \"atmosphere_solar\" \n# \"atmosphere_volcano\" \n# \"atmosphere_cloud_simulator\" \n# TODO - please enter value(s)\n", "36.4. Uses Separate Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDifferent cloud schemes for the different types of clouds (convective, stratiform and boundary layer)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36.5. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the cloud scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"entrainment\" \n# \"detrainment\" \n# \"bulk cloud\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36.6. Prognostic Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the cloud scheme a prognostic scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36.7. Diagnostic Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the cloud scheme a diagnostic scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36.8. Prognostic Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList the prognostic variables used by the cloud scheme, if applicable.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud amount\" \n# \"liquid\" \n# \"ice\" \n# \"rain\" \n# \"snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "37. Cloud Scheme --&gt; Optical Cloud Properties\nOptical cloud properties\n37.1. Cloud Overlap Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nMethod for taking into account overlapping of cloud layers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"random\" \n# \"maximum\" \n# \"maximum-random\" \n# \"exponential\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "37.2. Cloud Inhomogeneity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nMethod for taking into account cloud inhomogeneity", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution\nSub-grid scale water distribution\n38.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale water distribution type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n", "38.2. Function Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale water distribution function name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38.3. Function Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale water distribution function type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "38.4. Convection Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSub-grid scale water distribution coupling with convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n", "39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution\nSub-grid scale ice distribution\n39.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale ice distribution type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n", "39.2. Function Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale ice distribution function name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "39.3. Function Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale ice distribution function type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "39.4. Convection Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSub-grid scale ice distribution coupling with convection", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n", "40. Observation Simulation\nCharacteristics of observation simulation\n40.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of observation simulator characteristics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "41. Observation Simulation --&gt; Isscp Attributes\nISSCP Characteristics\n41.1. Top Height Estimation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nCloud simulator ISSCP top height estimation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"no adjustment\" \n# \"IR brightness\" \n# \"visible optical depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.2. Top Height Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator ISSCP top height direction", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"lowest altitude level\" \n# \"highest altitude level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "42. Observation Simulation --&gt; Cosp Attributes\nCFMIP Observational Simulator Package attributes\n42.1. Run Configuration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP run configuration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Inline\" \n# \"Offline\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "42.2. Number Of Grid Points\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP number of grid points", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "42.3. Number Of Sub Columns\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP number of sub-columns used to simulate sub-grid variability", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "42.4. Number Of Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP number of levels", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "43. Observation Simulation --&gt; Radar Inputs\nCharacteristics of the cloud radar simulator\n43.1. Frequency\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar frequency (Hz)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "43.2. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface\" \n# \"space borne\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "43.3. Gas Absorption\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar uses gas absorption", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "43.4. Effective Radius\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar uses effective radius", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "44. Observation Simulation --&gt; Lidar Inputs\nCharacteristics of the cloud lidar simulator\n44.1. Ice Types\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator lidar ice type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice spheres\" \n# \"ice non-spherical\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "44.2. Overlap\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nCloud simulator lidar overlap", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"max\" \n# \"random\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "45. Gravity Waves\nCharacteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.\n45.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of gravity wave parameterisation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "45.2. 
Sponge Layer\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSponge layer in the upper levels in order to avoid gravity wave reflection at the top.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rayleigh friction\" \n# \"Diffusive sponge layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "45.3. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground wave distribution", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"continuous spectrum\" \n# \"discrete spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "45.4. Subgrid Scale Orography\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSubgrid scale orography effects taken into account.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"effect on drag\" \n# \"effect on lifting\" \n# \"enhanced topography\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46. Gravity Waves --&gt; Orographic Gravity Waves\nGravity waves generated due to the presence of orography\n46.1. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the orographic gravity wave scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "46.2. Source Mechanisms\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOrographic gravity wave source mechanisms", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear mountain waves\" \n# \"hydraulic jump\" \n# \"envelope orography\" \n# \"low level flow blocking\" \n# \"statistical sub-grid scale variance\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46.3. Calculation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOrographic gravity wave calculation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"non-linear calculation\" \n# \"more than two cardinal directions\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46.4. Propagation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrographic gravity wave propogation scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"includes boundary layer ducting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46.5. Dissipation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrographic gravity wave dissipation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "47. Gravity Waves --&gt; Non Orographic Gravity Waves\nGravity waves generated by non-orographic processes.\n47.1. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the non-orographic gravity wave scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "47.2. Source Mechanisms\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNon-orographic gravity wave source mechanisms", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convection\" \n# \"precipitation\" \n# \"background spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "47.3. Calculation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNon-orographic gravity wave calculation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spatially dependent\" \n# \"temporally dependent\" \n# TODO - please enter value(s)\n", "47.4. Propagation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNon-orographic gravity wave propogation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "47.5. Dissipation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNon-orographic gravity wave dissipation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "48. Solar\nTop of atmosphere solar insolation characteristics\n48.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of solar insolation of the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "49. Solar --&gt; Solar Pathways\nPathways for solar forcing of the atmosphere\n49.1. Pathways\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPathways for the solar forcing of the atmosphere model domain", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SW radiation\" \n# \"precipitating energetic particles\" \n# \"cosmic rays\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "50. Solar --&gt; Solar Constant\nSolar constant and top of atmosphere insolation characteristics\n50.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime adaptation of the solar constant.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n", "50.2. Fixed Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the solar constant is fixed, enter the value of the solar constant (W m-2).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "50.3. Transient Characteristics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nsolar constant transient characteristics (W m-2)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "51. Solar --&gt; Orbital Parameters\nOrbital parameters and top of atmosphere insolation characteristics\n51.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime adaptation of orbital parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n", "51.2. Fixed Reference Date\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nReference date for fixed orbital parameters (yyyy)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "51.3. Transient Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of transient orbital parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "51.4. 
Computation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used for computing orbital parameters.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Berger 1978\" \n# \"Laskar 2004\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "52. Solar --&gt; Insolation Ozone\nImpact of solar insolation on stratospheric ozone\n52.1. Solar Ozone Impact\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes top of atmosphere insolation impact on stratospheric ozone?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "53. Volcanos\nCharacteristics of the implementation of volcanoes\n53.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of the implementation of volcanic effects in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "54. Volcanos --&gt; Volcanoes Treatment\nTreatment of volcanoes in the atmosphere\n54.1. Volcanoes Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow volcanic effects are modeled in the atmosphere.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"high frequency solar constant anomaly\" \n# \"stratospheric aerosols optical thickness\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
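The ES-DOC property cells in the atmosphere notebook above all repeat the same two-step pattern: select a property with `DOC.set_id` and record a value with `DOC.set_value`. A minimal sketch of how a few of those cells might be completed is shown below. It assumes `DOC` was initialised earlier in that notebook with pyesdoc's `NotebookOutput` (as in the sea ice notebook later in this collection), the topic name `'atmos'` is inferred from the property paths, and the example values are placeholders chosen from the listed valid choices, not a description of any particular model.

```python
# Illustrative sketch only - not part of the generated ES-DOC notebook.
# Assumes DOC = NotebookOutput('cmip6', '<institute>', '<source_id>', 'atmos')
# was created in the notebook's document-setup cell.

# ENUM property: the string must match one of the listed valid choices.
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
DOC.set_value("maximum-random")

# BOOLEAN property: pass a Python bool rather than a string.
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
DOC.set_value(True)

# STRING property: free text describing the scheme.
DOC.set_id('cmip6.atmos.gravity_waves.overview')
DOC.set_value("Placeholder overview text of the gravity wave parameterisation.")
```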
winpython/winpython_afterdoc
docs/installing_julia_and_ijulia.ipynb
mit
[ "Installating Julia/IJulia\n1 - Downloading and Installing the right Julia binary in the right place", "import os\nimport sys\nimport io\nimport re\n\nimport urllib.request as request # Python 3\n\n# get latest stable release info, download link and hashes\ng = request.urlopen(\"https://julialang.org/downloads/\")\ns = g.read().decode()\ng.close;\n\nr = r'<a href=\".current_stable_release\">([^<]+)</a></h2> ' + \\\n r'<p>Checksums for this release are available in both <a href=\"([^\"]*)\">MD5</a> and <a href=\"([^\"]*)\">SHA256</a> formats.</p>' + \\\n r'[^W]*Windows <a href=\"/downloads/platform/.windows\">.help.</a> <td colspan=3 > <a href=\"[^\"]*\">64-bit .installer.</a>, <a href=\"([^\"]*)\">64-bit .portable.</a>' + \\\n r' <td colspan=3 > <a href=\"[^\"]*\">32-bit .installer.</a>, <a href=\"([^\"]*)\">32-bit .portable.</a>'\n\nrelease_str, md5link, sha256link, ziplink64bit, ziplink32bit = re.findall(r,s)[0]\njulia_version=re.findall(r\"v([^\\s]+)\",release_str)[0]\nprint(release_str)\nprint(julia_version)\nprint(ziplink64bit)\nprint(ziplink32bit)\nprint(md5link)\nprint(sha256link)", "overwrite links, since v1.5.3 installation does not work properly due to\nhttps://github.com/JuliaLang/julia/issues/38411", "if julia_version=='1.5.3':\n julia_version='1.6.0-rc1'\n ziplink64bit='https://julialang-s3.julialang.org/bin/winnt/x64/1.6/julia-1.6.0-rc1-win64.zip'\n md5link='https://julialang-s3.julialang.org/bin/checksums/julia-1.6.0-rc1.md5'\n sha256link='https://julialang-s3.julialang.org/bin/checksums/julia-1.6.0-rc1.sha256'\n print(julia_version)\n\n# download checksums\ng = request.urlopen(md5link)\nmd5hashes = g.read().decode()\ng.close;\n\ng = request.urlopen(sha256link)\nsha256hashes = g.read().decode()\ng.close;\n\n# downloading julia (may take 1 minute or 2)\n\nif 'amd64' in sys.version.lower():\n julia_zip=ziplink64bit.split(\"/\")[-1]\n julia_url=ziplink64bit\nelse:\n julia_zip=ziplink32bit.split(\"/\")[-1]\n julia_url=ziplink32bit\n \nhashes=(re.findall(r\"([0-9a-f]{32})\\s\"+julia_zip, md5hashes)[0] , re.findall(r\"([0-9a-f]{64})\\s+\"+julia_zip, sha256hashes)[0])\n \njulia_zip_fullpath = os.path.join(os.environ[\"WINPYDIRBASE\"], \"t\", julia_zip)\n\ng = request.urlopen(julia_url) \nwith io.open(julia_zip_fullpath, 'wb') as f:\n f.write(g.read())\ng.close\ng = None\n\n#checking it's there\nassert os.path.isfile(julia_zip_fullpath)\n\n# checking the hashes\nimport hashlib\ndef give_hash(of_file, with_this):\n with io.open(julia_zip_fullpath, 'rb') as f:\n return with_this(f.read()).hexdigest() \nprint (\" \"*12+\"MD5\"+\" \"*(32-12-3)+\" \"+\" \"*15+\"SHA-256\"+\" \"*(40-15-5)+\"\\n\"+\"-\"*32+\" \"+\"-\"*64)\n\nprint (\"%s %s %s\" % (give_hash(julia_zip_fullpath, hashlib.md5) , give_hash(julia_zip_fullpath, hashlib.sha256),julia_zip))\nassert give_hash(julia_zip_fullpath, hashlib.md5) == hashes[0].lower() \nassert give_hash(julia_zip_fullpath, hashlib.sha256) == hashes[1].lower()\n\n# will be in env next time\nos.environ[\"JUPYTER\"] = os.path.join(os.environ[\"WINPYDIR\"],\"Scripts\",\"jupyter.exe\")\nos.environ[\"JULIA_HOME\"] = os.path.join(os.environ[\"WINPYDIRBASE\"], \"t\", \"julia-\"+julia_version)\nos.environ[\"JULIA_EXE_PATH\"] = os.path.join(os.environ[\"JULIA_HOME\"], \"bin\")\nos.environ[\"JULIA_EXE\"] = \"julia.exe\"\nos.environ[\"JULIA\"] = os.path.join(os.environ[\"JULIA_EXE_PATH\"],os.environ[\"JULIA_EXE\"])\nos.environ[\"JULIA_PKGDIR\"] = os.path.join(os.environ[\"WINPYDIRBASE\"],\"settings\",\".julia\")\nos.environ[\"JULIA_DEPOT_PATH\"] = 
os.environ[\"JULIA_PKGDIR\"] \nos.environ[\"JULIA_HISTORY\"] = os.path.join(os.environ[\"JULIA_PKGDIR\"],\"logs\",\"repl_history.jl\")\nos.environ[\"CONDA_JL_HOME\"] = os.path.join(os.environ[\"JULIA_HOME\"], \"conda\", \"3\")\n\n\n# move JULIA_EXE_PATH to the beginning of PATH, since a julia installation may be present on the machine\nos.environ[\"PATH\"] = os.environ[\"JULIA_EXE_PATH\"] + \";\" + os.environ[\"PATH\"]\n\nif not os.path.isdir(os.environ[\"JULIA_PKGDIR\"]):\n os.mkdir(os.environ[\"JULIA_PKGDIR\"])\n \nif not os.path.isdir(os.path.join(os.environ[\"JULIA_PKGDIR\"],\"logs\")):\n os.mkdir(os.path.join(os.environ[\"JULIA_PKGDIR\"],\"logs\"))\n\nif not os.path.isfile(os.environ[\"JULIA_HISTORY\"]):\n open(os.environ[\"JULIA_HISTORY\"], 'a').close() # create empty file\n\n# extract the zip archive\nimport zipfile\ntry:\n with zipfile.ZipFile(julia_zip_fullpath) as z:\n z.extractall(os.path.join(os.environ[\"WINPYDIRBASE\"], \"t\"))\n print(\"Extracted all files\")\nexcept:\n print(\"Invalid file\")\n\n# delete zip file\nos.remove(julia_zip_fullpath)", "2 - Initialize Julia , IJulia, and make them link to winpython", "# connecting Julia to WinPython (only once, or everytime you move things)\n# see the Windows terminal window for the detailed status. This may take \n# a minute or two.\nimport julia\njulia.install()\n\n%load_ext julia.magic\n\ninfo = julia.juliainfo.JuliaInfo.load()\nprint(info.julia)\nprint(info.sysimage)\nprint(info.version_raw)\n\nfrom julia.api import Julia\njl = Julia(compiled_modules=False)\n\n# sanity check\nassert jl.eval(\"1+2\") == 3", "Print julia's versioninfo()\nThe environment should point to the usb drive and not to C:\\ (your local installation of julia maybe...)", "jl.eval(\"using InteractiveUtils\")\njl.eval('file = open(\"julia_versioninfo.txt\",\"w\")') \njl.eval(\"versioninfo(file,verbose=false)\")\njl.eval(\"close(file)\")\n\nwith open('julia_versioninfo.txt', 'r') as f:\n print(f.read())\n \nos.remove('julia_versioninfo.txt')", "Install julia Packages", "%%julia\nusing Pkg\n\nPkg.instantiate()\nPkg.update()\n\n%%julia\n# add useful packages. 
Again, this may take a while...\nPkg.add(\"IJulia\")\nPkg.add(\"Plots\")\nPkg.add(\"Interact\")\nPkg.add(\"Compose\")\nPkg.add(\"SymPy\")\n\nusing Compose\nusing SymPy\nusing IJulia\nusing Plots", "Fix the kernel.json to allow arbitrary drive letters and modify the env.bat\nthe path to kernel.jl is hardcoded in the kernel.json file\nthis will cause trouble, if the drive letter of the usb drive changes\nuse relative paths instead\nrewrite kernel.json and delete the one created from IJulia.jl Package", "kernel_path = os.path.join(os.environ[\"WINPYDIRBASE\"], \"settings\", \"kernels\", \"julia-\"+julia_version[0:3])\nassert os.path.isdir(kernel_path)\n\nwith open(os.path.join(kernel_path,\"kernel.json\"), 'r') as f:\n kernel_str = f.read()\n\nnew_kernel_str = kernel_str.replace(os.environ[\"WINPYDIRBASE\"].replace(\"\\\\\",\"\\\\\\\\\"),\"{prefix}\\\\\\\\..\")\nprint(new_kernel_str)\n\nwith open(os.path.join(kernel_path,\"kernel.json\"), 'w') as f:\n f.write(new_kernel_str)\n\n# add JULIA env variables to env.bat\ninp_str = r\"\"\"\nrem ******************\nrem handle Julia {0} if included\nrem ******************\n\nif not exist \"%WINPYDIRBASE%\\t\\julia-{0}\\bin\" goto julia_bad_{0}\nset JULIA_PKGDIR=%WINPYDIRBASE%\\settings\\.julia\nset JULIA_DEPOT_PATH=%JULIA_PKGDIR%\nset JULIA_EXE=julia.exe\nset JULIA_HOME=%WINPYDIRBASE%\\t\\julia-{0}\nset JULIA_HISTORY=%JULIA_PKGDIR%\\logs\\repl_history.jl\n:julia_bad_{0}\n\n\"\"\".format(julia_version)\n\n# append to env.bat\nwith open(os.path.join(os.environ[\"WINPYDIRBASE\"],\"scripts\",\"env.bat\"), 'a') as file :\n file.write(inp_str)", "3 - Launching a Julia Notebook\nchoose a Julia Kernel from Notebook, or Julia from Jupyterlab Launcher\n4 - Julia Magic\nor use %load_ext julia.magic then %julia or %%julia" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
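The final cells of the Julia notebook above rewrite `kernel.json` so that the kernel spec uses a `{prefix}`-relative path instead of the absolute WinPython directory. A small, hypothetical sanity check of that rewrite is sketched below; the kernel directory name (`julia-1.6`) and the use of the standard `argv` field of a Jupyter kernel spec are assumptions, and none of this code appears in the original notebook.

```python
# Hypothetical check (not in the original notebook): verify the rewritten
# kernel.json no longer hard-codes the WinPython base directory.
import json
import os

kernel_dir = os.path.join(os.environ["WINPYDIRBASE"], "settings", "kernels", "julia-1.6")  # assumed name
with open(os.path.join(kernel_dir, "kernel.json")) as f:
    spec = json.load(f)

base = os.environ["WINPYDIRBASE"].lower()
assert not any(base in str(arg).lower() for arg in spec.get("argv", [])), \
    "kernel.json still contains an absolute path"
print(spec.get("argv", ["<missing>"])[0])  # expected to start with the {prefix} placeholder
```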
mathnathan/notebooks
mpfi/probability blog post.ipynb
mit
[ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Introduction\nMachine learning literature makes heavy use of probabilistic graphical models\nand bayesian statistics. In fact, state of the art (SOTA) architectures, such as\n[variational autoencoders][vae-blog] (VAE) or [generative adversarial\nnetworks][gan-blog] (GAN), are intrinsically stochastic by nature. To\nwholesomely understand research in this field not only do we need a broad\nknowledge of mathematics, probability, and optimization but we somehow need\nintuition about how these concepts are applied to real world problems. For\nexample, one of the most common applications of deep learning techniques is\nvision. We may want to classify images or generate new ones. Most SOTA\ntechniques pose these problems in a probabilistic framework. We frequently see\nthings like $p(\\mathbf{x}|\\mathbf{z})$ where $\\mathbf{x}$ is an image and\n$\\mathbf{z}$ is a latent variable. What do we mean by the probability of an\nimage? What is a latent variable, and why is it necessary[^Bishop2006] to pose\nthe problems this way?\nShort answer, it is necessary due to the inherent uncertainty of our universe.\nIn this case, uncertainty in image acquisition can be introduced via many\nsources, such as the recording apparatus, the finite precision of our\nmeasurements, as well as the intrinsic stochasticity of the process being\nmeasured. Perhaps the most important source of uncertainty we will consider is\ndue to there being sources of variability that are themselves unobserved.\nProbability theory provides us with a framework to reason in the presence of\nuncertainty and information theory allows us to quantify uncertainty. As we \nelluded earlier the field of machine learning makes heavy use of both, and\nthis is no coincidence.\nRepresentations\nHow do we describe a face? The word \"face\" is a symbol and this symbol means\ndifferent things to different people. Yet, there is enough commonality between\nour interpretations that we are able to effectively communicate with one\nanother using the word. How is that? What are the underlying features of faces\nthat we all hold common? Why is a simple smiley face clip art so obviously\nperceived as a face? To make it more concrete, why are two simple ellipses\ndecorated underneath by a short curve so clearly a face, while an eye lid,\nlower lip, one ear and a nostril, not? \nInsert Image of Faces\nLeft: Most would likely agree, this is clearly a face. Middle:\nWith nearly all of the details removed, a mere two circles and\ncurve are enough to create what the author still recognizes\nas a face. Right: Does this look like a face to you? An ear, \nnostril, eyelid, and lip do not seem to convey a face as clearly\nas the eyes and the mouth do. We will quantify this demonstration\nshortly.\nFeatures, or representations, are built on the idea that characteristics of the\nsymbol \"face\" are not a property of any one face. Rather, they only arise from\nthe myriad of things we use the symbol to represent. In other words, a\nparticular face is not ascribed meaning by the word \"face\" - the word \"face\"\nderives meaning from the many faces it represents. This suggests that facial\ncharacteristics can be described through the statistical properties of all\nfaces. Loosely speaking, these underlying statistical characteristics are what\nthe machine learning field often calls latent variables.\nProbability of an Image\nMost images are contaminated with noise that must be addressed. 
At the\nhighest level, we have noise being added to the data by the imaging device. The\nnext level of uncertainty comes as a consequence of discretization.\nImages in reality are continuous but in the process of imaging we only measure\ncertain points along the face. Consider for example a military satellite\ntracking a vehicle. If one wishes to predict the future location of the vehicle,\nthe prediction is limited to be within one of the discrete cells that make up\nits measurements. However, the true location of the vehicle could be anywhere\nwithin that grid cell. There is also intrinsic stochasticity at the atomic\nlevel that we ignore. The fluctuations taking place at that scale are assumed\nto be averaged out in our observations.\nThe unobserved sources of variability will be our primary focus. Before we\naddress that, let us lay down some preliminary concepts. We are going to assume\nthat there exists some true unknown process that determines what faces look\nlike. A dataset of faces can then be considered as a sample of this process at \nvarious points throughout its life. This suggests that these snapshots are\noutputs of the underlying data generating process. Considering the many\nsources of uncertainty outlined above, it is natural to describe this process\nas a probability distribution. There will be many ways to interpret the data as\na probability, but we will begin by considering any one image to be the result\nof a data generating distribution, $P_{data}(\\mathbf{x})$. Here $\\mathbf{x}$ is considered to be\nan image of a face with $n$ pixels. So $P_{data}$ is a joint distribution over\neach pixel of the frame with a probability density function (pdf),\n$p_{data}(x_1,x_2,\\dots,x_n)$.\nTo build intuition about what $p_{data}(\\mathbf{x})$ is and how it relates to\nthe assumed data generating process, we will explore a simple example. Take an\nimage with only 2 pixels... [$x_1$,$x_2$] where both $x_1$ and $x_2$ are in\n[0,1]. Each image can be considered as a two dimensional point, in\n$\\mathbb{R}^2$. All possible images would occupy a square in the 2 dimensional\nplane. An example of what this might look like can be seen in Figure\n\\ref{fig:images_in_2dspace} on page \\pageref{fig:images_in_2dspace}. Any one\npoint inside the unit square would represent an image. For example the image\nassociated with the point $(0.25,0.85)$ is shown below.", "x1 = np.random.uniform(size=500)\nx2 = np.random.uniform(size=500)\nfig = plt.figure();\nax = fig.add_subplot(1,1,1);\nax.scatter(x1,x2, edgecolor='black', s=80);\nax.grid();\nax.set_axisbelow(True);\nax.set_xlim(-0.25,1.25); ax.set_ylim(-0.25,1.25)\nax.set_xlabel('Pixel 2'); ax.set_ylabel('Pixel 1'); plt.savefig('images_in_2dspace.pdf')", "Any one point inside the unit square would represent an image. For example the image associated with the point $(0.25,0.85)$ is shown below.", "im = [(0.25, 0.85)]\nplt.imshow(im, cmap='gray',vmin=0,vmax=1)\nplt.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n left='off',\n right='off'\n)\nplt.xticks([])\nplt.yticks([])\nplt.xlabel('Pixel 1 = 0.25 Pixel 2 = 0.85')\nplt.savefig('sample_2dspace_image.pdf')", "Now consider the case where there is some \nprocess correlating the two variables. This \nwould be similar to there being some rules behind\nthe structure of faces. 
We know that this must be\nthe case because if it weren't then faces would\nbe created randomly and we would not see the \npatterns that we do. In \nthis case, the pixels would be correlated in \nsome manner due to the mechanism driving the\nconstruction of faces. In this simple case, \nlet's consider a direct correlation of the \nform $x_1 = \\frac{1}{2} \\cos(2\\pi x_2)+\\frac{1}{2}+\\epsilon$ \nwhere $\\epsilon$ is a noise term coming from\na low variability normal distribution \n$\\epsilon \\sim N(0,\\frac{1}{10})$. We see \nin Figure \\ref{fig:structured_images_in_2dspace}\non page \\pageref{fig:structured_images_in_2dspace}\nthat in this case, the images plotted\nin two dimensions resulting from this \nrelationship form a distinct pattern.", "x1 = lambda x2: 0.5*np.cos(2*np.pi*x2)+0.5\nx2 = np.linspace(0,1,200)\neps = np.random.normal(scale=0.1, size=200)\nfig = plt.figure();\nax = fig.add_subplot(1,1,1);\nax.scatter(x2,x1(x2)+eps, edgecolor='black', s=80);\nax.grid();\nax.set_axisbelow(True);\nax.set_xlim(-0.25,1.25); ax.set_ylim(-0.25,1.25); plt.axes().set_aspect('equal')\nax.set_xlabel('Pixel 2'); ax.set_ylabel('Pixel 1'); plt.savefig('structured_images_in_2dspace.pdf')", "We will refer to the structure suggested by \nthe two dimensional points as the 'manifold'.\nThis is a common practice when analyzing images.\nA 28 by 28 dimensional image will be a point in\n784 dimensional space. If we are examining \nimages with structure, various images of the\nnumber 2 for example, then it turns out that \nthese images will form a manifold in 784 \ndimensional space. In most cases, as is the \ncase in our contrived example, this manifold \nexists in a lower dimensional space than that\nof the images themselves. The goal is to 'learn'\nthis manifold. In our simple case we can describe\nthe manifold as a function of only 1 variable \n$$f(t) = \\langle t,\\frac{1}{2} \\cos(2\\pi t)+\\frac{1}{2}\\rangle$$ \nThis is what we would call the underlying data \ngenerating process. In practice we usually \ndescribe the manifold in terms of a probability\ndistribution. We will refer to the data \ngenerating distribution in our example as \n$p_{test}(x_1, x_2)$. Why did we choose a \nprobability to describe the manifold created \nby the data generating process? How might this\nprobability be interpreted?\nLearning the actual distribution turns out to \nbe a difficult task. Here we will use a\ncommon non-parametric technique for describing\ndistributions, the histogram. Looking at a \nhistogram of the images, or two dimensional points,\nwill give us insight into the structure of the \ndistribution from which they came. Notice here \nthough that the histogram merely describes the \ndistribution; we do not know what it is.", "from matplotlib.colors import LogNorm\nx2 = np.random.uniform(size=100000)\neps = np.random.normal(scale=0.1, size=100000)\nhist2d = plt.hist2d(x2,x1(x2)+eps, bins=50, norm=LogNorm())\nplt.xlim(0.0,1.0); plt.ylim(-0.3,1.3); plt.axes().set_aspect('equal')\nplt.xlabel('Pixel 2'); plt.ylabel('Pixel 1')\nplt.colorbar();\nplt.savefig('histogram_of_structured_images.pdf')", "As our intuition might have suggested, the data\ngenerating distribution looks very similar to \nthe structure suggested by the two dimensional\nimages plotted above. There is high probability\nvery near the actual curve \n$x_1 = \\frac{1}{2} \\cos(2\\pi x_2)+\\frac{1}{2}$ \nand low probability as we move away. We imposed\nthe uncertainty via the Gaussian noise term \n$\\epsilon$. 
However, in real data the \nuncertainty can be due to the myriad sources\noutlined above. In these cases a complex \nprobability distribution isn't an arbitrary \nchoice for representing the data, it becomes \nnecessary. \nHopefully we're now beginning to understand how\nto interpret $p_{test}(x_1, x_2)$. One might say\n$p_{test}$ measures how likely a certain \nconfiguration of $x_1$ and $x_2$ is to have \narisen from the data generating process $f(t)$.\nTherefore if one can learn the data generating\ndistribution, then they have a descriptive\nmeasure of the true underlying data generating\nprocess. This intuition extends to the \n$p_{data}(x)$ for faces that was presented \nabove. A sample from the LFW dataset is shown in \nFigure \\ref{fig:Agnelo_Queiroz_0001} on page\n\\pageref{fig:Agnelo_Queiroz_0001}." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
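The probability post above describes $p_{test}(x_1, x_2)$ with a 2-D histogram and stresses that the histogram only describes the distribution. As a hedged follow-up sketch (not part of the original post), the same toy data can also be summarised with a Gaussian kernel density estimate, which likewise assigns high density near the generating curve and low density away from it; variable names mirror the post's example.

```python
# Sketch only: a KDE as an alternative non-parametric description of p_test(x1, x2).
import numpy as np
from scipy.stats import gaussian_kde

x2 = np.random.uniform(size=100000)
eps = np.random.normal(scale=0.1, size=100000)
x1 = 0.5 * np.cos(2 * np.pi * x2) + 0.5 + eps   # same toy data generating process as the post

kde = gaussian_kde(np.vstack([x2, x1]))         # rows = dimensions, columns = samples

on_manifold = kde([[0.25], [0.50]])             # near the curve: x1(0.25) = 0.5
off_manifold = kde([[0.25], [0.00]])            # far from the curve
print(on_manifold, off_manifold)                # the first density should be much larger
```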
ES-DOC/esdoc-jupyterhub
notebooks/ec-earth-consortium/cmip6/models/ec-earth3-cc/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: EC-EARTH-CONSORTIUM\nSource ID: EC-EARTH3-CC\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:59\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-cc', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. 
In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specified for the following parameters if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involved flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. 
Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. 
Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. 
Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. 
Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
greenca/diy-spectrometer
peak-detection.ipynb
mit
[ "Detecting Peaks in a Spectrum", "import spectrumlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n%pylab inline\n\nfilename = 'shear.png'\nspectrum = spectrumlib.getSpectrum(filename)\n\nplt.plot(spectrum)", "First, find the relative maxima of the spectrum.", "from scipy.signal import argrelextrema\nmaxes = argrelextrema(spectrum, np.greater, order=2)\n\nprint maxes[0]\n\nfor x in maxes[0]:\n plt.axvline(x)\nplt.plot(spectrum, color='r')", "This clearly gives us way too many local maxima. \nSo, next, we try the find_peaks_cwt function from scipy.signal, which uses wavelets.", "from scipy.signal import find_peaks_cwt\ncwt_peaks = find_peaks_cwt(spectrum, np.arange(10,15))\n\nprint cwt_peaks\n\nfor x in cwt_peaks:\n plt.axvline(x)\nplt.plot(spectrum, color='r')", "This is better, in that we lost all the spurious values, but it doesn't match that well, and we don't get the\ndouble-peak (near 150) anymore.\nhttps://gist.github.com/endolith/250860 has a python translation of a matlab peak-detection script. Downloaded as peakdetect.py", "import peakdetect\npeaks, valleys = peakdetect.peakdet(spectrum, 3)\n\nprint peaks\n\nfor index, val in peaks:\n plt.axvline(index)\nplt.plot(spectrum, color='r')", "This is a pretty decent result, which we should be able to use for matching with known spectra.\nCalibration\nThe sample spectrum above is for a fluorescent lamp. This is a known spectrum, that we can use for calibration. Here is a labelled plot of the spectrum: \n\n\"Fluorescent lighting spectrum peaks labelled\". Licensed under CC BY-SA 3.0 via Wikimedia Commons - http://commons.wikimedia.org/wiki/File:Fluorescent_lighting_spectrum_peaks_labelled.gif#/media/File:Fluorescent_lighting_spectrum_peaks_labelled.gif\nVisually, this appears to match pretty well with our spectrum. We calibrate the x-axis by matching two points with the known spectrum. Let's use the strongest two peaks: 5, at 546.5 nm (from Mercury) and 12, at 611.6 nm (from Europium). In our spectrum, peak 4 has a higher intensity than peak 12, but we'll use peak 12 anyway, because peaks 4 and 5 are too close together to get an accurate calibration.", "intensities = sorted([intensity for index, intensity in peaks])\npeak5 = [index for index, intensity in peaks if intensity == intensities[-1]][0]\npeak12 = [index for index, intensity in peaks if intensity == intensities[-3]][0]\nprint peak5, peak12", "Linear scale between index numbers and wavelengths:\n<br>wavelength = m*index + b", "peak5_wl = 546.5\npeak12_wl = 611.6\nm = (peak12_wl - peak5_wl)/(peak12 - peak5)\nb = peak5_wl - m*peak5\nprint m, b\n\nwavelengths = [m*index + b for index in range(len(spectrum))]\n\nplt.plot(wavelengths, spectrum)\nplt.xlabel('Wavelength (nm)')\nplt.ylabel('Intensity')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tanghaibao/goatools
notebooks/cell_cycle.ipynb
bsd-2-clause
[ "Cell Cycle genes\nUsing Gene Ontologies (GO), create an up-to-date list of all human protein-coding genes that are know to be associated with cell cycle.\n1. Download Ontologies, if necessary", "# Get http://geneontology.org/ontology/go-basic.obo\nfrom goatools.base import download_go_basic_obo\nobo_fname = download_go_basic_obo()", "2. Download Associations, if necessary", "# Get ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz\nfrom goatools.base import download_ncbi_associations\ngene2go = download_ncbi_associations()", "3. Read associations\nNormally, when reading associations, GeneID2GOs are returned. We get the reverse, GO2GeneIDs, by adding the key-word arg, \"go2geneids=True\" to the call to read_ncbi_gene2go.", "from goatools.anno.genetogo_reader import Gene2GoReader\n\nobjanno = Gene2GoReader(\"gene2go\", taxids=[9606])\ngo2geneids_human = objanno.get_id2gos(namespace='BP', go2geneids=True)\n\nprint(\"{N:} GO terms associated with human NCBI Entrez GeneIDs\".format(N=len(go2geneids_human)))", "4. Initialize Gene-Search Helper", "from goatools.go_search import GoSearch\n\nsrchhelp = GoSearch(\"go-basic.obo\", go2items=go2geneids_human)", "5. Find human all genes related to \"cell cycle\"\n5a. Prepare \"cell cycle\" text searches\nWe will need to search for both cell cycle and cell cycle-independent. Those GOs that contain the text cell cycle-independent are specifically not related to cell cycle and must be removed from our list of cell cycle GO terms.", "import re\n\n# Compile search pattern for 'cell cycle'\ncell_cycle_all = re.compile(r'cell cycle', flags=re.IGNORECASE)\ncell_cycle_not = re.compile(r'cell cycle.independent', flags=re.IGNORECASE)", "5b. Find NCBI Entrez GeneIDs related to \"cell cycle\"", "# Find ALL GOs and GeneIDs associated with 'cell cycle'.\n\n# Details of search are written to a log file\nfout_allgos = \"cell_cycle_gos_human.log\" \nwith open(fout_allgos, \"w\") as log:\n # Search for 'cell cycle' in GO terms\n gos_cc_all = srchhelp.get_matching_gos(cell_cycle_all, prt=log)\n # Find any GOs matching 'cell cycle-independent' (e.g., \"lysosome\")\n gos_no_cc = srchhelp.get_matching_gos(cell_cycle_not, gos=gos_cc_all, prt=log)\n # Remove GO terms that are not \"cell cycle\" GOs\n gos = gos_cc_all.difference(gos_no_cc)\n # Add children GOs of cell cycle GOs\n gos_all = srchhelp.add_children_gos(gos)\n # Get Entrez GeneIDs for cell cycle GOs\n geneids = srchhelp.get_items(gos_all)\nprint(\"{N} human NCBI Entrez GeneIDs related to 'cell cycle' found.\".format(N=len(geneids)))\n", "6. Print the \"cell cycle\" protein-coding gene Symbols\nIn this example, the background is all human protein-codinge genes. \nFollow the instructions in the background_genes_ncbi notebook to download a set of background population genes from NCBI.", "from genes_ncbi_9606_proteincoding import GENEID2NT\n\nfor geneid in geneids: # geneids associated with cell-cycle\n nt = GENEID2NT.get(geneid, None)\n if nt is not None:\n print(\"{Symbol:<10} {desc}\".format(\n Symbol = nt.Symbol, \n desc = nt.description))", "Copyright 2016-present, DV Klopfenstein, Haibao Tang. All rights reserved." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
AkshanshChahal/BTP
Baseline 2.ipynb
mit
[ "Establishing a Baseline for the Problem\nUsing variety of regression algorithms (non linear)", "import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\nimport pprint\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\n\nfrom sklearn.svm import SVR\n\n%matplotlib inline\n\n# importing the dataset we prepared and saved using Baseline 1 Notebook\nricep = pd.read_csv(\"/Users/macbook/Documents/BTP/Notebook/BTP/ricep.csv\")\nricep.head()\n\nricep = ricep.drop([\"Unnamed: 0\"],axis=1)\nricep[\"phosphorus\"] = ricep[\"phosphorus\"]*10\n\nricep[\"value\"] = ricep[\"Production\"]/ricep[\"Area\"]\n\nX = ricep[[\"X1\",\"X2\",\"X3\",\"X4\",\"phosphorus\"]]\ny = ricep[[\"value\"]]*1000\n\n# Z-Score Normalization OR try using the sklearn internal normalizing by setting mormalize flag = true !!!\n\ncols = list(X.columns)\nfor col in cols:\n col_zscore = col + '_zscore'\n X[col_zscore] = (X[col] - X[col].mean())/X[col].std(ddof=0)\n\nX_ = X[[\"X1_zscore\", \"X2_zscore\", \"X3_zscore\", \"X4_zscore\", \"phosphorus_zscore\"]]\nX_.head()\n\nX_train, X_test, y_train, y_test = train_test_split(X_, y, test_size=0.2, random_state=1)", "First checking the avg RMSE for Linear Regression", "clf = LinearRegression()\nscores = cross_val_score(clf, X_, y, cv=5, scoring='neg_mean_squared_error')\nfor i in range(0,5):\n scores[i] = sqrt(-1*scores[i])\n \nprint(scores)\navg_rmse = scores.mean()\nprint(\"\\n\\nAvg RMSE is \",scores.mean())", "Epsilon-Support Vector Regression (SVR)\nRBF Kernel", "# 5 Fold CV, to calculate avg RMSE\nclf = SVR(C=500000.0, epsilon=0.1, kernel='rbf', gamma=0.0008)\nscores = cross_val_score(clf, X_, y.values.ravel(), cv=5, scoring='neg_mean_squared_error')\nfor i in range(0,5):\n scores[i] = sqrt(-1*scores[i])\n\nprint(scores)\navg_rmse = scores.mean()\nprint(\"\\n\\nAvg RMSE is \",scores.mean())\n\n# Just the 4 original features (no soil data)\nX_old = X[[\"X1_zscore\", \"X2_zscore\", \"X3_zscore\", \"X4_zscore\"]]\n\n# 5 Fold CV, to calculate avg RMSE\nclf = SVR(C=1000.0, epsilon=0.1, kernel='rbf', gamma=0.027)\nscores = cross_val_score(clf, X_old, y.values.ravel(), cv=5, scoring='neg_mean_squared_error')\nfor i in range(0,5):\n scores[i] = sqrt(-1*scores[i])\n\nprint(scores)\navg_rmse = scores.mean()\nprint(\"\\n\\nAvg RMSE is \",scores.mean())", "SVR : 927\nLR : 1018\nSVR (RBF kernel) works better than Linear Regression.\nAlso, the soil feature, for now, does more harm than good (Phosphorous content)\nLets check the importance of Rain Data", "# Just 2 features (no rain data)\nX_nr = X[[\"X1_zscore\", \"X2_zscore\"]]\n\n# 5 Fold CV, to calculate avg RMSE\nclf = SVR(C=1000.0, epsilon=0.1, kernel='rbf', gamma=0.027)\nscores = cross_val_score(clf, X_nr, y.values.ravel(), cv=5, scoring='neg_mean_squared_error')\nfor i in range(0,5):\n scores[i] = sqrt(-1*scores[i])\n\nprint(scores)\navg_rmse = scores.mean()\nprint(\"\\n\\nAvg RMSE is \",scores.mean())", "The Rain data does helps us\nLets try for SVR with other kernels ...\nDegree 3 Polynomial", "# 5 Fold CV, to calculate avg RMSE\nclf = SVR(kernel='poly', gamma='auto', degree=3, coef0=2)\nscores = cross_val_score(clf, X_old, y.values.ravel(), cv=5, scoring='neg_mean_squared_error')\nfor i in range(0,5):\n scores[i] = sqrt(-1*scores[i])\n \nprint(scores)\navg_rmse = 
scores.mean()\nprint(\"\\n\\nAvg RMSE is \",scores.mean())", "Polynomial Kernel also does better than Linear Regression\nDegree 4 Polynomial", "# 5 Fold CV, to calculate avg RMSE\nclf = SVR(kernel='poly', gamma='auto', degree=4, coef0=2)\nscores = cross_val_score(clf, X_old, y.values.ravel(), cv=5, scoring='neg_mean_squared_error')\nfor i in range(0,5):\n scores[i] = sqrt(-1*scores[i])\n \nprint(scores)\navg_rmse = scores.mean()\nprint(\"\\n\\nAvg RMSE is \",scores.mean())" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
HWNi/data-512-a1
hcds-a1-data-curation.ipynb
mit
[ "A1 Data Curation\nStep1: Data Acquisition", "# Import packages that will be used in this assignment\nimport requests\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline \nimport seaborn as sns\n", "To get the monthly traffic data on English Wikipedia from January 2008 through September 2017, we need to use 2 API endpoints, the Pagecounts API and the Pageviews API. The Pagecounts API provides monthy desktop and mobile traffic data from January 2008 through July 2016, and the Pageviews API provides monthy desktop, mobile-web, and mobile-app traffic data from July 2015 through September 2017. Once the user finishes the parameter settings for the API request, the traffic data will be returned in JSON format. The codes below will get you all pagecounts for English Wikipedia accessed through desktop from January 2008 through July 2016.", "# Collect desktop traffic data from January 2008 through July 2016 using the Pagecounts API\nendpoint_pagecounts = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access}/{granularity}/{start}/{end}'\n\nparams_pc_desktop = {\n 'project' : 'en.wikipedia.org',\n 'access' : 'desktop-site',\n 'granularity' : 'monthly',\n 'start' : '2008010100',\n 'end' : '2016080100'#use the first day of the following month to ensure a full month of data is collected\n }\n\napi_call = requests.get(endpoint_pagecounts.format(**params_pc_desktop))\nresponse_pc_desktop = api_call.json()\nwith open('pagecounts_desktop-site_200801-201607.json', 'w') as outfile:\n json.dump(response_pc_desktop, outfile)\n", "The codes below will get you all pagecounts for English Wikipedia accessed through mobile from January 2008 through July 2016.", "# Collect mobile traffic data from January 2008 through July 2016 using the Pagecounts API\nendpoint_pagecounts = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access}/{granularity}/{start}/{end}'\n\nparams_pc_mobile = {\n 'project' : 'en.wikipedia.org',\n 'access' : 'mobile-site',\n 'granularity' : 'monthly',\n 'start' : '2008010100',\n 'end' : '2016080100'\n }\n\napi_call = requests.get(endpoint_pagecounts.format(**params_pc_mobile))\nresponse_pc_mobile = api_call.json()\nwith open('pagecounts_mobile-site_200801-201607.json', 'w') as outfile:\n json.dump(response_pc_mobile, outfile)\n", "The codes below will get you all pageviews for English Wikipedia accessed through desktop from July 2015 through September 2017. Note that the data doesn't count traffic by web crawlers or spiders.", "# Collect desktop traffic data from July 2015 through September 2017 using the Pageviews API\nendPoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'\n\nheaders = {'User-Agent' : 'https://github.com/HWNi', 'From' : 'haowen2@uw.edu'}\n\nparams_pv_desktop = {\n 'project' : 'en.wikipedia.org',\n 'access' : 'desktop',\n 'agent' : 'user',\n 'granularity' : 'monthly',\n 'start' : '2015070100',\n 'end' : '2017100100'\n }\n\napi_call = requests.get(endPoint_pageviews.format(**params_pv_desktop))\nresponse_pv_desktop = api_call.json()\nwith open('pageviews_desktop_201507-201709.json', 'w') as outfile:\n json.dump(response_pv_desktop, outfile)\n", "The codes below will get you all pageviews for English Wikipedia accessed through mobile website from July 2015 through September 2017. 
Again, note that the data doesn't count traffic by web crawlers or spiders.", "# Collect mobile web traffic data from July 2015 through September 2017 using the Pageviews API\nendPoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'\n\nheaders = {'User-Agent' : 'https://github.com/HWNi', 'From' : 'haowen2@uw.edu'}\n\nparams_pv_mobile_web = {\n 'project' : 'en.wikipedia.org',\n 'access' : 'mobile-web',\n 'agent' : 'user',\n 'granularity' : 'monthly',\n 'start' : '2015070100',\n 'end' : '2017100100'\n }\n\napi_call = requests.get(endPoint_pageviews.format(**params_pv_mobile_web))\nresponse_pv_mobile_web = api_call.json()\nwith open('pageviews_mobile-web_201507-201709.json', 'w') as outfile:\n json.dump(response_pv_mobile_web, outfile)\n", "The codes below will get you all pageviews for English Wikipedia accessed through mobile app from July 2015 through September 2017. Again, note that the data doesn't count traffic by web crawlers or spiders.", "# Collect mobile app traffic data from July 2015 through September 2017 using the Pageviews API\nendPoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'\n\nheaders = {'User-Agent' : 'https://github.com/HWNi', 'From' : 'haowen2@uw.edu'}\n\nparams_pv_mobile_app = {\n 'project' : 'en.wikipedia.org',\n 'access' : 'mobile-app',\n 'agent' : 'user',\n 'granularity' : 'monthly',\n 'start' : '2015070100',\n 'end' : '2017100100'\n }\n\napi_call = requests.get(endPoint_pageviews.format(**params_pv_mobile_app))\nresponse_pv_mobile_app = api_call.json()\nwith open('pageviews_mobile-app_201507-201709.json', 'w') as outfile:\n json.dump(response_pv_mobile_app, outfile)\n", "Step 2: Data processing\nNow, we have 5 JSON files containing the traffic data we're interested in. In this step, we first iterate these 5 JSON files one by one and combine the data into a Python dictionary. Eventually, the key of the dictionary will be the list of time stamps (from January 2008 to September 2017). 
For each key (time stamp), we will append a list which contains 5 values: pagecounts accessed through desktop, pagecounts accessed through mobile, pageviews accessed through desktop, pageviews accessed through mobile web, and pageviews accessed through mobile app.", "data_cleaned = {}\n\nfor item in response_pc_desktop['items']:\n timeStamp = item['timestamp']\n data_cleaned[timeStamp] = [item['count'], 0, 0, 0, 0]\n \nfor item in response_pc_mobile['items']:\n timeStamp = item['timestamp']\n if timeStamp in data_cleaned:\n data_cleaned[timeStamp][1] = item['count'] \n else:\n data_cleaned[timeStamp] = [0, item['count'], 0, 0, 0]\n \nfor item in response_pv_desktop['items']:\n timeStamp = item['timestamp']\n if timeStamp in data_cleaned:\n data_cleaned[timeStamp][2] = item['views'] \n else:\n data_cleaned[timeStamp] = [0, 0, item['views'], 0, 0]\n \nfor item in response_pv_mobile_web['items']:\n timeStamp = item['timestamp']\n if timeStamp in data_cleaned:\n data_cleaned[timeStamp][3] = item['views'] \n else:\n data_cleaned[timeStamp] = [0, 0, 0, item['views'], 0]\n \nfor item in response_pv_mobile_app['items']:\n timeStamp = item['timestamp']\n if timeStamp in data_cleaned:\n data_cleaned[timeStamp][4] = item['views'] \n else:\n data_cleaned[timeStamp] = [0, 0, 0, 0, item['views']]", "After we get the dictionary, we can convert it into a Pandas dataframe and save the dataframe to a CSV file", "df = pd.DataFrame.from_dict(data_cleaned, orient='index')\ndf['timestamp'] = df.index\ndf['year'] = [t[0:4] for t in df['timestamp']]\ndf['month'] = [t[4:6] for t in df['timestamp']]\ndf['pagecount_all_views'] = df[0] + df[1]\ndf['pagecount_desktop_views'] = df[0]\ndf['pagecount_mobile_views'] = df[1]\ndf['pageview_all_views'] = df[2] + df[3] + df[4]\ndf['pageview_desktop_views'] = df[2]\ndf['pageview_mobile_views'] = df[3] + df[4]\ndf = df.loc[:, 'year' : 'pageview_mobile_views']\ndf.to_csv('en-wikipedia_traffic_200801-201709.csv', index=False)\ndf", "Step 3: Analysis\nIn the final step, we make a time series plot for the data we processed before. The X-axis of the plot is a date range, and the Y-axis is the amount of traffic in millions of views (the traffic counts are scaled down by a factor of 1 million).", "dateRange = pd.date_range('2008-01', '2017-10', freq='M')\nscale = 1e-6\nsns.set_style(\"whitegrid\")\nfig = plt.figure(figsize=(18, 12))\nplt.plot(dateRange, df['pagecount_all_views'] * scale, linestyle = ':', label = 'pagecount_all_views')\nplt.plot(dateRange, df['pagecount_desktop_views'] * scale, label = 'pagecount_desktop_views')\nplt.plot(dateRange, df['pagecount_mobile_views'] * scale, label = 'pagecount_mobile_views')\nplt.plot(dateRange, df['pageview_all_views'] * scale, linestyle = ':', label = 'pageview_all_views')\nplt.plot(dateRange, df['pageview_desktop_views'] * scale, label = 'pageview_desktop_views')\nplt.plot(dateRange, df['pageview_mobile_views'] * scale, label = 'pageview_mobile_views')\nplt.legend()\nplt.xlabel('Year')\nplt.ylabel('Amount of Traffic (* 1,000,000)')\nfig.savefig('en-wikipedia_traffic_200801-201709.jpg')" ]
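Step 2 above merges the five API responses by hand with a dictionary of fixed-length lists. An alternative sketch using pandas joins is shown below; the toy response fragments are invented so the snippet is self-contained, and the notebook's dictionary approach works just as well.

```python
# Alternative sketch of the Step 2 merge using pandas joins; the toy response
# fragments below are invented so the snippet is self-contained.
import pandas as pd

resp_pc_desktop = [{"timestamp": "2008010100", "count": 100},
                   {"timestamp": "2008020100", "count": 110}]
resp_pv_desktop = [{"timestamp": "2015070100", "views": 90}]

pc = (pd.DataFrame(resp_pc_desktop).set_index("timestamp")
        .rename(columns={"count": "pagecount_desktop_views"}))
pv = (pd.DataFrame(resp_pv_desktop).set_index("timestamp")
        .rename(columns={"views": "pageview_desktop_views"}))

merged = pc.join(pv, how="outer").fillna(0)
print(merged)
```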
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
baumanab/noaa_requests
NOAA_sandbox.ipynb
gpl-3.0
[ "import pandas as pd\nimport numpy as np\nfrom pandas.io import json\nimport requests\nimport os\nimport sys\nimport string\n\n\nNOAA_Token_Here= 'enter as string'", "Play with some basic functions adapted from tide data functions\nQuery Builder", "def query_builder(start_dt, end_dt, station, offset= 1):\n\n \"\"\"Function accepts: a start and end datetime string in the form 'YYYYMMDD mm:ss'\n which are <= 1 year apart, a station ID, and an offset. \n Function assembles a query parameters/arguments dict and returns an API query and the \n query dictionary (query_dict). The relevant base URL is the NCDC endpoint \n 'http://www.ncdc.noaa.gov/cdo-web/api/v2/data?'.\"\"\"\n\n import urllib\n \n # API endpoint\n base_url= 'http://www.ncdc.noaa.gov/cdo-web/api/v2/data?'\n\n # dict of NOAA query parameters/arguments\n\n query_dict = dict(startdate= start_dt, enddate= end_dt, stationid= station,\n offset= offset, datasetid= 'GHCND', limit= 1000)\n\n # encode arguments\n\n encoded_args = urllib.urlencode(query_dict)\n \n # query\n query = base_url + encoded_args\n \n # decode url % (reconvert reserved characters to utf8 string)\n query= urllib.unquote(query)\n\n # create and return query from base url and encoded arguments\n return query, query_dict\n\nquery_1, query_dict= query_builder('2014-01-01', '2015-01-01', station= 'GHCND:USW00023174')\nprint(query_1)\n\nquery_2, query_dict= query_builder('2014-01-01', '2015-01-01', station= 'GHCND:USW00023174', offset= 1001)\nprint(query_2)", "Offset Generator", "def offsetter(response):\n \n \"\"\"\n Function accepts a restful query response (JSON)\n Function returns a dictionary of offsets to pull the entire query set\n where the set is limited to 1000 records per query. Function also \n returns a record count for use in validation.\n \"\"\"\n \n # get repeats and repeat range\n import math\n count= response['metadata']['resultset']['count']\n repeats= math.ceil(count/1000.)\n repeat_range= range(int(repeats))\n \n # get offsets dictionary\n \n offset= 1\n offsets= [1]\n for item in repeat_range[1:]:\n offset += 1000\n offsets.append(offset)\n \n \n # zip up the results and convert to dictionary\n offset_dict= dict(zip(repeat_range[1:], offsets[1:])) # the first call has been done already to get meta\n \n return offset_dict, count # for quality control \n \n ", "Query Generator\nTODO\n\nrefactor with a decorator\nmake key an attribute that can be hidden", "def execute_query(query):\n \n \"\"\"\n Function accepts an NOAA query for daily summaries for a specfic location\n and executes the query.\n Function returns a response (JSON)\n \"\"\"\n url = query\n # replace token with token provided by NOAA. 
Enter token as string\n headers = {'token': NOAA_Token_Here} # https://www.ncdc.noaa.gov/cdo-web/token\n response = requests.get(url, headers = headers)\n response = response.json()\n \n return response\n\nworking_1= execute_query(query_1)['results']\nworking_2 = execute_query(query_2)['results']", "Extract Results", "def extract_results(response):\n \n \"\"\"\n Function accepts a NOAA query response (JSON) return the results\n key values as well as the number of records (for use in validation).\n \"\"\"\n data= response['results']\n # for quality control to verify retrieval of all rows\n length= len(data)\n \n return data, length\n\ndef collator(results):\n \n \"\"\"\n Functions accepts the results key of an NOAA query response (JSON)\n and returns a tidy data set in PANDAS, where each record is an \n observation about a day.\n \"\"\"\n \n df= pd.DataFrame(results) \n df= df.drop(['attributes','station'], axis=1)\n df= df.pivot(index= 'date',columns= 'datatype', values= 'value').reset_index()\n \n return df\n\ndef get_ncdc(start_dt, end_dt, station):\n \n \"\"\"\n Function accepts a start date (MM-DD-YYY) an end date (MM-DD-YYYY)\n and a NOAA station ID. Date limit is 1 year.\n Function returns a tidy dataset in a PANDAS DataFrame where\n each row represents an observation about a day, a record count\n and a query parameters dictionary.\n \"\"\"\n \n \n # count for verifying retrieval of all rows\n record_count= 0\n # initial query\n query, query_dict= query_builder(start_dt, end_dt, station)\n response= execute_query(query)\n \n # extract results and count \n results, length= extract_results(response)\n record_count += length\n \n # get offsets for remaining queries\n off_d, count= offsetter(response)\n \n # execute remaining queries and operations\n for offset in off_d:\n query, _= query_builder(start_dt, end_dt, station, off_d[offset])\n print(query)\n response= execute_query(query)\n next_results, next_length= extract_results(response)\n \n record_count += next_length\n \n # concat results lists\n results += next_results\n \n assert record_count == count, 'record count != count'\n \n collated_data= collator(results)\n \n return collated_data, record_count, query_dict\n \n \n\ntest, qc, params = get_ncdc('2014-01-01', '2014-12-31', station= 'GHCND:USW00023174')\n\ntest.date.head()\n\ntest.date.tail()\n\ntest.info()\n\ntest[test.date.isnull()]\n\ny1, qc, params = get_ncdc('2014-05-03', '2015-05-02', station= 'GHCND:USW00023174')\ny2, qc, params = get_ncdc('2015-05-03', '2016-05-02', station= 'GHCND:USW00023174')\ny3, qc, params = get_ncdc('2016-05-03', '2017-05-02', station= 'GHCND:USW00023174')\n\ny1.info()\n\nyears= pd.concat([y1, y2, y3])\n\nyears.date.head()\n\nyears.date.tail()\n\nyears.to_csv('LAX_3years.csv', index= False)", "CSV Generator", "def gen_csv(df, query_dict):\n \"\"\"\n Arguments: PANDAS DataFrame, a query parameters dictionary\n Returns: A CSV of the df with dropped index and named by dict params\n \"\"\"\n \n # extract params\n station= query_dict['stationid']\n start= query_dict['startdate']\n end= query_dict['enddate']\n \n # using os.path in case of future expansion to other directories\n path= os.path.join(station + '_' + start + '_' + end + '.' 
+ 'csv')\n \n # remove problem characters (will add more in future)\n exclude_chars= ':'\n path= path.replace(exclude_chars, \"_\")\n \n # export to csv\n \n my_csv= df.to_csv(path, index= False)\n \n return my_csv, path\n \n \n \n\nstuff, path= gen_csv(test, query_dict)\n\npath\n\nls *csv\n\n#!/usr/bin/env python\n# coding: utf-8\n\n\n\"\"\"Python code for querying NOAA daily summary weather and returnig a CSV per year\nfor a specfic station. Code is intended to be executed from CLI.\"\"\"\n\nimport sys\n\n# set path to tools library and import\nsys.path.append(r'noaa_weather_tools')\nimport noaa_weather_tools\n\nNOAA_Token_Here= 'enter token as string'\n\nprint(\"Check dt format('DD-MM-YYYY', and whether dates span <= 1 year from a current or past date\")\nprint(\"If dates exceed one year, NCDC query returns a null object\")\nprint(\"Need a token take a token, have a token, keep it to yourself @ https://www.ncdc.noaa.gov/cdo-web/token\")\nprint('start_dt: {}\\n end_dt: {}'.format(sys.argv[1], sys.argv[2]))\n\n\ndef noaa_dailysum_weather_processor(start_dt, end_dt, station):\n\n \"\"\"Function accepts a station ID, and beginning/end datetime as strings with date format as\n 'MM-DD-YYYY' which span <= 1 year from a current or past date, passing them to the query_builder function. \n Function creates a .csv file of NOAA (NCDC) Daily Summary data for a specific station.\"\"\"\n \n print(15 * '.' + \"reticulating splines\" + 5* '.' + \"getting records\") \n df, record_count, query_parameters= noaa_weather_tools.get_ncdc(start_dt, end_dt, station)\n \n print(15* '.' + \"exporting to csv\")\n my_csv, my_path= noaa_weather_tools.gen_csv(df, query_parameters)\n \n print(\"spines reticulated\")\n return my_csv\n\n\nnoaa_dailysum_weather_processor('2014-05-03', '2015-05-02', station= 'GHCND:USW00023174')\n\nls *csv", "Discarded Functions\n```python\ndef collator(response):\ndata= pd.DataFrame(response['results'])\n# for quality control to verify retrieval of all rows\nlength= len(data)\n\ndata= data.drop(['attributes','station'], axis=1)\ndata= data.pivot(index= 'date',columns= 'datatype', values= 'value').reset_index()\n\nreturn data, length\n\ndef get_ncdc(start_dt, end_dt, station):\n# count for verifying retrieval of all rows\nrow_count= 0\n# initial query\nquery, query_dict= query_builder(start_dt, end_dt, station)\nresponse= execute_query(query)\n\n# collate and count \ncollated_data, length= collator(response)\nrow_count += length\n\n# get offsets for remaining queries\noff_d, count= offsetter(response)\n\n# execute remaining queries and operations\nfor offset in off_d:\n query, _= query_builder(start_dt, end_dt, station, off_d[offset])\n print(query)\n response= execute_query(query)\n next_data, next_length= collator(response)\n\n row_count += next_length\n\n # stack DataFrames\n collated_data= pd.concat([collated_data, next_data])\n\nassert row_count == count, 'row count != count'\n\nreturn collated_data, row_count\n\n```" ]
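The offsetter function above encodes a simple pagination rule: with the NCDC limit of 1000 records per call, each follow-up request starts 1000 records after the previous one. A standalone sketch of that arithmetic:

```python
# Standalone sketch of the offset arithmetic behind offsetter(): with a 1000-record
# limit per request, compute the offset for every call needed to cover `count` rows.
import math

def offsets_for(count, limit=1000):
    calls = int(math.ceil(count / float(limit)))
    return [i * limit + 1 for i in range(calls)]

print(offsets_for(3650))  # -> [1, 1001, 2001, 3001]
```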
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]