repo_name: stringlengths 6-77
path: stringlengths 8-215
license: stringclasses, 15 values
cells: sequence
types: sequence
lithiumdenis/MLSchool
3. Котики и собачки.ipynb
mit
[ "import matplotlib\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nmatplotlib.rcParams.update({'font.size': 12})\n\n# увеличим дефолтный размер графиков\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 18, 6\nrcParams['font.size'] = 16\nrcParams['axes.labelsize'] = 14\nrcParams['xtick.labelsize'] = 13\nrcParams['ytick.labelsize'] = 13\n\nimport pandas as pd\nimport numpy as np", "Данные\nВозьмите данные с https://www.kaggle.com/c/shelter-animal-outcomes .\nОбратите внимание, что в этот раз у нас много классов, почитайте в разделе Evaluation то, как вычисляется итоговый счет (score).\nВизуализация\n<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задание 1.</h3> \n </div>\n</div>\n\nВыясните, построив необходимые графики, влияет ли возраст, пол или фертильность животного на его шансы быть взятыми из приюта.\nПодготовим данные", "visual = pd.read_csv('data/CatsAndDogs/TRAIN2.csv')\n\n#Сделаем числовой столбец Outcome, показывающий, взяли животное из приюта или нет\n#Сначала заполним единицами, типа во всех случах хорошо\nvisual['Outcome'] = 'true'\n#Неудачные случаи занулим\nvisual.loc[visual.OutcomeType == 'Euthanasia', 'Outcome'] = 'false'\nvisual.loc[visual.OutcomeType == 'Died', 'Outcome'] = 'false'\n\n#Заменим строки, где в SexuponOutcome NaN, на что-нибудь осмысленное\nvisual.loc[visual.SexuponOutcome.isnull(), 'SexuponOutcome'] = 'Unknown Unknown'\n\n#Сделаем два отдельных столбца для пола и фертильности\nvisual['Gender'] = visual.SexuponOutcome.apply(lambda s: s.split(' ')[-1])\nvisual['Fertility'] = visual.SexuponOutcome.apply(lambda s: s.split(' ')[0])", "Сравним по возрасту", "mergedByAges = visual.groupby('AgeuponOutcome')['Outcome'].value_counts().to_dict()\n\nresults = pd.DataFrame(data = mergedByAges, index=[0]).stack().fillna(0).transpose()\nresults.columns = pd.Index(['true', 'false'])\nresults['total'] = results.true + results.false\nresults.sort_values(by='true', ascending=False, inplace=True)\nresults[['true', 'false']].plot(kind='bar', stacked=False, rot=45);", "Сравним по полу", "mergedByGender = visual.groupby('Gender')['Outcome'].value_counts().to_dict()\n\nresults = pd.DataFrame(data = mergedByGender, index=[0]).stack().fillna(0).transpose()\nresults.columns = pd.Index(['true', 'false'])\nresults['total'] = results.true + results.false\nresults.sort_values(by='true', ascending=False, inplace=True)\nresults[['true', 'false']].plot(kind='bar', stacked=True, rot=45);", "Сравним по фертильности", "mergedByFert = visual.groupby('Fertility')['Outcome'].value_counts().to_dict()\n\nresults = pd.DataFrame(data = mergedByFert, index=[0]).stack().fillna(0).transpose()\nresults.columns = pd.Index(['true', 'false'])\nresults['total'] = results.true + results.false\nresults.sort_values(by='true', ascending=False, inplace=True)\nresults[['true', 'false']].plot(kind='bar', stacked=True, rot=45);", "<b>Вывод по возрасту:</b> лучше берут не самых старых, но и не самых молодых\n<br>\n<b>Вывод по полу:</b> по большому счёту не имеет значения\n<br>\n<b>Вывод по фертильности:</b> лучше берут животных с ненарушенными репродуктивными способностями. Однако две следующие группы не сильно различаются по сути и, если их сложить, то разница не столь велика.\nПостроение моделей\n<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задание 2.</h3> \n </div>\n</div>\n\nПосмотрите тетрадку с генерацией новых признаков. 
Сделайте как можно больше релевантных признаков из всех имеющихся.\nНе забудьте параллельно обрабатывать отложенную выборку (test), чтобы в ней были те же самые признаки, что и в обучающей.\n<b>Возьмем исходные данные</b>", "train, test = pd.read_csv(\n 'data/CatsAndDogs/TRAIN2.csv' #наши данные\n #'data/CatsAndDogs/train.csv' #исходные данные\n), pd.read_csv(\n 'data/CatsAndDogs/TEST2.csv' #наши данные\n #'data/CatsAndDogs/test.csv' #исходные данные\n)\n\ntrain.head()\n\ntest.shape", "<b>Добавим новые признаки в train</b>", "#Сначала по-аналогии с визуализацией\n\n#Заменим строки, где в SexuponOutcome, Breed, Color NaN\ntrain.loc[train.SexuponOutcome.isnull(), 'SexuponOutcome'] = 'Unknown Unknown'\ntrain.loc[train.AgeuponOutcome.isnull(), 'AgeuponOutcome'] = '0 0'\ntrain.loc[train.Breed.isnull(), 'Breed'] = 'Unknown'\ntrain.loc[train.Color.isnull(), 'Color'] = 'Unknown'\n\n#Сделаем два отдельных столбца для пола и фертильности\ntrain['Gender'] = train.SexuponOutcome.apply(lambda s: s.split(' ')[-1])\ntrain['Fertility'] = train.SexuponOutcome.apply(lambda s: s.split(' ')[0])\n\n#Теперь что-то новое\n\n#Столбец, в котором отмечено, есть имя у животного или нет\ntrain['hasName'] = 1\ntrain.loc[train.Name.isnull(), 'hasName'] = 0\n\n#Столбец, в котором объединены порода и цвет\ntrain['breedColor'] = train.apply(lambda row: row['Breed'] + ' ' + str(row['Color']), axis=1)\n\n#Декомпозируем DateTime\n#Во-первых, конвертируем столбец в тип DateTime из строкового\ntrain['DateTime'] = pd.to_datetime(train['DateTime'])\n#А теперь декомпозируем\ntrain['dayOfWeek'] = train.DateTime.apply(lambda dt: dt.dayofweek)\ntrain['month'] = train.DateTime.apply(lambda dt: dt.month)\ntrain['day'] = train.DateTime.apply(lambda dt: dt.day)\ntrain['quarter'] = train.DateTime.apply(lambda dt: dt.quarter)\ntrain['hour'] = train.DateTime.apply(lambda dt: dt.hour)\ntrain['minute'] = train.DateTime.apply(lambda dt: dt.hour)\ntrain['year'] = train.DateTime.apply(lambda dt: dt.year)\n\n#Разбиение возраста\n#Сделаем два отдельных столбца для обозначения года/месяца и их количества\ntrain['AgeuponFirstPart'] = train.AgeuponOutcome.apply(lambda s: s.split(' ')[0])\ntrain['AgeuponSecondPart'] = train.AgeuponOutcome.apply(lambda s: s.split(' ')[-1])\n#Переведем примерно в среднем месяцы, годы и недели в дни с учетом окончаний s\ntrain['AgeuponSecondPartInDays'] = 0\ntrain.loc[train.AgeuponSecondPart == 'year', 'AgeuponSecondPartInDays'] = 365\ntrain.loc[train.AgeuponSecondPart == 'years', 'AgeuponSecondPartInDays'] = 365\ntrain.loc[train.AgeuponSecondPart == 'month', 'AgeuponSecondPartInDays'] = 30\ntrain.loc[train.AgeuponSecondPart == 'months', 'AgeuponSecondPartInDays'] = 30\ntrain.loc[train.AgeuponSecondPart == 'week', 'AgeuponSecondPartInDays'] = 7\ntrain.loc[train.AgeuponSecondPart == 'weeks', 'AgeuponSecondPartInDays'] = 7\n#Во-первых, конвертируем столбец в числовой тип из строкового\ntrain['AgeuponFirstPart'] = pd.to_numeric(train['AgeuponFirstPart'])\ntrain['AgeuponSecondPartInDays'] = pd.to_numeric(train['AgeuponSecondPartInDays'])\n\n#А теперь получим нормальное время жизни в днях\ntrain['LifetimeInDays'] = train['AgeuponFirstPart'] * train['AgeuponSecondPartInDays']\n\n#Удалим уж совсем бессмысленные промежуточные столбцы\ntrain = train.drop(['AgeuponSecondPartInDays', 'AgeuponSecondPart', 'AgeuponFirstPart'], axis=1)\ntrain.head()", "<b>Добавим новые признаки в test по-аналогии</b>", "#Сначала по-аналогии с визуализацией\n\n#Заменим строки, где в SexuponOutcome, Breed, Color 
NaN\ntest.loc[test.SexuponOutcome.isnull(), 'SexuponOutcome'] = 'Unknown Unknown'\ntest.loc[test.AgeuponOutcome.isnull(), 'AgeuponOutcome'] = '0 0'\ntest.loc[test.Breed.isnull(), 'Breed'] = 'Unknown'\ntest.loc[test.Color.isnull(), 'Color'] = 'Unknown'\n\n#Сделаем два отдельных столбца для пола и фертильности\ntest['Gender'] = test.SexuponOutcome.apply(lambda s: s.split(' ')[-1])\ntest['Fertility'] = test.SexuponOutcome.apply(lambda s: s.split(' ')[0])\n\n#Теперь что-то новое\n\n#Столбец, в котором отмечено, есть имя у животного или нет\ntest['hasName'] = 1\ntest.loc[test.Name.isnull(), 'hasName'] = 0\n\n#Столбец, в котором объединены порода и цвет\ntest['breedColor'] = test.apply(lambda row: row['Breed'] + ' ' + str(row['Color']), axis=1)\n\n#Декомпозируем DateTime\n#Во-первых, конвертируем столбец в тип DateTime из строкового\ntest['DateTime'] = pd.to_datetime(test['DateTime'])\n#А теперь декомпозируем\ntest['dayOfWeek'] = test.DateTime.apply(lambda dt: dt.dayofweek)\ntest['month'] = test.DateTime.apply(lambda dt: dt.month)\ntest['day'] = test.DateTime.apply(lambda dt: dt.day)\ntest['quarter'] = test.DateTime.apply(lambda dt: dt.quarter)\ntest['hour'] = test.DateTime.apply(lambda dt: dt.hour)\ntest['minute'] = test.DateTime.apply(lambda dt: dt.hour)\ntest['year'] = test.DateTime.apply(lambda dt: dt.year)\n\n#Разбиение возраста\n#Сделаем два отдельных столбца для обозначения года/месяца и их количества\ntest['AgeuponFirstPart'] = test.AgeuponOutcome.apply(lambda s: s.split(' ')[0])\ntest['AgeuponSecondPart'] = test.AgeuponOutcome.apply(lambda s: s.split(' ')[-1])\n#Переведем примерно в среднем месяцы, годы и недели в дни с учетом окончаний s\ntest['AgeuponSecondPartInDays'] = 0\ntest.loc[test.AgeuponSecondPart == 'year', 'AgeuponSecondPartInDays'] = 365\ntest.loc[test.AgeuponSecondPart == 'years', 'AgeuponSecondPartInDays'] = 365\ntest.loc[test.AgeuponSecondPart == 'month', 'AgeuponSecondPartInDays'] = 30\ntest.loc[test.AgeuponSecondPart == 'months', 'AgeuponSecondPartInDays'] = 30\ntest.loc[test.AgeuponSecondPart == 'week', 'AgeuponSecondPartInDays'] = 7\ntest.loc[test.AgeuponSecondPart == 'weeks', 'AgeuponSecondPartInDays'] = 7\n#Во-первых, конвертируем столбец в числовой тип из строкового\ntest['AgeuponFirstPart'] = pd.to_numeric(test['AgeuponFirstPart'])\ntest['AgeuponSecondPartInDays'] = pd.to_numeric(test['AgeuponSecondPartInDays'])\n\n#А теперь получим нормальное время жизни в днях\ntest['LifetimeInDays'] = test['AgeuponFirstPart'] * test['AgeuponSecondPartInDays']\n\n#Удалим уж совсем бессмысленные промежуточные столбцы\ntest = test.drop(['AgeuponSecondPartInDays', 'AgeuponSecondPart', 'AgeuponFirstPart'], axis=1)\n\ntest.head()", "<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задание 3.</h3> \n </div>\n</div>\n\nВыполните отбор признаков, попробуйте различные методы. Проверьте качество на кросс-валидации. 
\nВыведите топ самых важных и самых незначащих признаков.\nПредобработка данных", "np.random.seed = 1234\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import preprocessing\n\n#####################Заменим NaN значения на слово Unknown##################\n#Уберем Nan значения из train\ntrain.loc[train.AnimalID.isnull(), 'AnimalID'] = 'Unknown'\ntrain.loc[train.Name.isnull(), 'Name'] = 'Unknown'\ntrain.loc[train.OutcomeType.isnull(), 'OutcomeType'] = 'Unknown'\ntrain.loc[train.AnimalType.isnull(), 'AnimalType'] = 'Unknown'\ntrain.loc[train.AgeuponOutcome.isnull(), 'AgeuponOutcome'] = 'Unknown'\ntrain.loc[train.LifetimeInDays.isnull(), 'LifetimeInDays'] = 'Unknown'\n\n#Уберем Nan значения из test\ntest.loc[test.AnimalID.isnull(), 'AnimalID'] = 'Unknown'\ntest.loc[test.Name.isnull(), 'Name'] = 'Unknown'\ntest.loc[test.AnimalType.isnull(), 'AnimalType'] = 'Unknown'\ntest.loc[test.AgeuponOutcome.isnull(), 'AgeuponOutcome'] = 'Unknown'\ntest.loc[test.LifetimeInDays.isnull(), 'LifetimeInDays'] = 'Unknown'\n\n#####################Закодируем слова числами################################\n\n#Закодировали AnimalID цифрами вместо названий в test & train\n#encAnimalID = preprocessing.LabelEncoder()\n#encAnimalID.fit(pd.concat((test['AnimalID'], train['AnimalID'])))\n#test['AnimalID'] = encAnimalID.transform(test['AnimalID'])\n#train['AnimalID'] = encAnimalID.transform(train['AnimalID'])\n\n#Закодировали имя цифрами вместо названий в test & train\nencName = preprocessing.LabelEncoder()\nencName.fit(pd.concat((test['Name'], train['Name'])))\ntest['Name'] = encName.transform(test['Name'])\ntrain['Name'] = encName.transform(train['Name'])\n\n#Закодировали DateTime цифрами вместо названий в test & train\nencDateTime = preprocessing.LabelEncoder()\nencDateTime.fit(pd.concat((test['DateTime'], train['DateTime'])))\ntest['DateTime'] = encDateTime.transform(test['DateTime'])\ntrain['DateTime'] = encDateTime.transform(train['DateTime'])\n\n#Закодировали OutcomeType цифрами вместо названий в train, т.к. 
в test их нет\nencOutcomeType = preprocessing.LabelEncoder()\nencOutcomeType.fit(train['OutcomeType'])\ntrain['OutcomeType'] = encOutcomeType.transform(train['OutcomeType'])\n\n#Закодировали AnimalType цифрами вместо названий в test & train\nencAnimalType = preprocessing.LabelEncoder()\nencAnimalType.fit(pd.concat((test['AnimalType'], train['AnimalType'])))\ntest['AnimalType'] = encAnimalType.transform(test['AnimalType'])\ntrain['AnimalType'] = encAnimalType.transform(train['AnimalType'])\n\n#Закодировали SexuponOutcome цифрами вместо названий в test & train\nencSexuponOutcome = preprocessing.LabelEncoder()\nencSexuponOutcome.fit(pd.concat((test['SexuponOutcome'], train['SexuponOutcome'])))\ntest['SexuponOutcome'] = encSexuponOutcome.transform(test['SexuponOutcome'])\ntrain['SexuponOutcome'] = encSexuponOutcome.transform(train['SexuponOutcome'])\n\n#Закодировали AgeuponOutcome цифрами вместо названий в test & train\nencAgeuponOutcome = preprocessing.LabelEncoder()\nencAgeuponOutcome.fit(pd.concat((test['AgeuponOutcome'], train['AgeuponOutcome'])))\ntest['AgeuponOutcome'] = encAgeuponOutcome.transform(test['AgeuponOutcome'])\ntrain['AgeuponOutcome'] = encAgeuponOutcome.transform(train['AgeuponOutcome'])\n\n#Закодировали Breed цифрами вместо названий в test & train\nencBreed = preprocessing.LabelEncoder()\nencBreed.fit(pd.concat((test['Breed'], train['Breed'])))\ntest['Breed'] = encBreed.transform(test['Breed'])\ntrain['Breed'] = encBreed.transform(train['Breed'])\n\n#Закодировали Color цифрами вместо названий в test & train\nencColor = preprocessing.LabelEncoder()\nencColor.fit(pd.concat((test['Color'], train['Color'])))\ntest['Color'] = encColor.transform(test['Color'])\ntrain['Color'] = encColor.transform(train['Color'])\n\n#Закодировали Gender цифрами вместо названий в test & train\nencGender = preprocessing.LabelEncoder()\nencGender.fit(pd.concat((test['Gender'], train['Gender'])))\ntest['Gender'] = encGender.transform(test['Gender'])\ntrain['Gender'] = encGender.transform(train['Gender'])\n\n#Закодировали Fertility цифрами вместо названий в test & train\nencFertility = preprocessing.LabelEncoder()\nencFertility.fit(pd.concat((test['Fertility'], train['Fertility'])))\ntest['Fertility'] = encFertility.transform(test['Fertility'])\ntrain['Fertility'] = encFertility.transform(train['Fertility'])\n\n#Закодировали breedColor цифрами вместо названий в test & train\nencbreedColor = preprocessing.LabelEncoder()\nencbreedColor.fit(pd.concat((test['breedColor'], train['breedColor'])))\ntest['breedColor'] = encbreedColor.transform(test['breedColor'])\ntrain['breedColor'] = encbreedColor.transform(train['breedColor'])\n\n####################################Предобработка#################################\nfrom sklearn.model_selection import cross_val_score\n#poly_features = preprocessing.PolynomialFeatures(3)\n\n#Подготовили данные так, что X_tr - таблица без AnimalID и OutcomeType, а в y_tr сохранены OutcomeType\nX_tr, y_tr = train.drop(['AnimalID', 'OutcomeType'], axis=1), train['OutcomeType']\n\n#Типа перевели dataFrame в array и сдалали над ним предварительную обработку\n#X_tr = poly_features.fit_transform(X_tr)\nX_tr.head()", "Статистические тесты", "from sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2, f_classif, mutual_info_classif\n\nskb = SelectKBest(mutual_info_classif, k=15)\nx_new = skb.fit_transform(X_tr, y_tr)\n\nx_new", "Методы обертки", "from sklearn.feature_selection import RFE\nfrom sklearn.linear_model import LinearRegression\n\nnames = 
X_tr.columns.values\nlr = LinearRegression()\nrfe = RFE(lr, n_features_to_select=1)\nrfe.fit(X_tr,y_tr);\nprint(\"Features sorted by their rank:\")\nprint(sorted(zip(map(lambda x: round(x, 4), rfe.ranking_), names)))", "Отбор при помощи модели Lasso", "from sklearn.linear_model import Lasso\nclf = Lasso()\nclf.fit(X_tr, y_tr);\nclf.coef_\n\nfeatures = X_tr.columns.values\nprint('Всего Lasso выкинуло %s переменных' % (clf.coef_ == 0).sum())\nprint('Это признаки:')\nfor s in features[np.where(clf.coef_ == 0)[0]]:\n print(' * ', s)", "Отбор при помощи модели RandomForest", "from sklearn.ensemble import RandomForestRegressor\nclf = RandomForestRegressor()\nclf.fit(X_tr, y_tr);\nclf.feature_importances_\n\nimp_feature_idx = clf.feature_importances_.argsort()\nimp_feature_idx\n\nfeatures = X_tr.columns.values\n\nk = 0\n\nwhile k < len(features):\n print(features[k], imp_feature_idx[k])\n k += 1", "<b>Вывод по признакам:</b>\n<br>\n<b>Не нужны:</b> Name, DateTime, month, day, Breed, breedColor. Всё остальное менее однозначно, можно и оставить.\n<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задание 4.</h3> \n </div>\n</div>\n\nПопробуйте смешать разные модели с помощью <b>sklearn.ensemble.VotingClassifier</b>. Увеличилась ли точность? Изменилась ли дисперсия?", "#Для начала выкинем ненужные признаки, выявленные на прошлом этапе\nX_tr = X_tr.drop(['Name', 'DateTime', 'month', 'day', 'Breed', 'breedColor'], axis=1)\ntest = test.drop(['Name', 'DateTime', 'month', 'day', 'Breed', 'breedColor'], axis=1)\nX_tr.head()\n\nfrom sklearn.ensemble import VotingClassifier\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\nclf1 = LogisticRegression(random_state=1234)\nclf3 = GaussianNB()\nclf5 = KNeighborsClassifier()\n\neclf = VotingClassifier(estimators=[\n ('lr', clf1), ('gnb', clf3), ('knn', clf5)],\n voting='soft', weights=[1,1,10])\n\nscores = cross_val_score(eclf, X_tr, y_tr)\n\neclf = eclf.fit(X_tr, y_tr)\n\nprint('Best score:', scores.min())\n\n#delete AnimalID from test\nX_te = test.drop(['AnimalID'], axis=1)\nX_te.head()\n\ny_te = eclf.predict(X_te)\ny_te\n\nans_nn = pd.DataFrame({'AnimalID': test['AnimalID'], 'type': encOutcomeType.inverse_transform(y_te)})\nans_nn.head()\n\n#Зададим функцию для трансформации\ndef onehot_encode(df_train, column):\n from sklearn.preprocessing import LabelBinarizer\n \n cs = df_train.select_dtypes(include=['O']).columns.values\n if column not in cs:\n return (df_train, None)\n\n rest = [x for x in df_train.columns.values if x != column]\n\n lb = LabelBinarizer()\n train_data = lb.fit_transform(df_train[column])\n \n new_col_names = ['%s' % x for x in lb.classes_]\n\n if len(new_col_names) != train_data.shape[1]:\n new_col_names = new_col_names[::-1][:train_data.shape[1]]\n\n new_train = pd.concat((df_train.drop([column], axis=1), pd.DataFrame(data=train_data, columns=new_col_names)), axis=1)\n return (new_train, lb)\n\nans_nn, lb = onehot_encode(ans_nn, 'type')\n\nans_nn\nans_nn.head()", "Проверим, что никакие строчки при манипуляции с NaN не потерялись", "test.shape[0] == ans_nn.shape[0]\n\n#Сделаем нумерацию индексов не с 0, а с 1\nans_nn.index += 1 \n#Воткнем столбец с индексами как столбец в конкретное место\nans_nn.insert(0, 'ID', ans_nn.index)\n#delete AnimalID from test\nans_nn = ans_nn.drop(['AnimalID'], 
axis=1)\nans_nn.head()\n\n#Сохраним\nans_nn.to_csv('ans_catdog.csv', index=False)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.20/_downloads/a4d4c1a667c2374c09eed24ac047d840/plot_decoding_csp_eeg.ipynb
bsd-3-clause
[ "%matplotlib inline", "Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)\nDecoding of motor imagery applied to EEG data decomposed using CSP.\nHere the classifier is applied to features extracted on CSP filtered signals.\nSee https://en.wikipedia.org/wiki/Common_spatial_pattern and [1]. The EEGBCI\ndataset is documented in [2]. The data set is available at PhysioNet [3]_.\nReferences\n.. [1] Zoltan J. Koles. The quantitative extraction and topographic mapping\n of the abnormal components in the clinical EEG. Electroencephalography\n and Clinical Neurophysiology, 79(6):440--447, December 1991.\n.. [2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface\n (BCI) System. IEEE TBME 51(6):1034-1043.\n.. [3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,\n Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,\n PhysioToolkit, and PhysioNet: Components of a New Research Resource for\n Complex Physiologic Signals. Circulation 101(23):e215-e220.", "# Authors: Martin Billinger <martin.billinger@tugraz.at>\n#\n# License: BSD (3-clause)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import ShuffleSplit, cross_val_score\n\nfrom mne import Epochs, pick_types, events_from_annotations\nfrom mne.channels import make_standard_montage\nfrom mne.io import concatenate_raws, read_raw_edf\nfrom mne.datasets import eegbci\nfrom mne.decoding import CSP\n\nprint(__doc__)\n\n# #############################################################################\n# # Set parameters and read data\n\n# avoid classification of evoked responses by using epochs that start 1s after\n# cue onset.\ntmin, tmax = -1., 4.\nevent_id = dict(hands=2, feet=3)\nsubject = 1\nruns = [6, 10, 14] # motor imagery: hands vs feet\n\nraw_fnames = eegbci.load_data(subject, runs)\nraw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])\neegbci.standardize(raw) # set channel names\nmontage = make_standard_montage('standard_1005')\nraw.set_montage(montage)\n\n# strip channel names of \".\" characters\nraw.rename_channels(lambda x: x.strip('.'))\n\n# Apply band-pass filter\nraw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')\n\nevents, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))\n\npicks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,\n exclude='bads')\n\n# Read epochs (train will be done only between 1 and 2s)\n# Testing will be done with a running classifier\nepochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,\n baseline=None, preload=True)\nepochs_train = epochs.copy().crop(tmin=1., tmax=2.)\nlabels = epochs.events[:, -1] - 2", "Classification with linear discrimant analysis", "# Define a monte-carlo cross-validation generator (reduce variance):\nscores = []\nepochs_data = epochs.get_data()\nepochs_data_train = epochs_train.get_data()\ncv = ShuffleSplit(10, test_size=0.2, random_state=42)\ncv_split = cv.split(epochs_data_train)\n\n# Assemble a classifier\nlda = LinearDiscriminantAnalysis()\ncsp = CSP(n_components=4, reg=None, log=True, norm_trace=False)\n\n# Use scikit-learn Pipeline with cross_val_score function\nclf = Pipeline([('CSP', csp), ('LDA', lda)])\nscores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)\n\n# Printing the 
results\nclass_balance = np.mean(labels == labels[0])\nclass_balance = max(class_balance, 1. - class_balance)\nprint(\"Classification accuracy: %f / Chance level: %f\" % (np.mean(scores),\n class_balance))\n\n# plot CSP patterns estimated on full data for visualization\ncsp.fit_transform(epochs_data, labels)\n\ncsp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5)", "Look at performance over time", "sfreq = raw.info['sfreq']\nw_length = int(sfreq * 0.5) # running classifier: window length\nw_step = int(sfreq * 0.1) # running classifier: window step size\nw_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)\n\nscores_windows = []\n\nfor train_idx, test_idx in cv_split:\n y_train, y_test = labels[train_idx], labels[test_idx]\n\n X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)\n X_test = csp.transform(epochs_data_train[test_idx])\n\n # fit classifier\n lda.fit(X_train, y_train)\n\n # running classifier: test classifier on sliding window\n score_this_window = []\n for n in w_start:\n X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])\n score_this_window.append(lda.score(X_test, y_test))\n scores_windows.append(score_this_window)\n\n# Plot scores over time\nw_times = (w_start + w_length / 2.) / sfreq + epochs.tmin\n\nplt.figure()\nplt.plot(w_times, np.mean(scores_windows, 0), label='Score')\nplt.axvline(0, linestyle='--', color='k', label='Onset')\nplt.axhline(0.5, linestyle='-', color='k', label='Chance')\nplt.xlabel('time (s)')\nplt.ylabel('classification accuracy')\nplt.title('Classification score over time')\nplt.legend(loc='lower right')\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jgomezc1/medios
NOTEBOOKS/Ej4_Eingen.ipynb
mit
[ "Ejemplo 4. Valores y vectores propios\nSi el tensor de esfuerzos en un punto $P$, en el sistema de referencia $X,Y,Z$ está definidido por:\n$$\\begin{align}\n\\\n&\\sigma_{xx} = 200\\dfrac{kgf}{cm^2}; \\;\\;\\; \\sigma_{yy} =0\\dfrac{kgf}{cm^2}; \\;\\;\\; \\sigma_{zz} = 0\\dfrac{kgf}{cm^2} \\\\\n&\\tau_{xy} = \\tau_{yx} =100\\dfrac{kgf}{cm^2}, \\;\\;\\; \\tau_{xz} = \\tau_{zx} =300\\dfrac{kgf}{cm^2}; \\;\\;\\;\\tau_{yz} = \\tau_{zy} = 0 \\dfrac{kgf}{cm^2}\\\\\n\\end{align}$$\n\nDetermine los valores y direccionees principales:", "from IPython.display import Image,Latex\n#Image()\nImage(filename='FIGURES/Sorigen.png',width=400)", "Solución:\nInicialmente encontremos los valores principales $(\\lambda)$ a partir de la solución del polinomio característico: \n${\\lambda ^3} - {I_\\sigma}{\\lambda ^2} + {II_\\sigma}\\lambda - {III_\\sigma} = 0$\nDonde ${I_\\sigma}$, ${II_\\sigma}$ y ${III_\\sigma}$ son los invariantes 1, 2 y 3 respectivamente que están dados por: \n${I_\\sigma } = {\\sigma {xx}} + {\\sigma {yy}} + {\\sigma _{zz}}$\n${II_\\sigma } = {\\sigma {xx}}{\\sigma {yy}} + {\\sigma {xx}}{\\sigma {zz}} + {\\sigma {zz}}{\\sigma {yy}} - \\tau {xy}^2 - \\tau {xz}^2 - \\tau _{yz}^2$\n${III_\\sigma } = {\\sigma {xx}}{\\sigma {yy}}{\\sigma {zz}} + 2{\\tau {xy}}{\\tau {xz}}{\\tau {yz}} - {\\sigma {xx}}\\tau {yz}^2 - {\\sigma {yy}}\\tau {xz}^2 - {\\sigma {zz}}\\tau {xy}^2$", "import numpy as np\nfrom scipy import linalg\n\nS = np.array([\n [200,100,300.],\n [100,0,0],\n [300,0,0]])\n\nIS = S[0,0]+S[1,1]+S[2,2]\nIIS = S[0,0]*S[1,1]+S[1,1]*S[2,2]+S[0,0]*S[2,2]-(S[0,1]**2)-(S[0,2]**2)-(S[1,2]**2)\nIIIS = S[0,0]*S[1,1]*S[2,2]-S[0,0]*(S[1,2]**2)-S[1,1]*(S[0,2]**2)-S[2,2]*(S[0,1]**2)+2*S[1,2]*S[0,2]*S[0,1]\nprint\nprint 'Invariantes:', IS,IIS,IIIS\nprint ", "Resolviendo vía polinomio característico:", "coeff=[1.0,-IS,IIS,-IIIS]\nps=np.roots(coeff)\n\nprint\nprint \"Esfuerzos principales:\", np.sort(np.round(ps,1))\nprint", "Resolviendo vía librerías python con linalg.eig podemos encontrar valores (la) y direcciones principales (n) simultaneamente", "la, n= linalg.eigh(S)\nla = la.real\nprint\nprint \"Esfuerzos principales:\", np.round(la,1)\nprint\n#print S\n\nprint \nprint 'n=', np.round(n,2)\nprint ", "De esta manera escribamos en tensor asociado a las direcciones principales:", "print\nSp = np.array([\n [la[0],0,0],\n [0,la[1],0],\n [0,0,la[2]]])\nprint 'Sp =',np.round(Sp,1)\nprint\nImage(filename='FIGURES/Sprinc.png',width=400)", "Los vectores $i'$, $j'$ y $k'$ están dados por:", "print \"i'=\", np.round(n[:,0],2)\nprint \"j'=\", np.round(n[:,1],2)\nprint \"k'=\", np.round(n[:,2],2)\nprint", "Verifiquemos que se cumplen los invariantes en el tensor asociado a direcciones principales:", "IS = Sp[0,0]+Sp[1,1]+Sp[2,2]\nIIS =Sp[0,0]*Sp[1,1]+Sp[1,1]*Sp[2,2]+Sp[0,0]*Sp[2,2]-(Sp[0,1]**2)-(Sp[0,2]**2)-(Sp[1,2]**2)\nIIIS =Sp[0,0]*Sp[1,1]*Sp[2,2]-Sp[0,0]*(Sp[1,2]**2)-Sp[1,1]*(Sp[0,2]**2)-Sp[2,2]*(Sp[0,1]**2)+2*Sp[1,2]*Sp[0,2]*Sp[0,1]\nprint\nprint 'Invariantes:', IS,IIS,IIIS\nprint", "Para terminar se debe de tener en cuenta que las direcciones principales no son otra cosa que la matriz de cosenos directores que transformaría el tensor original al tensor en direcciones principales mediante la ecuación de transformación: \n\\begin{align}\n&[\\sigma']=[C][\\sigma][C]^T\\\n\\end{align}\nTeniendo en cuenta que n es dado por vectores columna entonces la matriz de cosenos directores está dada por:\n\\begin{align}\n&[C] = [n]^T\n\\end{align}", "C = n.T\nSp2 = np.dot(np.dot(C,S),C.T)\nprint\nprint 'Sp =', 
np.round(Sp2,1)\n\nfrom IPython.core.display import HTML\ndef css_styling():\n styles = open('./custom_barba.css', 'r').read()\n return HTML(styles)\ncss_styling()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
judithyueli/pyFKF
.ipynb_checkpoints/FristExample-checkpoint.ipynb
mit
[ "Fast Kalman Filter for Temporal-spatial Data Analysis", "%matplotlib inline\n%load_ext autoreload\n%autoreload 2\nfrom CO2simulation import CO2simulation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport visualizeCO2 as vco2", "Tracking a CO$_2$ Plume\nCO$_2$ from an industrial site can be compressed and injected into a deep saline aquifer for storage. This technology is called CO$_2$ capture and storage or CCS, proposed in (TODO) to combat global warming. As CO$_2$ is lighter than the saline water, it may leak through a natural fracture and contanimate the drinking water. Therefore, monitoring and predicting the long term fate of CO$_2$ at the deep aquifer level is crucial as it will provide an early warning for the CO$_2$ leakage. The goal is to interprete the time-series data recorded in the seismic sensors into spatial maps of a moving CO$_2$ plume, a problem very similar to CT scanning widely used in medical imaging.\nThe goal is\n* Predict and monitor the location of CO$_2$ plume\n* \nSimulating the Movement of CO$_2$ Plume\nHere is a simulated CO$_2$ plume for $5$ days resulted from injecting $300$ tons of CO$_2$ at a depth of $1657m$.\n$$ x_{k+1} = f(x_k) + w $$\nrun code that displays the simulated moving CO$_2$ plume, stored the plume data in SQL?? (TODO)", "CO2 = CO2simulation('low')\ndata = []\nx = []\nfor i in range(10):\n data.append(CO2.move_and_sense())\n x.append(CO2.x)\n\nparam = vco2.getImgParam('low')\nvco2.plotCO2map(x,param)\nplt.show()", "Simulating the Sensor Measurement\nThe sensor measures the travel time of a seismic signal from a source to a receiver.\n$$ y = Hx + v $$\n$x$ is the grid block value of CO$_2$ slowness, an idicator of how much CO$_2$ in a block. The product $Hx$ simulate the travel time measurements by integrating $x$ along a raypath. $v$ is the measurement noise.\nThe presence of CO$_2$ slows down the seismic signal and increases its travel time along a ray path. If the ray path does not intercepts the CO$_2$ plume, the travel time remains constant over time (Ray path 1), otherwise it tends to increase once the CO$_2$ plume intercepts the ray path (Ray path 2).", "reload(visualizeCO2)\nvco2.plotCO2data(data,0,47)", "TODO:\n\nFig: Run animation/image of the ray path (shooting from one source and receiver) on top of a CO$_2$ plume and display the travel time changes over time.\n\nFig: Show the time-series data (Path 1 and Path 2) at a receiver with and without noise.\n\n\noptional: run getraypath will give me all the index of the cells and the length of the ray path within each cell, this can help me compute the travel time along this particular ray path\n\n\nKalman filtering\nInitialization step\nDefine $x$, $P$. Before injection took place, there was no CO$_2$ in the aquifer.", "np.dot(1,5)\n\nrun runCO2simulation", "Implementing the Prediction Step\n$$ x_{k+1} = x_{k} + w_k $$\nNote here a simplified Random Walk forecast model is used to substitute $f(x)$. The advantage of using a random walk forecast model is that now we are dealing with a linear instead of nonlinear filtering problem, and the computational cost is much lower as we don't need to evaluate $f(x)$. However, when $dt$ is very large, this random walk forecast model will give poor predictions, and the prediction error cannot be well approximated by $w_k\\approx N(0,Q)$, a zero mean Gaussian process noise term. 
Therefore, the random walk forecast model is only useful when the measuremetns are sampled at a high frequency, and $Q$ has to be seclected to reflect the true model error.", "from filterpy.common import Q_discrete_white_noise\nkf.F = np.diag(np.ones(dim_x))\n# kf.Q = Q_discrete_white_noise(dim = dim_x, dt = 0.1, var = 2.35)\nkf.Q = 2.5\nkf.predict()\nprint kf.x[:10]", "Implementing the Update Step", "kf.H = CO2.H_mtx\nkf.R *= 0.5\nz = data[0]\nkf.update(z)", "TODO\n- Fig: Estimate at k, Forecast at k+1, Estimate at k+1, True at k+1\n- A table showing:\n x: the time CO2 reaches the monitoring well\n y: the time CO2 reaches the ray path\n PREDICT: x var y UPDATE: x var y\n- Fig: MSE vs time\n- Fig: Data fitting, slope 45 degree indicates a perfect fit\nUse HiKF instead of KF", "from HiKF import HiKF\nhikf = HiKF(dim_x, dim_z)\nhikf.x", "Filter design\nScalability\nKF has a quadratic cost ($\\mathcal{O}(N^2)$), which means for a typical problem size of $10^6$ the Kalman filter will take $80$ days to solve. \nTODO\n- Table: running time and computational storage cost comparison between KF and HiKF\nChoose $Q$ that represents the model error\nBy choosing an appropriate $Q/R$ ratio to optimize the filter preformance" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
pierresendorek/tensorflow_crescendo
tensorflow_fit_gaussian_mixture_model.ipynb
lgpl-3.0
[ "import numpy as np\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nn_samples = 10000", "Coefficients to find", "w_true = [3,3,3]\nw_true = w_true / np.sum(w_true)\n\nmu_true = [3,10,20]\n\nsigma_true = [2,4,1]", "Sampling the distribution", "def draw_from_gaussian_mixture(w, mu, sigma, n_samples):\n samples = []\n for i in range(n_samples):\n idx_comp = np.random.multinomial(1,w).argmax()\n samples.append( np.random.randn()*sigma[idx_comp] + mu[idx_comp] )\n return samples\n\nsamples = np.array(draw_from_gaussian_mixture(w_true, mu_true, sigma_true, n_samples))\n\nplt.plot(plt.hist(samples, bins=100)[0][0])\n\nfrom scipy.stats import norm\n\ndef plot_gaussian_mixture(w,mu,sigma,color=\"b\"):\n x = np.linspace(-5,30,200)\n y = []\n for i in range(len(x)):\n z = x[i]\n s=0\n for i in range(3):\n s+= norm(loc= mu[i], scale = sigma[i]).pdf(z) * w[i]\n y.append(s)\n plt.plot(x,y, color=color)\n\nplot_gaussian_mixture(w_true, mu_true, sigma_true)", "Finding coefficients with Tensorflow", "import tensorflow as tf", "Loss function", "import math\n\noneDivSqrtTwoPI = tf.constant(1 / math.sqrt(2*math.pi)) # normalisation factor for gaussian, not needed.\nmy_epsilon = tf.constant(1e-14)\ndef tf_normal(y, mu, sigma):\n result = tf.subtract(y, mu)\n result = tf.divide(result,sigma)\n result = -tf.square(result)/2\n return tf.divide(tf.exp(result),sigma)*oneDivSqrtTwoPI\n\n# On utilise un signe moins pour minimiser moins l'entropie\n\ndef get_density(out_pi, out_sigma, out_mu, y):\n result = tf_normal(y, out_mu, out_sigma)\n result = tf.multiply(result, out_pi)\n result = tf.reduce_sum(result, 1, keep_dims=True)\n return result\n\ndef get_lossfunc(out_pi, out_sigma, out_mu, y):\n result = get_density(out_pi, out_sigma, out_mu, y)\n result = -tf.log(result + my_epsilon) \n return tf.reduce_mean(result)\n\ndef get_mixture_coef(theta):\n out_pi, out_sigma, out_mu = tf.split(theta, num_or_size_splits=3,axis=1)\n max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)\n out_pi = tf.subtract(out_pi, max_pi)\n out_pi = tf.exp(out_pi)\n normalize_pi = tf.divide(out_pi, tf.reduce_sum(out_pi, axis=1, keep_dims=True))\n out_sigma = tf.exp(out_sigma)\n return normalize_pi, out_sigma, out_mu\n\ntheta = tf.Variable(tf.random_normal([1,9], stddev=1.0, dtype=tf.float32), name=\"theta\")\n\nout_pi, out_sigma, out_mu = get_mixture_coef(theta)\n\nsamples_tf = tf.placeholder(dtype=tf.float32, shape=[None,1], name=\"samples\")\n\nloss = get_lossfunc(out_pi, out_sigma, out_mu, samples_tf)", "Optimizer", "train_op = tf.train.AdamOptimizer(learning_rate=0.05, epsilon=1E-12).minimize(loss)", "Init Session", "sess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\n\ndef do(x):\n return sess.run(x, feed_dict={samples_tf: samples.reshape(-1,1)})\n\nloss_list = []\n\nsess.run(get_density(out_pi, out_sigma, out_mu, samples_tf), feed_dict={samples_tf: samples.reshape(-1,1)})\n\nfor i in range(2000):\n sess.run(train_op, feed_dict={samples_tf:samples.reshape(-1,1)})\n loss_val = sess.run(loss, feed_dict={samples_tf:samples.reshape(-1,1)})\n loss_list.append(loss_val)\n\nplt.plot(np.log10(loss_list))\n\nout_pi, out_sigma, out_mu = do(get_mixture_coef(theta))\n\nplot_gaussian_mixture(out_pi[0],out_mu[0],out_sigma[0],\"r\")\nplot_gaussian_mixture(w_true, mu_true, sigma_true,\"b\")", "Commentaires\nLa densité de probabilité estimée (courbe rouge) ressemble beaucoup à la densité de probabilité que l'on voulait obtenir (courbe bleue).\nMission accomplie !\nTodo\n\nLe vecteur de paramètres doit être bien 
initialisé que la densité initiale couvre assez bien tous les échantillons. \nFaire une fonction pour mieux le paramétrer.\nUtiliser le stochastic gradient descent pour éviter les maxima locaux." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hughperkins/pub-prototyping
maths/logs.ipynb
apache-2.0
[ "Logs\n$$\n\\log(ab) = \\log(a) + \\log(b)\n$$", "import math\n\nprint(math.log(0.2) + math.log(0.5))\nprint(math.log(0.2 * 0.5))", "$$\n\\log \\exp(a) = a\n$$\n$$\n\\log (\\exp(a) \\exp(b))\n= \\log(\\exp(a)) + \\log(\\exp(b))\n= a + b\n$$", "print(math.log(math.exp(0.2) * math.exp(0.7)))\nprint(0.2 + 0.7)", "$$\n\\log \\frac{\\exp(a)}{\\exp(b)}\n= \\log(\\exp(a)) - \\log(\\exp(b))\n= a - b\n$$\nUsing product rule, and confirming from https://en.wikipedia.org/wiki/Exponential_function:\n$$\n\\frac{d}{dx}\n\\exp(f(x))\n= \\frac{df(x)}{dx}\n\\exp(f(x))\n$$" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
yala/introdeeplearning
draft/part1.ipynb
mit
[ "import cPickle as p\nimport numpy as np\nimport tensorflow as tf\nimport word2vec\nimport random", "NLP Lab, Part I\nWelcome to the first lab of 6.S191!\nAdministrivia\nThings to install:\n- tensorflow\n- word2vec\nLab Objectives:\n\nLearn Machine Learning methodology basics (train/dev/test sets)\nLearn some Natural Language Processing basics (word embeddings with word2vec) \nLearn the basics of tensorflow, build your first deep neural nets (MLP, RNN) and get results!\n\nAnd we'll be doing all of this in te context of Twitter sentiment analysis. Given a tweet like:\nomg 6.S196 is so cool #deeplearning #mit\nWe want an algorithm to label this tweet as positive or negative. It's intractable to try to solve this task via some lexical rules, so instead, we're going to use deep learning to embed these tweets into some deep latent space where distinguishing between the two is realtively simple.\nMachine Learning Basics\nGiven some dataset with tweets $X$, and sentiments $Y$, we want to learn a function $f$, such that $Y = f(X)$.\nIn our context, $f$ is deep neural network parameterized by some network weights $\\Theta$, and we're going to do our learning via gradient descent. \nObjective Function\nTo start, we need someway to measure how good our $f$ is, so we can take a gradient in respective to that performance and move in the right direction. We call this performance evaluation our Loss function, L , and this is something we want to minimize. \nSince we are doing classification (pos vs neg), a common loss function to use is cross entropy.\n$$L( \\Theta ) = - \\Sigma_i ( f(x_i)*log(y_i) + (1-f(x_i))log(1-y_i) ) $$ where $f(x)$ is the probablity a tweet $x$ is positive, which we want to be 1 given it's postive and 0 given that it's negative and $y$ is the correct answer. We can access this function with tf.nn.sigmoid_cross_entropy_with_logits, which will come handy in code. Given that $f$ is parameterized by $\\Theta$, we can take the gradient $\\frac{dL}{d\\Theta}$, and we learn by updating our parameters to minimize the loss.\nNote that this loss is 0 if the prediction is correct and very large if we predict something has 0 probablity of being positive when it is positive.\nMethodology\nTo measure how well we're doing, we can't just look at how well our model performs on it's training data. It could be just memorizing the training data and perform terribly on data it hasn't seen before. To really measure how $f$ performs in the wild, we need to present it with unseen data, and we can tune our hyper-parameters (like learning rate, num layers etc.) over this first unseen set, which we call our development (or validation) set. However, given that we optimized our hyper-parameters to the development set, to get a true fair assesment of the model, we test it in respect to a held-out test set at the end, and generaly report those numbers.\nIn summary:\nNamely, we training on one set, i.e. a training set,\nevaluate and tune our hyper paremeters in regards to our performance on the dev set,\nand report finals results on a completely heldout test set. \nLet's load these now, this ratio of sizes if fairly standard.", "trainSet = p.load( open('data/train.p','rb'))\ndevSet = p.load( open('data/dev.p','rb'))\ntestSet = p.load( open('data/test.p','rb'))\n\n## Let's look at the size of what we have here. 
Note, we could use a much larger train set, \n## but we keep it mid-size so you can run this whole thing off your laptop\nlen(trainSet), len(devSet), len(testSet)\n", "NLP Basics\nThe first question we need to address is how do we represent a tweet? how do we represent a word?\nOne way to do this is with one_hot vectors for each word. Where a given word $w_i= [0,0,...,1,..0]$.\nHowever, in this representation, words like \"love\" and \"adore\" are as similar as \"love\" and \"hate\", because the cosine similarity is 0 in both cases. Another issue is that these vectors are huge in order to represent the whole vocab. To get around this issue the NLP community developed a techique called Word Embeddings. \nWord2Vec\nThe basic idea is we represent a word with a vector $\\phi$ by the context the word appears in. By training a neural network to predict the context of words across a large training set, we can use the weights of that neural networks to get a dense, and useful representation that captures context. Word Embeddings capture all kinds of useful semantic relationships. For example, one cool emergent property is $ \\phi(king) - \\phi(queen) = \\phi(man) - \\phi(woman)$. To learn more about the magic behind word embeddings we recommend Chris Colahs \"blog post\". A common tool for generating Word Embeddings is word2vec, which is what we'll be using today.", "## Note, these tweets were preprocessings to remove non alphanumeric chars, replace unfrequent words, and padded to same length.\n## Note, we're going to train our embeddings on only our train set in order to keep our dev/test tests fair \ntrainSentences = [\" \".join(tweetPair[0]) for tweetPair in trainSet]\nprint trainSentences[0]\np.dump(trainSentences, open('data/trainSentences.p','wb'))\n## Word2vec module expects a file containing a list of strings, a target to store the model, and then the size of the\n## embedding vector\nword2vec.word2vec('data/trainSentences.p','data/emeddings.bin', 100, verbose=True)\n\nw2vModel = word2vec.load('data/emeddings.bin')\nprint w2vModel.vocab\n\n## Each word looks something like represented by a 100 dimension vector like this\nprint \"embedding for the word fun\", w2vModel['fun']", "Now lets look at the words most similar to the word \"fun\"", "indices, cosineSim = w2vModel.cosine('fun')\nprint w2vModel.vocab[indices]\n\nword_embeddings = w2vModel.vectors\nvocab_size = len(w2vModel.vocab)", "Feel free to play around here test the properties of your embeddings, how they cluster etc. In the interest of time, we're going to move on straight to models.\nNow in order to use these embeddings, we have to represent each tweet as a list of indices into the embedding matrix.\nThis preprocessing code is available in processing.py if you are interested. \nTensorflow Basics\nTensorflow is a hugely popular library for building neural nets. 
The general workflow in building models in tensorflow is as follows:\n- Specify a computation graph (The struture and computations of your neural net)\n- Use your session to feed data into the graph and fetch things from the graph (like the loss, and train operation)\nInside the graph, we put our neural net, our loss function, and our optimizer and once this is constructed, we can feed in the data, fetch the loss and the train op to train it.\nHere is a toy example putting 2 and 2 together, and initializing some random weight matrix.", "session = tf.Session()\n# 1.BUILD GRAPH\n# Set placeholders with a type for data you'll eventually feed in (like tweets and sentiments)\na = tf.placeholder(tf.int32)\nb = tf.placeholder(tf.int32)\n# Set up variables, like weight matrices. \n# Using tf.get_variable, specify the name, shape, type and initliazer of the variable.\nW = tf.get_variable(\"ExampleMatrix\", [2, 2], tf.float32, tf.random_normal_initializer(stddev=1.0 / 2))\n# Set up the operations you need, like matrix multiplications, non-linear layers, and your loss function minimizer\nc = a*b\n# 2.RUN GRAPH\n# Initialize any variables you have (just W in this case)\ntf.global_variables_initializer().run(session=session)\n# Specify the values tensor you want returned, and ops you want run\nfetch = {'c':c, 'W':W}\n# Fill in the place holders\nfeed_dict = {\n a: 2,\n b: 2,\n}\n# Run and get back fetch filled in\nresults = session.run( fetch, feed_dict = feed_dict)\n\nprint( results['c'])\nprint( results['W'])\n# Close session\nsession.close()\n# Reset the graph so it doesn't get in the way later\ntf.reset_default_graph()", "Building an MLP\nMLP or Multi-layer perceptron is a basic archetecture where where we multiply our representation with some matrix W and add some bias b and then apply some nonlineanity like tanh at each layer. Layers are fully connected to the next. As the network gets deeper, it's expressive power grows exponentially and so they can draw some pretty fancy decision boundaries. In this exercise, you'll build your own MLP, with 1 hidden layer (layer that isn't input or output), with 100 dimensions.\nTo make training more stable and efficient, we'll do this we'll actually evalaute 20 tweets at a time, and take gradients and respect to the loss on the 20. We call this idea training with mini-batches.\nDefining the Graph\nStep 1: Placeholders, Variables with specified shapes\n\nLet start off with placeholders for our tweets, and lets use a minibatch of size 20.\nRemember each tweet is will be represented as a vector of sentence length (20) word_ids , and since we are packing mini-batch size number of tweets in the graph a time tweets per iteration, we need a matrix of minibatch * sentence length. Feel free to check out the placeholder api here\nSet up a placeholder for your labels, namely the mini-batch size length vector of sentiments.\nSet up a placeholder for our pretrained word embeddings. This will take shape vocab_size * embedding_size\nSet up a variable for your weight matrix, and bias. Check out the variable api here Let's use a hidden dimension size of 100 (so 100 neurons in the next layer) \nFor the Weight matrix, use tf.random_normal_initializer(stddev=1.0 / hidden_dim_size), as this does something called symetry breaking and keeps the neural network from getting stuck at the start.\nFor the bias vector, use tf.constant_initializer(0)", "\"TODO\"", "Step 2: Putting in the Operations\n\nLoad the word embeddings for the word ids. You can do this using tf.nn.embedding_lookup. 
Remember to use your embeddings placeholder. You should end up with a Tensor of dimensions batch_size * sentence_length * embedding size.\nTo represent a whole tweet, let's use a neural bag of words. This means we represent each tweet by the words that occur in it; it's a basic representation but gets us pretty far. To do this in a neural way, we can just average the embeddings in the tweet, leaving a single vector of embedding size for each tweet. You should end up with a Tensor of dimensions batch_size * embedding size.\nApply projection to the hidden layer of size 100 (ie. multiply the input by a weight vector and add a bias )\nApply a nonlinearity like tf.tanh\nProject this to the output layer of size 1 (ie. multiply the input by a wieght vector and add a bias). Put this in a python variable called logits.", "\"TODO\"", "Set up loss function, and optimizer to minimize it. We'll be using Adam as our optimizer", "## Make sure to call your output embedding logits, and your sentiments placeholder sentiments in python\nloss = tf.nn.sigmoid_cross_entropy_with_logits(logits, sentiments)\nloss = tf.reduce_sum(loss)\noptimizer = tf.train.AdamOptimizer(1e-2).minimize(loss)", "Run the Graph\nStep 3: Set up training, and fetch optimizer at each iteration to train the model\n\nFirst initialize all variables as in the toy example\nSample 20 random tweet,sentiment pairs for our feed_dict dictionary. Remember to feed in the embedding matrix.\nfetch dictionary, the ops we want to run and tensors we want back\nExecute this many times to train", "trainSet = p.load( open('data/trainTweets_preprocessed.p','rb'))\nrandom.shuffle(trainSet)\n\n\" TODO Init vars\"\n\nlosses = []\nfor i in range(5000):\n trainTweet = np.array( [ t[0] for t in trainSet[i: i+ minibatch_size]])\n trainLabels = np.array( [int(t[1]) for t in trainSet[i: i+ minibatch_size] ])\n \n results = \"TODO, run graph with data\"\n losses.append(results['loss'])\n if i % 500 == 0:\n print(\"Iteration\",i,\"Loss\", sum(losses[-500:-1])/500. if i > 0 else losses[-1])\n ", "Step 4: Check validation results, and tune\n\nTry running the graph on validation data, without fetching the train op.\nSee how the results compare. If the train loss is much lower than the development loss, we may be overfitting. If the train loss is still high, try experimenting with the model archetecture to increase it's capacity.", "validationSet = p.load( open('data/devTweets_preprocessed.p','rb'))\nrandom.shuffle(validationSet)\n\nlosses = []\nfor i in range(20000/20):\n valTweet = np.array( [ t[0] for t in validationSet[i: i+ minibatch_size]])\n valLabels = np.array( [int(t[1]) for t in validationSet[i: i+ minibatch_size] ])\n\n results = \"TODO\" \n losses.append(results['loss'])\nprint(\"Dev Loss\", sum(losses)*1./len(losses))", "Future Steps:\nThings to try on your own:\n- Adding in a tensor for accuracy, and log it at each step.\n- Iterate over whole validation dataset to get more stable validation score\n- Try tensorboard and graphing accuracy over both sets time.\n- experiment with different archetectures that maximize validation score. Maybe bag of words, which doesn't distinguish between \"bad not good\" and \"good not bad\" isn't a good enough representation. \n- test it on the test data\n- Do the RNN tutorial!\nSolutions!\nDo not look unless you really have to. Ask TA's for help first. 
Fight for the intuition, you'll get more out of it.", "# Step 1:\ntf.reset_default_graph()\nsession = tf.Session()\n\n\nminibatch_size = 20\ntweet_length = 20\nembedding_size = 100\nhidden_dim_size = 100\noutput_size = 1\ninit_bias = 0\n\ntweets = tf.placeholder(tf.int32, shape=[minibatch_size,tweet_length])\nsentiments = tf.placeholder(tf.float32, shape=[minibatch_size])\nembeddingMatrix = tf.placeholder(tf.float32, shape =[vocab_size, embedding_size] )\nW_hidden = tf.get_variable(\"W_hidden\", [embedding_size, hidden_dim_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / hidden_dim_size))\nb_hidden = tf.get_variable(\"b_hidden\", [hidden_dim_size], initializer=tf.constant_initializer(init_bias))\nW_output = tf.get_variable(\"W_output\", [hidden_dim_size, output_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / hidden_dim_size))\nb_output = tf.get_variable(\"b_output\", [output_size], initializer=tf.constant_initializer(init_bias))\n\n# Step 2:\ntweet_embedded = tf.nn.embedding_lookup(embeddingMatrix, tweets)\naveragedTweets = tf.reduce_mean(tweet_embedded, axis=1)\nhidden_proj = tf.matmul( averagedTweets, W_hidden) + b_hidden\nnon_linearity = tf.nn.tanh(hidden_proj)\nlogits = tf.matmul( non_linearity, W_output)+ b_output\nlogits = tf.reshape(logits, shape=[minibatch_size])\n\n## Make sure to call your output embedding logits, and your sentiments placeholder sentiments in python\nloss = tf.nn.sigmoid_cross_entropy_with_logits(logits, sentiments)\nloss = tf.reduce_sum(loss)\noptimizer = tf.train.AdamOptimizer().minimize(loss)\n\n# Step 3:\ntrainSet = p.load( open('data/trainTweets_preprocessed.p','rb'))\nrandom.shuffle(trainSet)\n\ntf.global_variables_initializer().run(session=session)\n\nlosses = []\nfor i in range(5000):\n trainTweet = np.array( [ t[0] for t in trainSet[i: i+ minibatch_size]])\n trainLabels = np.array( [int(t[1]) for t in trainSet[i: i+ minibatch_size] ])\n \n feed_dict = {\n embeddingMatrix: word_embeddings,\n tweets: trainTweet,\n sentiments: trainLabels\n }\n fetch = {\n 'loss': loss,\n 'trainOp': optimizer\n }\n results = session.run(fetch, feed_dict=feed_dict)\n losses.append(results['loss'])\n if i % 500 == 0:\n print(\"Iteration\",i,\"Loss\", sum(losses[-500:-1])/500. if i > 0 else losses[-1])\n \n\n# Step 4:\nvalidationSet = p.load( open('data/devTweets_preprocessed.p','rb'))\nrandom.shuffle(validationSet)\n\nlosses = []\nfor i in range(20000/20):\n valTweet = np.array( [ t[0] for t in validationSet[i: i+ minibatch_size]])\n valLabels = np.array( [int(t[1]) for t in validationSet[i: i+ minibatch_size] ])\n feed_dict = {\n embeddingMatrix: word_embeddings,\n tweets: valTweet,\n sentiments: valLabels\n }\n fetch = {\n 'loss': loss,\n }\n results = session.run(fetch, feed_dict=feed_dict)\n losses.append(results['loss'])\nprint(\"Dev Loss\", sum(losses)*1./len(losses))" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
JamesSample/icpw
toc_report_feb_2019_part2.ipynb
mit
[ "%matplotlib inline\nimport nivapy3 as nivapy\nimport numpy as np\nimport pandas as pd\nimport os\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nimport toc_trends_analysis as resa2_trends\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", message=\"Mean of empty slice\")\nplt.style.use('ggplot')\n\n# Connect to NIVABASE\neng = nivapy.da.connect()", "TOC Thematic Report - February 2019 (Part 2: Annual trends)\n1. Get list of stations", "# Select projects\nprj_grid = nivapy.da.select_resa_projects(eng)\nprj_grid\n\nprj_df = prj_grid.get_selected_df()\nprint (len(prj_df))\nprj_df\n\n# Get stations\nstn_df = nivapy.da.select_resa_project_stations(prj_df, eng)\nprint(len(stn_df))\nstn_df.head()\n\n# Map\nnivapy.spatial.quickmap(stn_df, popup='station_code')", "2. Calculate annual trends", "# User input\n# Specify projects of interest\nproj_list = ['ICPWaters US', 'ICPWaters NO', 'ICPWaters CA',\n 'ICPWaters UK', 'ICPWaters FI', 'ICPWaters SE',\n 'ICPWaters CZ', 'ICPWaters IT', 'ICPWaters PL',\n 'ICPWaters CH', 'ICPWaters LV', 'ICPWaters EE',\n 'ICPWaters IE', 'ICPWaters MD', 'ICPWaters DE']\n\n# Specify results folder\nres_fold = (r'../../../Thematic_Trends_Report_2019/results')", "1. 1990 to 2016", "# Specify period of interest\nst_yr, end_yr = 1990, 2016\n\n# Build output paths\nplot_fold = os.path.join(res_fold, 'trends_plots_%s-%s' % (st_yr, end_yr))\nres_csv = os.path.join(res_fold, 'res_%s-%s.csv' % (st_yr, end_yr))\ndup_csv = os.path.join(res_fold, 'dup_%s-%s.csv' % (st_yr, end_yr))\nnd_csv = os.path.join(res_fold, 'nd_%s-%s.csv' % (st_yr, end_yr))\n\n# Run analysis \nres_df, dup_df, nd_df = resa2_trends.run_trend_analysis(proj_list, \n eng,\n st_yr=st_yr, \n end_yr=end_yr,\n plot=False, \n fold=plot_fold)\n\n# Delete mk_std_dev col as not relevant here\ndel res_df['mk_std_dev']\n\n# Write output\nres_df.to_csv(res_csv, index=False)\ndup_df.to_csv(dup_csv, index=False)\nif nd_df is not None:\n nd_df.to_csv(nd_csv, index=False)", "There are lots of warnings printed above, but the main one of interest is:\nSome stations have no relevant data in the period specified.\n\nWhich station(s) are missing data?", "# Get stations with no data\nstn_df[stn_df['station_id'].isin(nd_df['station_id'])]", "It seems that one Irish station has no associated data. This is as expected, because all the data supplied by Julian for this site comes from \"near-shore\" sampling (rather than \"open water\") and these have been omitted from the data upload - see here for details.\n3. Basic checking\n3.1. Boxplots", "# Set up plot\nfig = plt.figure(figsize=(20,10))\nsn.set(style=\"ticks\", palette=\"muted\", \n color_codes=True, font_scale=2)\n\n# Horizontal boxplots\nax = sn.boxplot(x=\"mean\", y=\"par_id\", data=res_df,\n whis=np.inf, color=\"c\")\n\n# Add \"raw\" data points for each observation, with some \"jitter\"\n# to make them visible\nsn.stripplot(x=\"mean\", y=\"par_id\", data=res_df, jitter=True, \n size=3, color=\".3\", linewidth=0)\n\n# Remove axis lines\nsn.despine(trim=True)", "4. Data restructuring\nThe code below is taken from here. It is used to generate output files in the format requested by Heleen.\n4.1. 
Combine datasets", "# Change 'period' col to 'data_period' and add 'analysis_period'\nres_df['data_period'] = res_df['period']\ndel res_df['period']\n\nres_df['analysis_period'] = '1990-2016'\n\n# Join\ndf = pd.merge(res_df, stn_df, how='left', on='station_id')\n\n# Re-order columns\ndf = df[['station_id', \n 'station_code', 'station_name', \n 'latitude', 'longitude', 'analysis_period', 'data_period', \n 'par_id', 'non_missing', 'n_start', 'n_end', 'mean', 'median', \n 'std_dev', 'mk_stat', 'norm_mk_stat', 'mk_p_val', 'trend',\n 'sen_slp']]\n\ndf.head()", "7.2. Check record completeness\nSee e-mail from Heleen received 25/10/2016 at 15:56. The 'non_missing' threshold is based of 65% of the data period (e.g. 65% of 27 years for 1990 to 2016).", "def include(row):\n if ((row['analysis_period'] == '1990-2016') & \n (row['n_start'] >= 2) &\n (row['n_end'] >= 2) &\n (row['non_missing'] >= 18)):\n return 'yes'\n elif ((row['analysis_period'] == '1990-2004') & \n (row['n_start'] >= 2) &\n (row['n_end'] >= 2) & \n (row['non_missing'] >= 10)):\n return 'yes'\n elif ((row['analysis_period'] == '2002-2016') & \n (row['n_start'] >= 2) &\n (row['n_end'] >= 2) & \n (row['non_missing'] >= 10)):\n return 'yes'\n else:\n return 'no'\n\ndf['include'] = df.apply(include, axis=1)", "4.3. SO4 at Abiskojaure\nSO4 for this station ('station_id=36458' in the \"core\" dataset) should be removed. See here.", "# Remove sulphate-related series at Abiskojaure\ndf = df.query('not((station_id==36458) and ((par_id==\"ESO4\") or '\n '(par_id==\"ESO4X\") or '\n '(par_id==\"ESO4_ECl\")))')", "7.4. Relative slope", "# Relative slope\ndf['rel_sen_slp'] = df['sen_slp'] / df['median'] ", "7.5. Tidy", "# Remove unwanted cols\ndf.drop(labels=['mean', 'n_end', 'n_start', 'mk_stat', 'norm_mk_stat'],\n axis=1, inplace=True)\n\n# Reorder columns\ndf = df[['station_id', 'station_code', \n 'station_name', 'latitude', 'longitude', 'analysis_period',\n 'data_period', 'par_id', 'non_missing', 'median', 'std_dev',\n 'mk_p_val', 'trend', 'sen_slp', 'rel_sen_slp', 'include']]\n\n# Write to output\nout_path = os.path.join(res_fold, 'toc_core_trends_long_format.csv')\ndf.to_csv(out_path, index=False, encoding='utf-8')\n\ndf.head()", "7.6. Convert to \"wide\" format", "del df['data_period']\n\n# Melt to \"long\" format\nmelt_df = pd.melt(df, \n id_vars=['station_id', 'station_code', \n 'station_name', 'latitude', 'longitude', \n 'analysis_period', 'par_id', 'include'],\n var_name='stat')\n\n# Get only values where include='yes'\nmelt_df = melt_df.query('include == \"yes\"')\ndel melt_df['include']\n\n# Build multi-index on everything except \"value\"\nmelt_df.set_index(['station_id', 'station_code', \n 'station_name', 'latitude', 'longitude', 'par_id', \n 'analysis_period', \n 'stat'], inplace=True)\n\n# Unstack levels of interest to columns\nwide_df = melt_df.unstack(level=['par_id', 'analysis_period', 'stat'])\n\n# Drop unwanted \"value\" level in index\nwide_df.columns = wide_df.columns.droplevel(0)\n\n# Replace multi-index with separate components concatenated with '_'\nwide_df.columns = [\"_\".join(item) for item in wide_df.columns]\n\n# Reset multiindex on rows\nwide_df = wide_df.reset_index()\n\n# Save output\nout_path = os.path.join(res_fold, 'toc_trends_wide_format.csv')\nwide_df.to_csv(out_path, index=False, encoding='utf-8')\n\nwide_df.head()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
uber/pyro
tutorial/source/gp.ipynb
apache-2.0
[ "Gaussian Processes\nIntroduction\nGaussian Processes have been used in supervised, unsupervised, and even reinforcement learning problems and are described by an elegant mathematical theory (for an overview of the subject see [1, 4]). They are also very attractive conceptually, since they offer an intuitive way to define priors over functions. And finally, since Gaussian Processes are formulated in a Bayesian setting, they come equipped with a powerful notion of uncertainty. \nHappily, Pyro offers some support for Gaussian Processes in the pyro.contrib.gp module. The goal of this tutorial is to give a brief introduction to Gaussian Processes (GPs) in the context of this module. We will mostly be focusing on how to use the GP interface in Pyro and refer the reader to the references for more details about GPs in general.\nThe model we're interested in is defined by\n$$f \\sim \\mathcal{GP}\\left(0, \\mathbf{K}_f(x, x')\\right)$$\nand\n$$y = f(x) + \\epsilon,\\quad \\epsilon \\sim \\mathcal{N}\\left(0, \\beta^{-1}\\mathbf{I}\\right).$$\nHere $x, x' \\in\\mathbf{X}$ are points in the input space and $y\\in\\mathbf{Y}$ is a point in the output space. $f$ is a draw from the GP prior specified by the kernel $\\mathbf{K}_f$ and represents a function from $\\mathbf{X}$ to $\\mathbf{Y}$. Finally, $\\epsilon$ represents Gaussian observation noise.\nWe will use the radial basis function kernel (RBF kernel) as the kernel of our GP:\n$$ k(x,x') = \\sigma^2 \\exp\\left(-\\frac{\\|x-x'\\|^2}{2l^2}\\right).$$\nHere $\\sigma^2$ and $l$ are parameters that specify the kernel; specifically, $\\sigma^2$ is a variance or amplitude squared and $l$ is a lengthscale. We'll get some intuition for these parameters below.\nImports\nFirst, we import necessary modules.", "import os\nimport matplotlib.pyplot as plt\nimport torch\n\nimport pyro\nimport pyro.contrib.gp as gp\nimport pyro.distributions as dist\n\nsmoke_test = ('CI' in os.environ) # ignore; used to check code integrity in the Pyro repo\nassert pyro.__version__.startswith('1.7.0')\npyro.set_rng_seed(0)", "Throughout the tutorial we'll want to visualize GPs. 
So we define a helper function for plotting:", "# note that this helper function does three different things: \n# (i) plots the observed data; \n# (ii) plots the predictions from the learned GP after conditioning on data; \n# (iii) plots samples from the GP prior (with no conditioning on observed data)\n\ndef plot(plot_observed_data=False, plot_predictions=False, n_prior_samples=0, \n model=None, kernel=None, n_test=500):\n\n plt.figure(figsize=(12, 6))\n if plot_observed_data:\n plt.plot(X.numpy(), y.numpy(), 'kx') \n if plot_predictions:\n Xtest = torch.linspace(-0.5, 5.5, n_test) # test inputs\n # compute predictive mean and variance\n with torch.no_grad():\n if type(model) == gp.models.VariationalSparseGP:\n mean, cov = model(Xtest, full_cov=True)\n else:\n mean, cov = model(Xtest, full_cov=True, noiseless=False)\n sd = cov.diag().sqrt() # standard deviation at each input point x\n plt.plot(Xtest.numpy(), mean.numpy(), 'r', lw=2) # plot the mean\n plt.fill_between(Xtest.numpy(), # plot the two-sigma uncertainty about the mean\n (mean - 2.0 * sd).numpy(),\n (mean + 2.0 * sd).numpy(),\n color='C0', alpha=0.3)\n if n_prior_samples > 0: # plot samples from the GP prior\n Xtest = torch.linspace(-0.5, 5.5, n_test) # test inputs\n noise = (model.noise if type(model) != gp.models.VariationalSparseGP\n else model.likelihood.variance)\n cov = kernel.forward(Xtest) + noise.expand(n_test).diag()\n samples = dist.MultivariateNormal(torch.zeros(n_test), covariance_matrix=cov)\\\n .sample(sample_shape=(n_prior_samples,))\n plt.plot(Xtest.numpy(), samples.numpy().T, lw=2, alpha=0.4)\n\n plt.xlim(-0.5, 5.5)", "Data\nThe data consist of $20$ points sampled from\n$$ y = 0.5\\sin(3x) + \\epsilon, \\quad \\epsilon \\sim \\mathcal{N}(0, 0.2).$$\nwith $x$ sampled uniformly from the interval $[0, 5]$.", "N = 20\nX = dist.Uniform(0.0, 5.0).sample(sample_shape=(N,))\ny = 0.5 * torch.sin(3*X) + dist.Normal(0.0, 0.2).sample(sample_shape=(N,))\n\nplot(plot_observed_data=True) # let's plot the observed data", "Define model\nFirst we define a RBF kernel, specifying the values of the two hyperparameters variance and lengthscale. Then we construct a GPRegression object. Here we feed in another hyperparameter, noise, that corresponds to $\\epsilon$ above.", "kernel = gp.kernels.RBF(input_dim=1, variance=torch.tensor(5.),\n lengthscale=torch.tensor(10.))\ngpr = gp.models.GPRegression(X, y, kernel, noise=torch.tensor(1.))", "Let's see what samples from this GP function prior look like. Note that this is before we've conditioned on the data. The shape these functions take&mdash;their smoothness, their vertical scale, etc.&mdash;is controlled by the GP kernel.", "plot(model=gpr, kernel=kernel, n_prior_samples=2)", "For example, if we make variance and noise smaller we will see function samples with smaller vertical amplitude:", "kernel2 = gp.kernels.RBF(input_dim=1, variance=torch.tensor(0.1),\n lengthscale=torch.tensor(10.))\ngpr2 = gp.models.GPRegression(X, y, kernel2, noise=torch.tensor(0.1))\nplot(model=gpr2, kernel=kernel2, n_prior_samples=2)", "Inference\nIn the above we set the kernel hyperparameters by hand. If we want to learn the hyperparameters from the data, we need to do inference. In the simplest (conjugate) case we do gradient ascent on the log marginal likelihood. In pyro.contrib.gp, we can use any PyTorch optimizer to optimize parameters of a model. 
In addition, we need a loss function which takes inputs are the pair model and guide and returns an ELBO loss (see SVI Part I tutorial).", "optimizer = torch.optim.Adam(gpr.parameters(), lr=0.005)\nloss_fn = pyro.infer.Trace_ELBO().differentiable_loss\nlosses = []\nnum_steps = 2500 if not smoke_test else 2\nfor i in range(num_steps): \n optimizer.zero_grad()\n loss = loss_fn(gpr.model, gpr.guide)\n loss.backward()\n optimizer.step()\n losses.append(loss.item())\n\n# let's plot the loss curve after 2500 steps of training\nplt.plot(losses);", "Let's see if we're learned anything reasonable:", "plot(model=gpr, plot_observed_data=True, plot_predictions=True)", "Here the thick red curve is the mean prediction and the blue band represents the 2-sigma uncertainty around the mean. It seems we learned reasonable kernel hyperparameters, as both the mean and uncertainty give a reasonable fit to the data. (Note that learning could have easily gone wrong if we e.g. chose too large of a learning rate or chose bad initital hyperparameters.)\nNote that the kernel is only well-defined if variance and lengthscale are positive. Under the hood Pyro is using PyTorch constraints (see docs) to ensure that hyperparameters are constrained to the appropriate domains. Let's see the constrained values we've learned.", "gpr.kernel.variance.item()\n\ngpr.kernel.lengthscale.item()\n\ngpr.noise.item()", "The period of the sinusoid that generated the data is $T = 2\\pi/3 \\approx 2.09$ so learning a lengthscale that's approximiately equal to a quarter period makes sense.\nFit the model using MAP\nWe need to define priors for the hyperparameters.", "# Define the same model as before.\npyro.clear_param_store()\nkernel = gp.kernels.RBF(input_dim=1, variance=torch.tensor(5.),\n lengthscale=torch.tensor(10.))\ngpr = gp.models.GPRegression(X, y, kernel, noise=torch.tensor(1.))\n\n# note that our priors have support on the positive reals\ngpr.kernel.lengthscale = pyro.nn.PyroSample(dist.LogNormal(0.0, 1.0))\ngpr.kernel.variance = pyro.nn.PyroSample(dist.LogNormal(0.0, 1.0))\n\noptimizer = torch.optim.Adam(gpr.parameters(), lr=0.005)\nloss_fn = pyro.infer.Trace_ELBO().differentiable_loss\nlosses = []\nnum_steps = 2500 if not smoke_test else 2\nfor i in range(num_steps):\n optimizer.zero_grad()\n loss = loss_fn(gpr.model, gpr.guide)\n loss.backward()\n optimizer.step()\n losses.append(loss.item())\nplt.plot(losses);\n\nplot(model=gpr, plot_observed_data=True, plot_predictions=True)", "Let's inspect the hyperparameters we've learned:", "# tell gpr that we want to get samples from guides\ngpr.set_mode('guide')\nprint('variance = {}'.format(gpr.kernel.variance))\nprint('lengthscale = {}'.format(gpr.kernel.lengthscale))\nprint('noise = {}'.format(gpr.noise))", "Note that the MAP values are different from the MLE values due to the prior.\nSparse GPs\nFor large datasets computing the log marginal likelihood is costly due to the expensive matrix operations involved (e.g. see Section 2.2 of [1]). A variety of so-called 'sparse' variational methods have been developed to make GPs viable for larger datasets. This is a big area of research and we won't be going into all the details. 
Instead we quickly show how we can use SparseGPRegression in pyro.contrib.gp to make use of these methods.\nFirst, we generate more data.", "N = 1000\nX = dist.Uniform(0.0, 5.0).sample(sample_shape=(N,))\ny = 0.5 * torch.sin(3*X) + dist.Normal(0.0, 0.2).sample(sample_shape=(N,))\nplot(plot_observed_data=True)", "Using the sparse GP is very similar to using the basic GP used above. We just need to add an extra parameter $X_u$ (the inducing points).", "# initialize the inducing inputs\nXu = torch.arange(20.) / 4.0\n\n# initialize the kernel and model\npyro.clear_param_store()\nkernel = gp.kernels.RBF(input_dim=1)\n# we increase the jitter for better numerical stability\nsgpr = gp.models.SparseGPRegression(X, y, kernel, Xu=Xu, jitter=1.0e-5)\n\n# the way we setup inference is similar to above\noptimizer = torch.optim.Adam(sgpr.parameters(), lr=0.005)\nloss_fn = pyro.infer.Trace_ELBO().differentiable_loss\nlosses = []\nnum_steps = 2500 if not smoke_test else 2\nfor i in range(num_steps):\n optimizer.zero_grad()\n loss = loss_fn(sgpr.model, sgpr.guide)\n loss.backward()\n optimizer.step()\n losses.append(loss.item())\nplt.plot(losses);\n\n# let's look at the inducing points we've learned\nprint(\"inducing points:\\n{}\".format(sgpr.Xu.data.numpy()))\n# and plot the predictions from the sparse GP\nplot(model=sgpr, plot_observed_data=True, plot_predictions=True)", "We can see that the model learns a reasonable fit to the data. There are three different sparse approximations that are currently implemented in Pyro:\n\n\n\"DTC\" (Deterministic Training Conditional)\n\n\n\"FITC\" (Fully Independent Training Conditional)\n\n\n\"VFE\" (Variational Free Energy) \n\n\nBy default, SparseGPRegression will use \"VFE\" as the inference method. We can use other methods by passing a different approx flag to SparseGPRegression.\nMore Sparse GPs\nBoth GPRegression and SparseGPRegression above are limited to Gaussian likelihoods. We can use other likelihoods with GPs&mdash;for example, we can use the Bernoulli likelihood for classification problems&mdash;but the inference problem becomes more difficult. In this section, we show how to use the VariationalSparseGP module, which can handle non-Gaussian likelihoods. So we can compare to what we've done above, we're still going to use a Gaussian likelihood. The point is that the inference that's being done under the hood can support other likelihoods.", "# initialize the inducing inputs\nXu = torch.arange(10.) / 2.0\n\n# initialize the kernel, likelihood, and model\npyro.clear_param_store()\nkernel = gp.kernels.RBF(input_dim=1)\nlikelihood = gp.likelihoods.Gaussian()\n# turn on \"whiten\" flag for more stable optimization\nvsgp = gp.models.VariationalSparseGP(X, y, kernel, Xu=Xu, likelihood=likelihood, whiten=True)\n\n# instead of defining our own training loop, we will\n# use the built-in support provided by the GP module\nnum_steps = 1500 if not smoke_test else 2\nlosses = gp.util.train(vsgp, num_steps=num_steps)\nplt.plot(losses);\n\nplot(model=vsgp, plot_observed_data=True, plot_predictions=True)", "That's all there is to it. For more details on the pyro.contrib.gp module see the docs. And for example code that uses a GP for classification see here.\nReference\n[1] Deep Gaussian processes and variational propagation of uncertainty,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nAndreas Damianou\n[2] A unifying framework for sparse Gaussian process approximation using power expectation propagation,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nThang D. Bui, Josiah Yan, and Richard E. 
Turner\n[3] Scalable variational Gaussian process classification,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nJames Hensman, Alexander G. de G. Matthews, and Zoubin Ghahramani\n[4] Gaussian Processes for Machine Learning,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nCarl E. Rasmussen, and Christopher K. I. Williams\n[5] A Unifying View of Sparse Approximate Gaussian Process Regression,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nJoaquin Quinonero-Candela, and Carl E. Rasmussen" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
M-R-Houghton/euroscipy_2015
scikit_image/lectures/00_images_are_arrays.v3.ipynb
mit
[ "from __future__ import division, print_function\n%matplotlib inline", "Images are numpy arrays\nImages are represented in scikit-image using standard numpy arrays. This allows maximum inter-operability with other libraries in the scientific Python ecosystem, such as matplotlib and scipy.\nLet's see how to build a grayscale image as a 2D array:", "import numpy as np\nfrom matplotlib import pyplot as plt, cm\n\nrandom_image = np.random.random([500, 500])\n\nplt.imshow(random_image, cmap=cm.gray, interpolation='nearest');", "The same holds for \"real-world\" images:", "from skimage import data\n\ncoins = data.coins()\n\nprint(type(coins), coins.dtype, coins.shape)\nplt.imshow(coins, cmap=cm.gray, interpolation='nearest');", "A color image is a 3D array, where the last dimension has size 3 and represents the red, green, and blue channels:", "cat = data.chelsea()\nprint(\"Shape:\", cat.shape)\nprint(\"Values min/max:\", cat.min(), cat.max())\n\nplt.imshow(cat, interpolation='nearest');", "These are just numpy arrays. Making a red square is easy using just array slicing and manipulation:", "cat[10:110, 10:110, :] = [255, 0, 0] # [red, green, blue]\nplt.imshow(cat);", "Images can also include transparent regions by adding a 4th dimension, called an alpha layer.\nData types and image values\nIn literature, one finds different conventions for representing image values:\n0 - 255 where 0 is black, 255 is white\n 0 - 1 where 0 is black, 1 is white\nscikit-image supports both conventions--the choice is determined by the\ndata-type of the array.\nE.g., here, I generate two valid images:", "linear0 = np.linspace(0, 1, 2500).reshape((50, 50))\nlinear1 = np.linspace(0, 255, 2500).reshape((50, 50)).astype(np.uint8)\n\nprint(\"Linear0:\", linear0.dtype, linear0.min(), linear0.max())\nprint(\"Linear1:\", linear1.dtype, linear1.min(), linear1.max())\n\nfig, (ax0, ax1) = plt.subplots(1, 2)\nax0.imshow(linear0, cmap='gray')\nax1.imshow(linear1, cmap='gray');", "The library is designed in such a way that any data-type is allowed as input,\nas long as the range is correct (0-1 for floating point images, 0-255 for unsigned bytes,\n0-65535 for unsigned 16-bit integers).\nThis is achieved through the use of a few utility functions, such as img_as_float and img_as_ubyte:", "from skimage import img_as_float, img_as_ubyte\n\nimage = data.chelsea()\n\nimage_float = img_as_float(image)\nimage_ubyte = img_as_ubyte(image)\n\nprint(\"type, min, max:\", image_float.dtype, image_float.min(), image_float.max())\nprint(\"type, min, max:\", image_ubyte.dtype, image_ubyte.min(), image_ubyte.max())\n\nprint(\"231/255 =\", 231/255.)", "Your code would then typically look like this:\npython\ndef my_function(any_image):\n float_image = img_as_float(any_image)\n # Proceed, knowing image is in [0, 1]\nWe recommend using the floating point representation, given that\nscikit-image mostly uses that format internally.\nDisplaying images using matplotlib\nBefore we get started, a quick note about plotting images---specifically, plotting gray-scale images with Matplotlib. 
First, let's grab an example image from scikit-image.", "from skimage import data\n\nimage = data.camera()", "Also, we'll want to make sure we have numpy and matplotlib imported.", "import matplotlib.pyplot as plt\nimport numpy as np", "If we plot a gray-scale image using the default colormap, \"jet\", and a gray-scale color\nmap, \"gray\", you can easily see the difference:", "fig, (ax_jet, ax_gray) = plt.subplots(ncols=2, figsize=(10, 5))\nax_jet.imshow(image, cmap='jet')\nax_gray.imshow(image, cmap='gray');", "We can get a better idea of the ill effects by zooming into the man's face.", "face = image[80:160, 200:280]\nfig, (ax_jet, ax_gray) = plt.subplots(ncols=2)\nax_jet.imshow(face, cmap='jet')\nax_gray.imshow(face, cmap='gray');", "Notice how the face looks distorted and splotchy with the \"jet\" colormap. Also, this colormap distorts the concepts of light and dark, and there are artificial boundaries created by the different color hues. Is that a beauty mark on the man's upper lip? No, it's just an artifact of this ridiculous colormap.\nHere's another example:", "X, Y = np.ogrid[-5:5:0.1, -5:5:0.1]\nR = np.sqrt(X**2 + Y**2)\n\nfig, (ax_jet, ax_gray) = plt.subplots(1, 2)\nax_jet.imshow(R, cmap='jet')\nax_gray.imshow(R, cmap='gray');", "Woah! See all those non-existing contours?\nYou can add the following setting at the top of any script\nto change the default colormap:", "plt.rcParams['image.cmap'] = 'gray'", "Don't worry: color images are unaffected by this change.\nIn addition, we'll set the interpolation to 'nearest neighborhood' so that it's easier to distinguish individual pixels in your image (the default is 'bicubic'--see the exploration below).", "plt.rcParams['image.interpolation'] = 'nearest'", "You can also set both of these explicitly in the imshow command:", "plt.imshow(R, cmap='gray', interpolation='nearest');", "Interactive demo: interpolation and color maps", "from IPython.html.widgets import interact, fixed\nfrom matplotlib import cm as colormaps\n\n@interact(image=fixed(face),\n cmap=sorted([c for c in colormaps.datad.keys() if not c.endswith('_r')],\n key=lambda x: x.lower()),\n interpolation=['nearest', 'bilinear', 'bicubic',\n 'spline16', 'spline36', 'hanning', 'hamming',\n 'hermite', 'kaiser', 'quadric', 'catrom',\n 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'])\ndef imshow_params(image, cmap='jet', interpolation='bicubic'):\n fig, axes = plt.subplots(1, 5, figsize=(15, 4))\n \n axes[0].imshow(image, cmap='gray', interpolation='nearest')\n axes[0].set_title('Original')\n \n axes[1].imshow(image[:5, :5], cmap='gray', interpolation='nearest')\n axes[1].set_title('Top 5x5 block')\n axes[1].set_xlabel('No interpolation')\n\n axes[2].imshow(image, cmap=cmap, interpolation=interpolation)\n axes[2].set_title('%s colormap' % cmap)\n axes[2].set_xlabel('%s interpolation' % interpolation)\n \n axes[3].imshow(image[:5, :5], cmap=cmap, interpolation=interpolation)\n axes[3].set_title('%s colormap' % cmap)\n axes[3].set_xlabel('%s interpolation' % interpolation)\n \n axes[4].imshow(R, cmap=cmap, interpolation=interpolation)\n axes[4].set_title('%s colormap' % cmap)\n axes[4].set_xlabel('%s interpolation' % interpolation)\n \n for ax in axes:\n ax.set_xticks([])\n ax.set_yticks([])", "Image I/O\nMostly, we won't be using input images from the scikit-image example data sets. Those images are typically stored in JPEG or PNG format. Since scikit-image operates on NumPy arrays, any image reader library that provides arrays will do. 
Options include matplotlib, pillow, imageio, imread, etc.\nscikit-image conveniently wraps many of these in the io submodule, and will use whatever option is available:", "from skimage import io\n\nimage = io.imread('../images/balloon.jpg')\n\nprint(type(image))\nplt.imshow(image);", "We also have the ability to load multiple images, or multi-layer TIFF images:", "ic = io.imread_collection('../images/*.png')\n\nprint(type(ic), '\\n\\n', ic)\n\nf, axes = plt.subplots(nrows=1, ncols=len(ic), figsize=(15, 10))\n\nfor i, image in enumerate(ic):\n axes[i].imshow(image, cmap='gray')\n axes[i].axis('off')", "<span class=\"exercize\">Exercise: draw the letter H</span>\nDefine a function that takes as input an RGB image and a pair of coordinates (row, column), and returns the image (optionally a copy) with green letter H overlaid at those coordinates. The coordinates should point to the top-left corner of the H.\nThe arms and strut of the H should have a width of 3 pixels, and the H itself should have a height of 24 pixels and width of 20 pixels.\nStart with the following template:", "def draw_H(image, coords, color=(0.8, 0.8, 0.8), in_place=True):\n out = image.copy()\n # your code goes here\n return out", "Test your function like so:", "cat = data.chelsea()\ncat_H = draw_H(cat, (50, -50))\nplt.imshow(cat_H);", "<span class=\"exercize\">Exercise: RGB intensity plot</span>\nPlot the intensity of each channel of the image along a given row.\nStart with the following template:", "def plot_intensity(image, row):\n # Fill in the three lines below\n red_values = ...\n green_values = ...\n blue_values = ...\n \n plt.figure()\n plt.plot(red_values)\n plt.plot(green_values)\n plt.plot(blue_values)\n \n pass", "Test your function here:", "plot_intensity(cat, 50)\nplot_intensity(cat, 100)", "<div style=\"height: 400px;\"></div>", "%reload_ext load_style\n%load_style ../themes/tutorial.css" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dataventures/workshops
3/1-Clustering.ipynb
mit
[ "Clustering\nClustering is an unsupervised method that tries to find structure in a set of objects. Specifically, we want to find clusters, groups within our object set where objects in a cluster are more similar to each other than they are to objects outside of the cluster. Note that this definition is quite vague - there are many different ways to conceptualize how clusters may occur in data, and today we will look at K-Means and hierarchical clustering in particular.", "from IPython.display import Image\nfrom IPython.core.display import HTML ", "K-Means Clustering\nMotivation\nGiven a set of objects, we specify that we want $k$ clusters. Each cluster has a mean representing it, and we assign each point to the clusters based on which cluster mean its closest to. For each point, the reconstruction error is defined to be its distance to its cluster mean. This gives us the total reconstruction error, the sum of all the individual reconstruction errors, as an error value that we want to minimize. \nFormally, we have a set of object vectors ${x_n}{n = 1}^N$ and a set of $K$ cluster means ${\\mu_k}{k = 1}^K$, where $x_n, \\mu_k \\in \\mathbb{R}^D$. We represent each object's cluster assignment with the $K$ dimensional vector $r_n$, where $r_{nk} = 1$ if $x_n$ is in cluster $k$, and 0 otherwise. This gives us the individual reconstruction error\n$$J_n(r_n, {\\mu_k}) = \\sum_{k = 1}^K r_{nk} \\cdot |x_n - \\mu_k|^2$$\nand the total reconstruction error \n$$J({r_n}, {\\mu_k}) = \\sum_{n = 1}^N \\sum_{k = 1}^K r_{nk} \\cdot |x_n - \\mu_k|^2$$\nAs you can see, the reconstruction error on a set of objects is a function of assignments and means. How would you go about choosing the assignments ${r_n}$ and means ${\\mu_k}$ that minimize the reconstruction error? Lloyd's Algorithm proposes a two step error minimization.\nStep 1: minimize $J$ by updating the $r_n$, assigning each $x_n$ to its closest cluster mean.\nStep 2: minimize $J$ by recalculting the $\\mu_k$ to be the average over all vectors assigned to cluster $k$.\nRepeating this process until the assignments do not change, the algorithm will converge upon a local minima. The algorithm can be optimized by starting with reasonable distributions for cluster centers rather than choosing randomly (K-Means ++) or adding the condition that the cluster mean must be a point (K-Medoids). \nApplication\nSKLearn has a K-Means implementation that is documented here. In this example, we use K-Means to classify flowers into 3 classes based on their properties. Note that choosing the right $k$ is crucial. The true number of categories within the dataset is 3, so with this knowlegde, we can let $k$ be 3 to get the most logical split. 
However, if we didn't know that the dataset consisted of three types of flowers, choosing $k$ to be a value like 7 might result in less logical clusters.", "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn import datasets\n\ncenters = [[1, 1], [-1, -1], [1, -1]]\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nestimators = {'K-Means 3': KMeans(n_clusters=3),\n              'K-Means 7': KMeans(n_clusters=7)}\n\nfignum = 1\nfor name, est in estimators.items():\n    fig = plt.figure(fignum, figsize=(4, 3))\n    plt.clf()\n    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\n    plt.cla()\n    est.fit(X)\n    labels = est.labels_\n\n    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))\n\n    ax.w_xaxis.set_ticklabels([])\n    ax.w_yaxis.set_ticklabels([])\n    ax.w_zaxis.set_ticklabels([])\n    ax.set_xlabel('Petal width')\n    ax.set_ylabel('Sepal length')\n    ax.set_zlabel('Petal length')\n    ax.set_title(name)\n    fignum = fignum + 1\n\nplt.show()", "Hierarchical Clustering\nMotivation\nK-Means is one of the most common and simple clustering methods, but it has a couple of key limitations. First off, it is nondeterministic as it depends on the initial choice of cluster means, and Lloyd's Algorithm only arrives upon local minima rather than the global minimum. Furthermore, the algorithm requires you to decide what $k$ is, as we saw earlier. Finally, K-Means can be inflexible, as the only way cluster centers are derived is through the mean distance from all of the points assigned to the cluster. Because of this construction, K-Means doesn't perform well on clusters that are connected but not compact.\nHierarchical clustering solves many of these issues. The motivation behind hierarchical clustering is building up clusters as a hierarchy. All objects start out in their individual groups, and at each level, the groups that are a certain distance apart are joined to form a larger group. A variety of different distance metrics can be used in building up these groups to result in different types of hierarchical clusters. \n\nA dendrogram is a way of visualizing the groups as they are aggregated together in the hierarchy. As you can see, hierarchical clustering not only resolves some of the problems explained concerning K-Means - it also provides a very nice way of representing the structure of the clusters, identifying subclusters within the clusters.\nApplication\nOnce again, we can use the SKLearn hierarchical clustering implementation, in the same way as we used K-Means clustering. However, there are many resources on the page documenting the different distance metrics that you can use.", "estimators = {'Hierarchical 3': AgglomerativeClustering(n_clusters=3)}\n\nfignum = 1\nfor name, est in estimators.items():\n    fig = plt.figure(fignum, figsize=(4, 3))\n    plt.clf()\n    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\n    plt.cla()\n    est.fit(X)\n    labels = est.labels_\n\n    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))\n\n    ax.w_xaxis.set_ticklabels([])\n    ax.w_yaxis.set_ticklabels([])\n    ax.w_zaxis.set_ticklabels([])\n    ax.set_xlabel('Petal width')\n    ax.set_ylabel('Sepal length')\n    ax.set_zlabel('Petal length')\n    ax.set_title(name)\n    fignum = fignum + 1\n\nplt.show()", "Challenge\nLook at the other clustering methods provided by SKLearn, and consider their use cases. 
Pick three to run on the sample iris dataset that you think will produce the most accurate clusters. Tune parameters and look at the different options to try and get your clusters as close to the ground truth as possible. This challenge hopes to help you familiarize yourself with the documentation that SKLearn provides and figure out the best clustering method given a problem to solve. We only covered two clustering models in depth to give you a taste of what clustering can do, but there many more clustering models out there all with their own optimal use cases.", "np.random.seed(5)\n\ncenters = [[1, 1], [-1, -1], [1, -1]]\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\n# TODO: choose three additional estimators here that will give the best results.\nestimators = {'Hierarchical 3': AgglomerativeClustering(n_clusters=3),\n 'K-Means 3': KMeans(n_clusters=3),\n 'K-Means 7': KMeans(n_clusters=7)}\n\nfignum = 1\nfor name, est in estimators.items():\n fig = plt.figure(fignum, figsize=(4, 3))\n plt.clf()\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\n plt.cla()\n est.fit(X)\n labels = est.labels_\n\n ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))\n\n ax.w_xaxis.set_ticklabels([])\n ax.w_yaxis.set_ticklabels([])\n ax.w_zaxis.set_ticklabels([])\n ax.set_xlabel('Petal width')\n ax.set_ylabel('Sepal length')\n ax.set_zlabel('Petal length')\n ax.set_title(name)\n fignum = fignum + 1\n\n# Plot the ground truth\nfig = plt.figure(fignum, figsize=(4, 3))\nplt.clf()\nax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\nplt.cla()\n\nfor name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:\n ax.text3D(X[y == label, 3].mean(),\n X[y == label, 0].mean() + 1.5,\n X[y == label, 2].mean(), name,\n horizontalalignment='center',\n bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))\ny = np.choose(y, [1, 2, 0]).astype(np.float)\nax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)\n\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\nax.set_xlabel('Petal width')\nax.set_ylabel('Sepal length')\nax.set_zlabel('Petal length')\nax.set_title('Ground Truth')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
swirlingsand/self-driving-car-nanodegree-nd013
CarND-LetNet/LeNet-Lab.ipynb
mit
[ "LeNet Lab\n\nSource: Yan LeCun\nLoad Data\nLoad the MNIST data, which comes pre-loaded with TensorFlow.\nYou do not need to modify this section.", "from tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False)\nX_train, y_train = mnist.train.images, mnist.train.labels\nX_validation, y_validation = mnist.validation.images, mnist.validation.labels\nX_test, y_test = mnist.test.images, mnist.test.labels\n\nassert(len(X_train) == len(y_train))\nassert(len(X_validation) == len(y_validation))\nassert(len(X_test) == len(y_test))\n\nprint()\nprint(\"Image Shape: {}\".format(X_train[0].shape))\nprint()\nprint(\"Training Set: {} samples\".format(len(X_train)))\nprint(\"Validation Set: {} samples\".format(len(X_validation)))\nprint(\"Test Set: {} samples\".format(len(X_test)))", "The MNIST data that TensorFlow pre-loads comes as 28x28x1 images.\nHowever, the LeNet architecture only accepts 32x32xC images, where C is the number of color channels.\nIn order to reformat the MNIST data into a shape that LeNet will accept, we pad the data with two rows of zeros on the top and bottom, and two columns of zeros on the left and right (28+2+2 = 32).\nYou do not need to modify this section.", "import numpy as np\n\n# Pad images with 0s\nX_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')\n \nprint(\"Updated Image Shape: {}\".format(X_train[0].shape))", "Visualize Data\nView a sample from the dataset.\nYou do not need to modify this section.", "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nindex = random.randint(1, len(X_train))\nimage = X_train[index].squeeze()\n\nplt.figure(figsize=(1,1))\nplt.imshow(image, cmap=\"gray\")\nprint(y_train[index])", "Preprocess Data\nShuffle the training data.\nYou do not need to modify this section.", "from sklearn.utils import shuffle\n\nX_train, y_train = shuffle(X_train, y_train)", "Setup TensorFlow\nThe EPOCH and BATCH_SIZE values affect the training speed and model accuracy.\nYou do not need to modify this section.", "import tensorflow as tf\n\nEPOCHS = 20\nBATCH_SIZE = 64", "TODO: Implement LeNet-5\nImplement the LeNet-5 neural network architecture.\nThis is the only cell you need to edit.\nInput\nThe LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since MNIST images are grayscale, C is 1 in this case.\nArchitecture\nLayer 1: Convolutional. The output shape should be 28x28x6.\nActivation. Your choice of activation function.\nPooling. The output shape should be 14x14x6.\nLayer 2: Convolutional. The output shape should be 10x10x16.\nActivation. Your choice of activation function.\nPooling. The output shape should be 5x5x16.\nFlatten. Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do is by using tf.contrib.layers.flatten, which is already imported for you.\nLayer 3: Fully Connected. This should have 120 outputs.\nActivation. Your choice of activation function.\nLayer 4: Fully Connected. This should have 84 outputs.\nActivation. Your choice of activation function.\nLayer 5: Fully Connected (Logits). 
This should have 10 outputs.\nOutput\nReturn the result of the 2nd fully connected layer.", "from tensorflow.contrib.layers import flatten\n\ndef LeNet(x): \n # Hyperparameters\n mu = 0\n sigma = 0.1\n \n # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n convolutional_1_weights = tf.Variable(tf.truncated_normal(shape=(5,5,1,6), mean = mu, stddev = sigma))\n convolutional_1_bias = tf.Variable(tf.zeros(6)) # set to 6 as output is 6\n convolutional_1 = tf.nn.conv2d(x, convolutional_1_weights, strides=[1,1,1,1], padding='VALID') + convolutional_1_bias\n \n # Activation.\n convolutional_1 = tf.nn.relu(convolutional_1)\n\n # Pooling. Input = 28x28x6. Output = 14x14x6.\n # Stride of 2 reduces output by 2\n convolutional_1 = tf.nn.max_pool(convolutional_1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')\n ## END Layer 1\n \n \n # Layer 2: Convolutional. Output = 10x10x16.\n convolutional_2_weights = tf.Variable(tf.truncated_normal(shape=(5,5,6,16), mean = mu, stddev = sigma))\n convolutional_2_bias = tf.Variable(tf.zeros(16))\n # pass the first layer\n convolutional_2 = tf.nn.conv2d(convolutional_1, convolutional_2_weights, strides=[1,1,1,1], padding='VALID' ) + convolutional_2_bias\n \n # Activation.\n convolutional_2 = tf.nn.relu(convolutional_2)\n \n # Pooling. Input = 10x10x16. Output = 5x5x16.\n convolutional_2 = tf.nn.max_pool(convolutional_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')\n \n # Flatten. Input = 5x5x16. Output = 400.\n fully_connected_0 = flatten(convolutional_2)\n ### End Layer 2\n \n \n # Layer 3: Fully Connected. Input = 400. Output = 120.\n fully_connected_1_weights = tf.Variable(tf.truncated_normal(shape=(400,120), mean=mu, stddev=sigma))\n fully_connected_1_bias = tf.Variable(tf.zeros(120))\n fully_connected_1 = tf.matmul(fully_connected_0, fully_connected_1_weights) + fully_connected_1_bias\n \n # Activation.\n fully_connected_1 = tf.nn.relu(fully_connected_1)\n \n # Layer 4: Fully Connected. Input = 120. Output = 84.\n # shape = (input, output)\n fully_connected_2_weights = tf.Variable(tf.truncated_normal(shape=(120,84), mean=mu, stddev=sigma))\n fully_connected_2_bias = tf.Variable(tf.zeros(84))\n fully_connected_2 = tf.matmul(fully_connected_1, fully_connected_2_weights) + fully_connected_2_bias\n \n # Activation.\n fully_connected_2 = tf.nn.relu(fully_connected_2)\n \n # Layer 5: Fully Connected. Input = 84. 
Output = 10.\n fully_connected_3_weights = tf.Variable(tf.truncated_normal(shape=(84,10), mean=mu, stddev=sigma))\n fully_connected_3_bias = tf.Variable(tf.zeros(10))\n logits = tf.matmul(fully_connected_2, fully_connected_3_weights) + fully_connected_3_bias\n \n return logits", "Features and Labels\nTrain LeNet to classify MNIST data.\nx is a placeholder for a batch of input images.\ny is a placeholder for a batch of output labels.\nYou do not need to modify this section.", "x = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\n\n# added this to fix bug CUDA_ERROR_ILLEGAL_ADDRESS / kernal crash\nwith tf.device('/cpu:0'):\n one_hot_y = tf.one_hot(y, 10)", "Training Pipeline\nCreate a training pipeline that uses the model to classify MNIST data.\nYou do not need to modify this section.", "rate = 0.001\n\nlogits = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)", "Model Evaluation\nEvaluate how well the loss and accuracy of the model for a given dataset.\nYou do not need to modify this section.", "correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples", "Train the Model\nRun the training data through the training pipeline to train the model.\nBefore each epoch, shuffle the training set.\nAfter each epoch, measure the loss and accuracy of the validation set.\nSave the model after training.\nYou do not need to modify this section.", "with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n \n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n \n validation_accuracy = evaluate(X_validation, y_validation)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, '.\\lenet')\n print(\"Model saved\")", "Evaluate the Model\nOnce you are completely satisfied with your model, evaluate the performance of the model on the test set.\nBe sure to only do this once!\nIf you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data.\nYou do not need to modify this section.", "with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
folivetti/PIPYTHON
ListaEX_02.ipynb
mit
[ "Exercício 1: Crie três funções:\n1) Uma função chamada VerificaTriangulo() que recebe como parâmetro o comprimento dos três lados de um possível triângulo. Essa função deve retornar True caso esses comprimentos podem formar um triângulo e False caso contrário. \nPara 3 segmentos com comprimento x, y e z, respectivamente, formarem um triângulo, eles devem obdecer a TODAS as seguintes condições:\n\nx + y > z\nx + z > y\ny + z > x\n\n2) Uma função chamada TipoTriangulo() que recebe os mesmos parâmetros e retorna o tipo de triângulo que os segmentos formariam:\n\n\"equilátero\" se os três lados forem iguais\n\"isóceles\" se dois dos três lados forem iguais\n\"escaleno\" se os três lados forem diferentes\n\n3) Uma função chamada Triangulo() que também receberá os mesmos parâmetros e retornará o tipo de triângulo, caso os segmentos formem um, ou a string \"não é triângulo\", caso contrário.", "\n\n\n\n\n\n\n\n \nprint (Triangulo(5,5,5)) # equilatero\nprint (Triangulo(5,5,7)) # isóceles\nprint (Triangulo(3,4,5)) # escaleno\nprint (Triangulo(5,5,11)) # não é triângulo", "Exercício 2: Crie uma função para determinar se um ano é bissexto. \nO ano é bissexto se for múltiplo de 400 ou múltiplo de 4 e não múltiplo de 100. Utilize o operador de resto da divisão (%) para determinar se um número é múltiplo de outro.", "\n\n\n\n\nprint (Bissexto(2000)) # True\nprint (Bissexto(2004)) # True\nprint (Bissexto(1900)) # False", "Exercício 3: Crie uma função que receba três valores x, y, z como parâmetros e retorne-os em ordem crescente.\nO Python permite que você faça comparações relacionais entre as 3 variáveis em uma única instrução:\nPython\nx &lt; y &lt; z", "\n\n\n\n\n\n\nprint (Crescente(1,2,3))\nprint (Crescente(1,3,2))\nprint (Crescente(2,1,3))\nprint (Crescente(2,3,1))\nprint (Crescente(3,1,2))\nprint (Crescente(3,2,1))\nprint (Crescente(1,2,2))", "Exercício 4: O peso ideial de uma pessoa segue a seguinte tabela:\n|Altura|Peso Homem|Peso Mulher|\n|--|--|--|\n|1,5 m|50 kg|48 kg|\n|1,7 m|74 kg|68 kg|\n|1,9 m|98 kg|88 kg|\n|2,1 m|122 kg|108 kg|\nFaça uma função que receba como parâmetro o gênero, altura e peso da pessoa e retorne True se ela está com o peso ideal.", "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nprint (PesoIdeal(\"masculino\", 1.87, 75)) # True\nprint (PesoIdeal(\"masculino\", 1.92, 200)) # False\nprint (PesoIdeal(\"feminino\", 1.87, 90)) # False\nprint (PesoIdeal(\"feminino\", 1.6, 40)) # True", "Exercício 5: Crie uma função que receba as coordenadas cx, cy, o raio r correspondentes ao centro e raio de uma circunferência e receba também coordenadas x, y de um ponto.\nA função deve retornar True se o ponto está dentro da circunferência e False, caso contrário.", "\n\n\n\n\n\n \nprint (Circunferencia(0,0,10,5,5) ) # True\nprint (Circunferencia(0,0,10,15,5)) # False", "Exercício 5b: Crie uma função chamada Circunferencia que recebe como entrada as coordenadas do centro cx e cy e o raio r da circunferência. Essa função deve criar uma outra função chamada VerificaPonto que recebe como entrada as coordenadas x e y de um ponto e retorna True caso o ponto esteja dentro da circunferência, ou False caso contrário. 
\nA função Circunferencia deve retornar a função Verifica.", "\n\n\n\nVerifica = Circunferencia(0,0,10)\nprint (Verifica(5,5))\nprint (Verifica(15,5))", "Exercício 6:\nA Estrela da Morte é uma arma desenvolvida pelo império para dominar o universo.\n\nUm telescópio digital foi desenvolvido pelas forças rebeldes para detectar o local dela.\nMas tal telescópio só consegue mostrar o contorno das circunferências encontradas indicando o centro e o raio delas.\nSabendo que uma Estrela da Morte é definida por:\n\nO raio de uma circunferência for 10 vezes maior que o raio da outra\nA circunferência menor se encontrar totalmente dentro da maior\nO contorno da circunferência menor está a pelo menos 2 unidades de distância do contorno da maior\n\nFaça uma função (utilizando os exercícios anteriores), para detectar se duas circunferências definidas por (cx1,cy1,r1) e (cx2,cy2,r2) podem formar uma Estrela da Morte.\nBônus: plote as circunferências utilizando a biblioteca gráfica.", "import math\n\n\n\n\n\n\n\n\n\n\n\nprint (EstrelaMorte(0,0,20,3,3,10))\nprint (EstrelaMorte(0,0,200,3,3,10))\nprint (EstrelaMorte(0,0,200,195,3,10))", "Exercício 7: Crie uma função para determinar as raízes reais da equação do segundo grau:\n$$\na.x^{2} + b.x + c = 0\n$$\nFaça com que a função retorne:\n\nUma raíz quando $b^2 = 4ac$\nRaízes complexas quando $b^2 < 4ac$\nRaízes reais, caso contrário\n\nUtilize a biblioteca cmath para calcular a raíz quadrada para números complexos.", "import math, cmath\n\n\n\n\n\n\n\n\n\nprint (RaizSegundoGrau(2,4,2) ) # -1.0\nprint (RaizSegundoGrau(2,2,2)) # -0.5 - 0.9j, -0.5+0.9j\nprint (RaizSegundoGrau(2,6,2)) # -2.6, -0.38" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
udacity/deep-learning
gan_mnist/Intro_to_GANs_Solution.ipynb
mit
[ "Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.", "%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')", "Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.", "def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') \n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n \n return inputs_real, inputs_z", "Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. 
So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = max(\\alpha * x, x)\n$$\nTanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.", "def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n with tf.variable_scope('generator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(z, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n \n # Logits and tanh output\n logits = tf.layers.dense(h1, out_dim, activation=None)\n out = tf.tanh(logits)\n \n return out", "Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.", "def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n with tf.variable_scope('discriminator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(x, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n \n logits = tf.layers.dense(h1, 1, activation=None)\n out = tf.sigmoid(logits)\n \n return out, logits", "Hyperparameters", "# Size of input image to discriminator\ninput_size = 784\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Smoothing \nsmooth = 0.1", "Build network\nNow we're building the network from the functions defined above.\nFirst is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).", "tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(input_size, z_size)\n\n# Build the model\ng_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)\n# g_model is the generator output\n\nd_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)\nd_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)", "Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. 
The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.", "# Calculate losses\nd_loss_real = tf.reduce_mean(\n                  tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, \n                                                          labels=tf.ones_like(d_logits_real) * (1 - smooth)))\nd_loss_fake = tf.reduce_mean(\n                  tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, \n                                                          labels=tf.zeros_like(d_logits_fake)))\nd_loss = d_loss_real + d_loss_fake\n\ng_loss = tf.reduce_mean(\n             tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\n                                                     labels=tf.ones_like(d_logits_fake)))", "Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. 
Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.", "# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('generator')]\nd_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)", "Training", "batch_size = 100\nepochs = 100\nsamples = []\nlosses = []\n# Only save generator variables\nsaver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g)) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)", "Training loss\nHere we'll check out the training losses for the generator and discriminator.", "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.", "def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_ = view_samples(-1, samples)", "Below I'm showing the generated images as the network was training, every 10 epochs. 
With bonus optical illusion!", "rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)", "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!", "saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\n_ = view_samples(0, [gen_samples])" ]
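As an aside on the Leaky ReLU discussion above: the same `tf.maximum(alpha * h1, h1)` pattern appears in both networks, so it can be convenient to factor it into a small helper. This is just a sketch reusing the `tf` and `alpha` names from the cells above, not something the notebook requires.

```python
def leaky_relu(x, alpha=0.01):
    # f(x) = max(alpha * x, x): positive inputs pass through unchanged,
    # negative inputs are scaled down by alpha
    return tf.maximum(alpha * x, x)

# e.g. inside generator(): h1 = leaky_relu(tf.layers.dense(z, n_units, activation=None), alpha)
```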
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
KIPAC/StatisticalMethods
tutorials/cepheids_all_galaxies.ipynb
gpl-2.0
[ "Tutorial: The Cepheid Period-Luminosity Relation for Multiple Galaxies\nSo far (in the cepheids and cepheids_one_galaxy notebooks), we have fit a hierarchical model describing the period-luminosity relation and its intrinsic scatter to data from a single galaxy. Next, we're interested in how similar, or not, these scaling relation parameters ($a$, $b$ and $\\sigma$) are among the galaxies in the data set.\nA sensible place to start is to do an identical fit independently to each galaxy to see how compatible the parameter values are. Then we'll fit a model with another level of hierarchy that assumes these parameters come from a parent distribution. The width of that parent distribution will be a parameter that tells us how similar things are from galaxy to galaxy.\nStart by restoring the previous notebook:", "exec(open('tbc.py').read()) # define TBC and TBC_above\nimport dill\n\n# may need to change the load path\nTBC() # dill.load_session('../ignore/cepheids_one.db')\n\nexec(open('tbc.py').read()) # (re-)define TBC and TBC_above", "1. Data\nWe should now have all the data loaded, named as it was before. As a reminder, these are the NGC numbers of the galaxies in the data set:", "ngc_numbers", "2. Independent fits for each galaxy\nThis class will package up the fitting using the \"4b\" method from the previous notebook (emcee plus analytic integration). In particular, it relies on the log_prior, log_posterior and log_likelihood_B functions (as well as the data, among other previous global-scope definitions). If you want to use a different approach instead, feel free.\nThere are various defaults here (e.g. nsteps, burn, maxlag) that you might want to tweak, but in principle they should work well enough for this problem.", "class singleFitter:\n def __init__(self, ngc):\n '''\n ngc: NGC identifier of the galaxy to fit\n '''\n self.ngc = ngc\n self.data = data[ngc] # from global scope\n # reproducing this for paranoia's sake\n self.param_names = ['a', 'b', 'sigma']\n self.param_labels = [r'$a$', r'$b$', r'$\\sigma$']\n def _logpost_vecarg_B(self, pvec):\n params = {name:pvec[i] for i,name in enumerate(self.param_names)}\n return log_posterior(self.data, log_likelihood_B, **params)\n def fit(self, guess, nsteps=7500):\n npars = len(self.param_names)\n nwalkers = 2*npars\n sampler = emcee.EnsembleSampler(nwalkers, npars, self._logpost_vecarg_B)\n start = np.array([np.array(guess)*(1.0 + 0.01*np.random.randn(npars)) for j in range(nwalkers)])\n %time sampler.run_mcmc(start, nsteps)\n plt.rcParams['figure.figsize'] = (16.0, 3.0*npars)\n fig, ax = plt.subplots(npars, 1);\n cr.plot_traces(sampler.chain[:min(8,nwalkers),:,:], ax, labels=self.param_labels);\n self.sampler = sampler\n self.nwalkers = nwalkers\n self.npars = npars\n self.nsteps = nsteps\n def burnin(self, burn=1000, maxlag=1000):\n tmp_samples = [self.sampler.chain[i,burn:,:] for i in range(self.nwalkers)]\n print('R =', cr.GelmanRubinR(tmp_samples))\n print('neff =', cr.effective_samples(tmp_samples, maxlag=maxlag))\n print('NB: Since walkers are not independent, these will be optimistic!')\n self.samples = self.sampler.chain[:,burn:,:].reshape(self.nwalkers*(self.nsteps-burn), self.npars)\n del self.sampler\n # make it simpler/more readable to access the parameter samples\n # (could have been fancier and more robust by using self.param_names here)\n self.a = self.samples[:,0]\n self.b = self.samples[:,1]\n self.sigma = self.samples[:,2]\n def thin(self, thinto=1000):\n j = np.round(np.linspace(0, self.samples.shape[0]-1, 
thinto)).astype(int)\n self.a = self.samples[j,0]\n self.b = self.samples[j,1]\n self.sigma = self.samples[j,2]", "Let's set up and run each of these fits, which hopefully shouldn't take too long. As always, you are responsible for looking over the trace plots and making sure everything is ok.", "independent_fits = [singleFitter(ngc) for ngc in ngc_numbers]\n\nindependent_fits[0].fit(guessvec)\n\nindependent_fits[1].fit(guessvec)\n\nindependent_fits[2].fit(guessvec)\n\nindependent_fits[3].fit(guessvec)\n\nindependent_fits[4].fit(guessvec)\n\nindependent_fits[5].fit(guessvec)\n\nindependent_fits[6].fit(guessvec)\n\nindependent_fits[7].fit(guessvec)\n\nindependent_fits[8].fit(guessvec)", "Based on the plots above, remove some burn-in. Check that the quantitative diagnostics are acceptable as they are printed out.", "TBC(1) # burn = ...\n\nfor f in independent_fits:\n print('NGC', f.ngc)\n f.burnin(burn=burn) # optionally, set maxlag here also\n print('')", "Now we'll use pygtc to plot all the individual posteriors, and see how they compare.", "plotGTC([f.samples for f in independent_fits], paramNames=param_labels, \n chainLabels=['NGC'+str(f.ngc) for f in independent_fits],\n figureSize=8, customLabelFont={'size':12}, customTickFont={'size':12}, customLegendFont={'size':16});", "Visually, would you say that it's likely that all the scaling parameters, or some subset, are universal?\n\nTBC commentary\n\n2. A hierarchical model for all galaxies\nOn the basis of the last section, it should be clear that at least one of the scaling parameters in question is not universal amongst galaxies in the data set, and at least one may well be. Further, it isn't obvious that there is any particular correlation or anticorrelation between the galaxy-to-galaxy differences in these parameters. If we were doing this as a research project, the latter would be an important thing to investigate, along with possible physical explanations for outliers. But we'll keep it relatively simple here.\nLet's add a level of hierarchy to the model by assuming that the values of $a$ for each galaxy come from a normal distribution with mean $\\mu_a$ and standard deviation $\\tau_a$, and similarly $b$ and $\\sigma$ come from their own normal distributions. We will not consider the possibility that, for example, all 3 come from a joint, multivariate normal distribution with possible correlations between them, although that could easily be justified. In practice, fitting for independent distributions for each parameter is a reasonable first step, much as fitting each galaxies data independently in Section 1 was a reasonable zeroth step.\nMake the relatively simple modifications to your PGM and probabilistic expressions from Section 2 of the previous notebook to accomodate this model.\n\nTBC probabilistic expressions and PGM\n\nWe will adopt wide, uniform priors on the new hyperparameters of the model, to make life easier.\n3. Strategy\nEven more than last time, the total number of free parameters in the model is, technically, staggering. We already know some ways of reducing the overhead associated with each galaxy. For example, using the analytic integration approach from the previous notebook, we have have only 3 parameters left to sample per galaxy, for a total of $3N_\\mathrm{gal}+6=33$ parameters. Brute force sampling of these 33 parameters is not unthinkable, although in practice it may or may not be a headache.\nAnother option is to make use of the samples we obtained in Section 1. 
These are samples of the posterior (for each galaxy) when the priors on the scaling parameters are very wide and uniform, i.e. constant over the domain where the likelihood is significantly non-zero. They are, therefore, also samples from a PDF that is proportional to the likelihood function. To see why that might be helpful, consider the posterior for the hyperparameters of the new model, $\\vec{\\alpha} = (\\mu_a,\\tau_a,\\mu_b,\\tau_b,\\mu_\\sigma,\\tau_\\sigma)$, marginalized over all the pesky $a_i$, $b_i$ and $\\sigma_i$ parameters:\n$p(\\vec{\\alpha}|\\mathrm{data}) \\propto p(\\vec{\\alpha}) \\prod_{i=1}^{N_\\mathrm{gal}} \\int da_i db_i d\\sigma_i \\, p(a_i,b_i,\\sigma_i|\\vec{\\alpha}) \\, p(\\mathrm{data}|a_i,b_i,\\sigma_i)$.\nTo restate what we said above, our individual fits (with uniform priors) give us samples from PDFs\n$q(a_i,b_i,\\sigma_i|\\mathrm{data}) \\propto p(\\mathrm{data}|a_i,b_i,\\sigma_i)$.\nWe can do this integral by simple monte carlo as\n$p(\\vec{\\alpha}|\\mathrm{data}) \\propto p(\\vec{\\alpha}) \\prod_{i=1}^{N_\\mathrm{gal}} \\frac{1}{n_i}\\sum_{k=1}^{n_i} p(a_{ik},b_{ik},\\sigma_{ik}|\\vec{\\alpha})$,\nwhere the $n_i$ samples of $(a_{ik},b_{ik},\\sigma_{ik}) \\sim q(a_i,b_i,\\sigma_i|\\mathrm{data})$. Our samples from Section 1 happen to satisfy this. (Had we used a non-uniform prior before, we could do something similar, but would need to divide by that prior density in the sum above.) This approach has the advantage that we only need to sample the 6 parameters in $\\vec{\\alpha}$ to constrain our hierarchical model, since a lot of work is already done. On the other hand, carrying out the sums for each galaxy can become its own numerical challenge.\nIf we're really stuck in terms of computing power, we could consider a more compressed version of this, by approximating the posterior from each individual galaxy fit as a 3-dimensional Gaussian, or some other simple function. This approximation may or may not be a significant concession on our parts; here it's clearly a bit sketchy in the case of $\\sigma$, which has a hard cut at $\\sigma=0$ that at least one individual galaxy is consistent with. But, with this approximation, the integral in the first equation above could be done analytically, much as we simplified things for the single-galaxy analysis.\nFinally, not that this is an exhaustive list, we could again consider whether conjugate Gibbs sampling is an option. Since the normal distribution has nice conjugacies, we could consider a scheme where we sample $\\mu_a|\\tau_a,{a_i}$, then $\\tau_a|\\mu_a,{a_i}$, then similarly for $\\mu_b$, $\\tau_b$, $\\mu_\\sigma$ and $\\tau_\\sigma$, and then all the individual $a_i$, $b_i$, $\\sigma_i$ and $M_{ij}$ parameters as we did with LRGS in the previous notebook (accounting for the normal \"prior\" on $a_i$ given by $\\mu_a$ and $\\tau_a$, etc.). Or we could conjugate-Gibbs sample the $\\mu$'s and $\\tau$'s, while using some other method entirely for the galaxy-specific parameters. (We will not actually walk through this, since (a) LRGS (in python) doesn't implement Gaussian priors on the intercept/slope parameters, even though it's a simple addition; (b) I don't feel like dragging yet another code into the mix; and (c) the Gaussian parent distribution is not conjugate for the $\\sigma$ parameters, so we'd have to use a different sampling method for those parameters anyway.)\n4. Obtain the posterior\n4a. 
Brute force\nLet's again start by trying brute force, although in this case we'll still use the analytic integration method from the last notebook rather than the brutest force, which would have a free absolute magnitude for every cepheid in every galaxy. We can make use of our array of singleFitter objects, and specifically their _logpost_vecarg_B methods to do that part of the calculation.\nThe prototypes below assume the 33 parameters are ordered as: $(\\mu_a,\\tau_a,\\mu_b,\\tau_b,\\mu_\\sigma,\\tau_\\sigma,a_1,b_1,\\sigma_1,a_2,b_2,\\sigma_2,\\ldots)$. Also, Let's... not include all the individual galaxy parameters in these lists of parameter names:", "param_names_all = ['mu_a', 'tau_a', 'mu_b', 'tau_b', 'mu_s', 'tau_s']\nparam_labels_all = [r'$\\mu_a$', r'$\\tau_a$', r'$\\mu_b$', r'$\\tau_b$', r'$\\mu_\\sigma$', r'$\\tau_\\sigma$']", "Complete the log-likelihood function for this part. Similarly to the way we dealt with Mtrue before, the galparams argument will end up being an array containing $(a_1,b_1,\\sigma_1,a_2,b_2,\\sigma_2,\\ldots)$, from which we can extract arrays of $a_i$, $b_i$ and $\\sigma_i$ if we want. The line given to you accounts for the $\\prod_{i=1}^{N_\\mathrm{gal}} p(\\mathrm{data}|a_i,b_i,\\sigma_i)$ part, ultimately calling log_likelihood_B and log_prior from the last notebook (see comments below).", "def log_likelihood_all_A(mu_a, tau_a, mu_b, tau_b, mu_s, tau_s, galparams):\n lnp = np.sum([f._logpost_vecarg_B(galparams[(0+3*i):(3+3*i)]) for i,f in enumerate(independent_fits)])\n TBC() # lnp += ... more stuff ...\n return lnp\n\nTBC_above()", "As a consequence of the code above calling _logpost_vecarg_B (note post), the old priors for the $a_i$, $b_i$ and $\\sigma_i$ will be included in the return value. This is ok only because we're using uniform priors, so in the log those priors are either a finite constant or $-\\infty$. In general, we would need to divide the old priors out somewhere in the new posterior calculation. Even better, we would not write such dangerously lazy code.\nBut for our limited purposes, it should work. The bottom line is that we don't need to worry about the priors for the $a_i$, $b_i$ and $\\sigma_i$ in the function below, just the hyperparameters of their parent distributions.\nAgain like the last notebook, we will make galparams an optional argument to the log-prior function, so we can re-use the function later, when the $a_i$, $b_i$ and $\\sigma_i$ are not being sampled.", "def log_prior_all(mu_a, tau_a, mu_b, tau_b, mu_s, tau_s, galparams=None):\n TBC()\n\nTBC_above()", "You can have the log-posterior functions.", "def log_posterior_all(loglike, **params):\n lnp = log_prior_all(**params)\n if lnp != -np.inf:\n lnp += loglike(**params)\n return lnp\n\ndef logpost_vecarg_all_A(pvec):\n params = {name:pvec[i] for i,name in enumerate(param_names_all)}\n params['galparams'] = pvec[len(param_names_all):]\n return log_posterior_all(log_likelihood_all_A, **params)", "Based on the triangle plot in the first section, guess rough starting values for $(\\mu_a,\\tau_a,\\mu_b,\\tau_b,\\mu_\\sigma,\\tau_\\sigma)$. (NB: make this a list rather than the usual dictionary.) 
We'll re-use the previous guess for the galaxy-specific parameters.", "TBC() # guess_all = [list of hyperparameter starting values]\n\nguess_all_A = np.array(guess_all + guessvec*9)", "Quick check that the functions above work:", "logpost_vecarg_all_A(guess_all_A)", "Below, we run emcee as before.\nIMPORTANT\nYou should find this to be more tractable than the \"brute force\" solution in the previous notebook, but still very slow compared to what we normally see in class. Again, you do not need to run this version long enough to get what we would normally consider acceptable results, in terms of convergence and number of independent samples. Just convince yourself that it's functioning, and see how it performs. Again, please do not turn in a notebook where the sampling cell below takes longer than $\\sim30$ seconds to evaluate.", "%%time\n\nnsteps = 100 # or whatever\n\nnpars = len(guess_all_A)\nnwalkers = 2*npars\nsampler = emcee.EnsembleSampler(nwalkers, npars, logpost_vecarg_all_A)\nstart = np.array([np.array(guess_all_A)*(1.0 + 0.01*np.random.randn(npars)) for j in range(nwalkers)])\nsampler.run_mcmc(start, nsteps)\nprint('Yay!')", "Look at the traces (we'll only include one of the galaxy's scaling parameters).", "npars = len(guess_all)+3\nplt.rcParams['figure.figsize'] = (16.0, 3.0*npars)\nfig, ax = plt.subplots(npars, 1);\ncr.plot_traces(sampler.chain[:min(8,nwalkers),:,:npars], ax, labels=param_labels_all+param_labels);\nnpars = len(guess_all_A)", "Go through the usual motions, making sure to set burn and maxlag to something appropriate for the length of the chain.", "TBC()\n# burn = ...\n# maxlag = ...\n\ntmp_samples = [sampler.chain[i,burn:,:9] for i in range(nwalkers)]\nprint('R =', cr.GelmanRubinR(tmp_samples))\nprint('neff =', cr.effective_samples(tmp_samples, maxlag=maxlag))\nprint('NB: Since walkers are not independent, these will be optimistic!')\nprint(\"Plus, there's a good chance that the results in this section are garbage...\")", "As before, we'll be comparing the posteriors from the methods we attempt:", "samples_all_A = sampler.chain[:,burn:,:].reshape(nwalkers*(nsteps-burn), npars)\nplotGTC([samples_all_A[:,:9]], paramNames=param_labels_all+param_labels, chainLabels=['emcee/brute'],\n figureSize=12, customLabelFont={'size':12}, customTickFont={'size':12}, customLegendFont={'size':16});", "To be more thorough, we would also want to see how well the new hierarchical part of the model fits, meaning whether the posteriors of $a_i$, $b_i$ and $\\sigma_i$ are collectively consistent with being drawn from their respective fitted Gaussians. Things might look slightly different than the plots we made above, since those fits used uniform priors rather than the hierarchical model. With only 9 galaxies, it seems unlikely that we could really rule out a Gaussian distribution, and it's tangential to the point of this tutorial. So this can be an exercise for the reader, if you want.\n4b. Sampling with numerical marginalization\nLet's see how we do trying to marginalize out the per-galaxy parameters by simple monte carlo, as described above,\n$p(\\mathrm{data}|\\vec{\\alpha}) = \\prod_{i=1}^{N_\\mathrm{gal}} \\frac{1}{n_i}\\sum_{k=1}^{n_i} p(a_{ik},b_{ik},\\sigma_{ik}|\\vec{\\alpha})$.\nNote that, because we are taking a sum of probabilities above, we do actually need to work with probabilities, as opposed to log-probabilities. You might reasonably worry about numerical stability here, but in this case a naive implementation seems to be ok. 
(In general, what we would need to check is whether the summands contributing most of the sum are easily floating-point representable, i.e. not so tiny that they underflow. We could always renormalize the summands to avoid this, since we will just end up taking the log afterwards.)\nImplement the log-likelihood for this approach below.", "def log_likelihood_all_B(mu_a, tau_a, mu_b, tau_b, mu_s, tau_s):\n TBC()\n \nTBC_above()", "This is for free:", "def logpost_vecarg_all_B(pvec):\n params = {name:pvec[i] for i,name in enumerate(param_names_all)}\n return log_posterior_all(log_likelihood_all_B, **params)", "The usual sanity check:", "logpost_vecarg_all_B(guess_all)", "Let's get an idea of how computationally expensive all these sums are by running a very short chain.", "nsteps = 10\n\nnpars = len(guess_all)\nnwalkers = 2*npars\nsampler = emcee.EnsembleSampler(nwalkers, npars, logpost_vecarg_all_B)\nstart = np.array([np.array(guess_all)*(1.0 + 0.01*np.random.randn(npars)) for j in range(nwalkers)])\n%time sampler.run_mcmc(start, nsteps)\nprint('Yay?')", "For me this comes out to about 7 seconds for 10 steps - slower than we'd ideally like, at least without more serious computing resources than my laptop. (If you run longer, though, you should see performance better than in part A.)\nHowever, it's worth asking if we can get away with using fewer samples. In principle, we are well justified in doing this, since the effective number of independent samples estimated for some of the individual fits is only $\sim500$ (when I ran them, anyway).\nNote that the cell below is destructive, in that we can't easily get the original chains back after running it. Keep that in mind if you plan to play around, or improve on the code at the start of the notebook.", "for f in independent_fits:\n f.thin(500)", "With only 500 samples left in the sum for each galaxy, it should be possible to get results that appear basically converged with a couple of minutes runtime (and you should do so). Nevertheless, before turning in the notebook, please reduce the number of steps such that the sampling cell below takes no longer than $\sim30$ seconds to evaluate. (You can leave a comment saying what number of steps you actually used, if you like.)", "%%time\n\nTBC() # nsteps = \n\nnpars = len(guess_all)\nnwalkers = 2*npars\nsampler = emcee.EnsembleSampler(nwalkers, npars, logpost_vecarg_all_B)\nstart = np.array([np.array(guess_all)*(1.0 + 0.01*np.random.randn(npars)) for j in range(nwalkers)])\nsampler.run_mcmc(start, nsteps);\nprint('Yay!')", "Let's see how it does:", "plt.rcParams['figure.figsize'] = (16.0, 3.0*npars)\nfig, ax = plt.subplots(npars, 1);\ncr.plot_traces(sampler.chain[:min(8,nwalkers),:,:npars], ax, labels=param_labels_all);", "The sampler is probably struggling to move around efficiently, but you could imagine running patiently for a while and ending up with something useful. Let's call this approach viable, but not ideal. 
Still, make sure you have reasonable convergence before continuing.", "TBC()\n# burn = ...\n# maxlag = ...\n\ntmp_samples = [sampler.chain[i,burn:,:] for i in range(nwalkers)]\nprint('R =', cr.GelmanRubinR(tmp_samples))\nprint('neff =', cr.effective_samples(tmp_samples, maxlag=maxlag))\nprint('NB: Since walkers are not independent, these will be optimistic!')", "And now the burning question: how does the posterior compare with the brute force version?", "samples_all_B = sampler.chain[:,burn:,:].reshape(nwalkers*(nsteps-burn), npars)\nplotGTC([samples_all_A[:,:len(param_names_all)], samples_all_B], paramNames=param_labels_all, chainLabels=['emcee/brute', 'emcee/SMC'],\n figureSize=10, customLabelFont={'size':12}, customTickFont={'size':12}, customLegendFont={'size':16});", "Checkpoint: Your posterior is compared with our solution by the cell below. Keep in mind they may have very different numbers of samples - we let ours run for several minutes.", "sol = np.loadtxt('solutions/ceph2.dat.gz')\nplotGTC([sol, samples_all_B], paramNames=param_labels_all, chainLabels=['solution', 'my emcee/SMC'],\n figureSize=8, customLabelFont={'size':12}, customTickFont={'size':12}, customLegendFont={'size':16});", "Comment on things like the efficiency, accuracy, and/or utility of the two approaches in parts A and B.\n\nTBC commentary\n\nAn extra step in this case would be needed to ensure that our results are robust - we would want to make sure that we are using enough samples of the individual fit posteriors that our final constraints are converged in that sense. Realistically, this would probably mean running each of those fits for significantly longer, and then using more samples in this final step, with all the computing overhead that entails.\nJust to mention it, there is a fun pathology that can come up with this technique, though thankfully it doesn't appear to have done so here. In brief, when the intrinsic scatter for some quantity is consistent with zero, the sampler may pick up on the effective scatter which is present because we have only discrete samples as we integrate. In other words, there's a lower limit to how well values of $a$ (for example) for the different galaxies can agree in practice, but this lower limit is an artifact of the discrete sampling. This scatter can in principle provide a very strong, unwanted signal, resulting in a sharp peak in the posterior samples at small values of $\\tau_a$. Yet, this spike would be spurious, as it wouldn't exist if we were doing the integrations in some another way that allowed multiple galaxies to have exactly the same value of $a$. As we can see here, this issue doesn't always arise, but it's something to watch out for.\nFinishing up\nOn the basis of your results, for which of the population parameters is the intrinsic scatter consistent with zero?\n\nTBC\n\nThat's as far as we'll go with this, although there are plenty more questions one could ask. Is the intrinsic scatter, in fact, Gaussian? What about the distribution of scaling relation parameters among galaxies? What is the formal evidence for a model like the one we used, versus identical scaling relations for every galaxy, versus completely unrelated ones? And so on." ]
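For reference, the simple monte carlo sum from Section 4b can be written quite compactly. The sketch below is one possible shape for that calculation, assuming each `singleFitter` in `independent_fits` exposes its (thinned) posterior samples as the `a`, `b` and `sigma` arrays set up earlier; it is meant to illustrate the sum, not as the only reasonable implementation.

```python
# Sketch of the marginalized likelihood from Section 4b (one possible approach).
import numpy as np
import scipy.stats as st

def log_likelihood_all_B_sketch(mu_a, tau_a, mu_b, tau_b, mu_s, tau_s):
    lnp = 0.0
    for f in independent_fits:
        # p(a_ik, b_ik, sigma_ik | alpha) evaluated at every posterior sample k
        p = (st.norm.pdf(f.a, loc=mu_a, scale=tau_a)
             * st.norm.pdf(f.b, loc=mu_b, scale=tau_b)
             * st.norm.pdf(f.sigma, loc=mu_s, scale=tau_s))
        term = np.mean(p)  # (1/n_i) * sum_k p(...)
        if term <= 0.0:
            return -np.inf  # guard against every summand underflowing
        lnp += np.log(term)
    return lnp
```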
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
oliverlee/pydy
examples/npendulum/n-pendulum-control.ipynb
bsd-3-clause
[ "Introduction\nSeveral pieces of the puzzle have come together lately to really demonstrate the power of the scientific python software packages to handle complex dynamic and controls problems (i.e. IPython notebooks, matplotlib animations, python-control, and our software packages: sympy.physics.mechanics and PyDy).\nThis blog post by Wolfram demonstrates Mathematica's ability to symbolically derive the equations of motion for the n-link pendulum and stabilize it with an LQR controller. This blog post inspired us to replicate the example with all free and open source software.\nIn this example problem, we derive the equations of motion of an n-link pendulum on a laterally sliding cart and then develop a controller to stabilize it. Balancing a single inverted pendulum is a classic problem that is often a student's first experience with non-linear dynamics and control. The problem here is extended to a general n-link pendulum in which the equations of motion quickly get messy with greater than 2 links.\nThe diagram below shows the general description of the problem.", "from IPython.display import SVG\nSVG(filename='n-pendulum-with-cart.svg')", "Setup\nThis example depends on the following software:\n\nIPython\nNumPy\nSciPy\nSymPy >= 0.7.6\nmatplotlib\n\nThe easiest way to install the Python packages it is to use conda:\n$ conda install ipython-notebook numpy scipy sympy matplotlib\nTo create animations you need a video encoder like ffmpeg installed. \nEquations of Motion\nWe'll start by generating the equations of motion for the system with SymPy mechanics. The functionality that mechanics provides is much more in depth than Mathematica's functionality. In the Mathematica example, Lagrangian mechanics were implemented manually with Mathematica's symbolic functionality. mechanics provides an assortment of functions and classes to derive the equations of motion for arbitrarily complex (i.e. configuration constraints, nonholonomic motion constraints, etc) multibody systems in a very natural way. First we import the necessary functionality from SymPy.", "from __future__ import division, print_function\n\nimport sympy as sm\nimport sympy.physics.mechanics as me", "We can enable mathematical rendering of the resulting equations in the notebook with the following command.", "me.init_vprinting()", "Now specify the number of links, $n$. I'll start with 5 since the Wolfram folks only showed four.", "n = 5", "mechanics will need the generalized coordinates, generalized speeds, and the input force which are all time dependent variables and the bob masses, link lengths, and acceleration due to gravity which are all constants. Time, $t$, is also made available because we will need to differentiate with respect to time.", "q = me.dynamicsymbols('q:{}'.format(n + 1)) # Generalized coordinates\nu = me.dynamicsymbols('u:{}'.format(n + 1)) # Generalized speeds\nf = me.dynamicsymbols('f') # Force applied to the cart\n \nm = sm.symbols('m:{}'.format(n + 1)) # Mass of each bob\nl = sm.symbols('l:{}'.format(n)) # Length of each link\ng, t = sm.symbols('g t') # Gravity and time", "Now we can create and inertial reference frame $I$ and define the point, $O$, as the origin.", "I = me.ReferenceFrame('I') # Inertial reference frame\nO = me.Point('O') # Origin point\nO.set_vel(I, 0) # Origin's velocity is zero", "Secondly, we define the define the first point of the pendulum as a particle which has mass. 
This point can only move laterally and represents the motion of the \"cart\".", "P0 = me.Point('P0') # Hinge point of top link\nP0.set_pos(O, q[0] * I.x) # Set the position of P0 \nP0.set_vel(I, u[0] * I.x) # Set the velocity of P0\nPa0 = me.Particle('Pa0', P0, m[0]) # Define a particle at P0", "Now we can define the $n$ reference frames, particles, gravitational forces, and kinematical differential equations for each of the pendulum links. This is easily done with a loop.", "frames = [I] # List to hold the n + 1 frames\npoints = [P0] # List to hold the n + 1 points\nparticles = [Pa0] # List to hold the n + 1 particles\nforces = [(P0, f * I.x - m[0] * g * I.y)] # List to hold the n + 1 applied forces, including the input force, f\nkindiffs = [q[0].diff(t) - u[0]] # List to hold kinematic ODE's\n\nfor i in range(n):\n Bi = I.orientnew('B' + str(i), 'Axis', [q[i + 1], I.z]) # Create a new frame\n Bi.set_ang_vel(I, u[i + 1] * I.z) # Set angular velocity\n frames.append(Bi) # Add it to the frames list\n\n Pi = points[-1].locatenew('P' + str(i + 1), l[i] * Bi.x) # Create a new point\n Pi.v2pt_theory(points[-1], I, Bi) # Set the velocity\n points.append(Pi) # Add it to the points list\n \n Pai = me.Particle('Pa' + str(i + 1), Pi, m[i + 1]) # Create a new particle\n particles.append(Pai) # Add it to the particles list\n\n forces.append((Pi, -m[i + 1] * g * I.y)) # Set the force applied at the point\n \n kindiffs.append(q[i + 1].diff(t) - u[i + 1]) # Define the kinematic ODE: dq_i / dt - u_i = 0", "With all of the necessary point velocities and particle masses defined, the KanesMethod class can be used to derive the equations of motion of the system automatically.", "kane = me.KanesMethod(I, q_ind=q, u_ind=u, kd_eqs=kindiffs) # Initialize the object\nfr, frstar = kane.kanes_equations(forces, particles) # Generate EoM's fr + frstar = 0", "The equations of motion are quite long as can been seen below. This is the general nature of most non-simple mutlibody problems. That is why a SymPy is so useful; no more mistakes in algebra, differentiation, or copying hand written equations. Note that trigsimp can take quite a while to complete for extremely large expressions. Below we print $\\tilde{M}$ and $\\tilde{f}$ from $\\tilde{M}\\dot{u}=\\tilde{f}$ to show the size of the expressions.", "sm.trigsimp(kane.mass_matrix)", "$\\tilde{M}$ is a function of the constant parameters and the configuration.", "me.find_dynamicsymbols(kane.mass_matrix)\n\nsm.trigsimp(kane.forcing)", "$\\tilde{f}$ is a function of the constant parameters, configuration, speeds, and the applied force.", "me.find_dynamicsymbols(kane.forcing)", "Simulation\nNow that the symbolic equations of motion are available we can simulate the pendulum's motion. We will need some more SymPy functionality and several NumPy functions, and most importantly the integration function from SciPy, odeint.", "import numpy as np\nfrom numpy.linalg import solve\nfrom scipy.integrate import odeint", "First, define some numeric values for all of the constant parameters in the problem.", "arm_length = 1. 
/ n # The maximum length of the pendulum is 1 meter\nbob_mass = 0.01 / n # The maximum mass of the bobs is 10 grams\nparameters = [g, m[0]] # Parameter definitions starting with gravity and the first bob\nparameter_vals = [9.81, 0.01 / n] # Numerical values for the first two\nfor i in range(n): # Then each mass and length\n parameters += [l[i], m[i + 1]] \n parameter_vals += [arm_length, bob_mass]", "Mathematica has a really nice NDSolve function for quickly integrating their symbolic differential equations. We make use of SymPy's lambdify function to do something similar, i.e. to create functions that will evaluate the \"full\" mass matrix, $M$, and \"full\" forcing vector, $f$ from $M\\dot{x} = f(x, r, t)$ as a NumPy function.", "dynamic = q + u # Make a list of the states\ndynamic.append(f) # Add the input force\n\nM_func = sm.lambdify(dynamic + parameters, kane.mass_matrix_full) # Create a callable function to evaluate the mass matrix \nf_func = sm.lambdify(dynamic + parameters, kane.forcing_full) # Create a callable function to evaluate the forcing vector ", "To integrate the ODE's we need to define a function that returns the derivatives of the states given the current state and time.", "def right_hand_side(x, t, args):\n \"\"\"Returns the derivatives of the states.\n\n Parameters\n ----------\n x : ndarray, shape(2 * (n + 1))\n The current state vector.\n t : float\n The current time.\n args : ndarray\n The constants.\n\n Returns\n -------\n dx : ndarray, shape(2 * (n + 1))\n The derivative of the state.\n \n \"\"\"\n r = 0.0 # The input force is always zero \n arguments = np.hstack((x, r, args)) # States, input, and parameters\n dx = np.array(solve(M_func(*arguments), # Solving for the derivatives\n f_func(*arguments))).T[0]\n \n return dx", "Now that we have the right hand side function, the initial conditions are set such that the pendulum is in the vertical equilibrium and a slight initial rate is set for each speed to ensure the pendulum falls. The equations can then be integrated with SciPy's odeint function given a time series.", "x0 = np.hstack((0.0, # q0\n np.pi / 2 * np.ones(len(q) - 1), # q1...qn+1\n 1e-3 * np.ones(len(u)))) # u0...un+1\n\nt = np.linspace(0.0, 10.0, num=500) # Time vector\n\nx = odeint(right_hand_side, x0, t, args=(parameter_vals,)) # Numerical integration", "Plotting\nThe results of the simulation can be plotted with matplotlib. First, load the plotting functionality.", "import matplotlib.pyplot as plt\n\n%matplotlib inline\n\nfrom IPython.core.pylabtools import figsize\nfigsize(8.0, 6.0)", "The coordinate trajectories are plotted below.", "lines = plt.plot(t, x[:, :x.shape[1] // 2])\nlab = plt.xlabel('Time [sec]')\nleg = plt.legend(dynamic[:x.shape[1] // 2])", "And the generalized speed trajectories.", "lines = plt.plot(t, x[:, x.shape[1] // 2:])\nlab = plt.xlabel('Time [sec]')\nleg = plt.legend(dynamic[x.shape[1] // 2:])", "Animation\nmatplotlib now includes very nice animation functions for animating matplotlib plots. First we import the necessary functions for creating the animation.", "from matplotlib import animation\nfrom matplotlib.patches import Rectangle", "The following function was modeled from Jake Vanderplas's post on matplotlib animations. 
The default animation writer is used (typically ffmpeg), you can change it by adding writer argument to anim.save call.", "def animate_pendulum(t, states, length, filename=None):\n \"\"\"Animates the n-pendulum and optionally saves it to file.\n\n Parameters\n ----------\n t : ndarray, shape(m)\n Time array.\n states: ndarray, shape(m,p)\n State time history.\n length: float\n The length of the pendulum links.\n filename: string or None, optional\n If true a movie file will be saved of the animation. This may take some time.\n\n Returns\n -------\n fig : matplotlib.Figure\n The figure.\n anim : matplotlib.FuncAnimation\n The animation.\n\n \"\"\"\n # the number of pendulum bobs\n numpoints = states.shape[1] // 2\n\n # first set up the figure, the axis, and the plot elements we want to animate\n fig = plt.figure()\n \n # some dimesions\n cart_width = 0.4\n cart_height = 0.2\n \n # set the limits based on the motion\n xmin = np.around(states[:, 0].min() - cart_width / 2.0, 1)\n xmax = np.around(states[:, 0].max() + cart_width / 2.0, 1)\n \n # create the axes\n ax = plt.axes(xlim=(xmin, xmax), ylim=(-1.1, 1.1), aspect='equal')\n \n # display the current time\n time_text = ax.text(0.04, 0.9, '', transform=ax.transAxes)\n \n # create a rectangular cart\n rect = Rectangle([states[0, 0] - cart_width / 2.0, -cart_height / 2],\n cart_width, cart_height, fill=True, color='red',\n ec='black')\n ax.add_patch(rect)\n \n # blank line for the pendulum\n line, = ax.plot([], [], lw=2, marker='o', markersize=6)\n\n # initialization function: plot the background of each frame\n def init():\n time_text.set_text('')\n rect.set_xy((0.0, 0.0))\n line.set_data([], [])\n return time_text, rect, line,\n\n # animation function: update the objects\n def animate(i):\n time_text.set_text('time = {:2.2f}'.format(t[i]))\n rect.set_xy((states[i, 0] - cart_width / 2.0, -cart_height / 2))\n x = np.hstack((states[i, 0], np.zeros((numpoints - 1))))\n y = np.zeros((numpoints))\n for j in np.arange(1, numpoints):\n x[j] = x[j - 1] + length * np.cos(states[i, j])\n y[j] = y[j - 1] + length * np.sin(states[i, j])\n line.set_data(x, y)\n return time_text, rect, line,\n\n # call the animator function\n anim = animation.FuncAnimation(fig, animate, frames=len(t), init_func=init,\n interval=t[-1] / len(t) * 1000, blit=True, repeat=False)\n \n # save the animation if a filename is given\n if filename is not None:\n anim.save(filename, fps=30, codec='libx264')", "Now we can create the animation of the pendulum. This animation will show the open loop dynamics.", "animate_pendulum(t, x, arm_length, filename=\"open-loop.mp4\")\n\nfrom IPython.display import HTML\nhtml = \\\n\"\"\"\n<video width=\"640\" height=\"480\" controls>\n <source src=\"open-loop.mp4\" type=\"video/mp4\">\nYour browser does not support the video tag, check out the YouTube version instead: http://youtu.be/Nj3_npq7MZI.\n</video>\n\"\"\"\nHTML(html)", "Controller Design\nThe n-link pendulum can be balanced such that all of the links are inverted above the cart by applying the correct lateral force to the cart. We can design a full state feedback controller based from a linear model of the pendulum about its upright equilibrium point. We'll start by specifying the equilibrium point and parameters in dictionaries. 
We make sure to use SymPy types in the equilibrium point to ensure proper cancelations in the linearization.", "equilibrium_point = [sm.S(0)] + [sm.pi / 2] * (len(q) - 1) + [sm.S(0)] * len(u) \nequilibrium_dict = dict(zip(q + u, equilibrium_point))\nequilibrium_dict", "The KanesMethod class has method that linearizes the forcing vector about generic state and input perturbation vectors. The equilibrium point and numerical constants can then be substituted in to give the linear system in this form: $M\\dot{x}=F_Ax+F_Br$. The state and input matrices, $A$ and $B$, can then be computed by left side multiplication by the inverse of the mass matrix: $A=M^{-1}F_A$ and $B=M^{-1}F_B$.", "M, F_A, F_B, r = kane.linearize(new_method=True, op_point=equilibrium_dict)\n\nsm.simplify(M)\n\nsm.simplify(F_A)\n\nsm.simplify(F_B)", "Now the numerical $A$ and $B$ matrices can be formed. First substitute numerical parameter values into $M$, $F_A$, and $F_B$.", "parameter_dict = dict(zip(parameters, parameter_vals))\nparameter_dict\n\nM_num = sm.matrix2numpy(M.subs(parameter_dict), dtype=float)\nF_A_num = sm.matrix2numpy(F_A.subs(parameter_dict), dtype=float)\nF_B_num = sm.matrix2numpy(F_B.subs(parameter_dict), dtype=float)\n\nA = np.linalg.solve(M_num, F_A_num)\nB = np.linalg.solve(M_num ,F_B_num)\n\nprint(A)\n\nprint(B)", "Also convert equilibrium_point to a numeric array:", "equilibrium_point = np.asarray([x.evalf() for x in equilibrium_point], dtype=float)", "Now that we have a linear system, the SciPy package can be used to design an optimal controller for the system.", "from numpy.linalg import matrix_rank\nfrom scipy.linalg import solve_continuous_are", "First we can check to see if the system is, in fact, controllable. The rank of the controllability matrix must be equal to the number of rows in $A$, but the matrix_rank algorithm is numerically ill conditioned and for certain values of $n$ this will fail, as seen below for $n=5$. Nevertheless, the system is controllable, no matter the number of links.", "def controllable(a, b):\n \"\"\"Returns true if the system is controllable and false if not.\n Parameters\n ----------\n a : array_like, shape(n,n)\n The state matrix.\n b : array_like, shape(n,r)\n The input matrix.\n Returns\n -------\n controllable : boolean\n \"\"\"\n a = np.matrix(a)\n b = np.matrix(b)\n n = a.shape[0]\n controllability_matrix = []\n for i in range(n):\n controllability_matrix.append(a ** i * b)\n controllability_matrix = np.hstack(controllability_matrix)\n\n return np.linalg.matrix_rank(controllability_matrix) == n\n\ncontrollable(A, B)", "So now we can compute the optimal gains with a linear quadratic regulator. I chose identity matrices for the weightings for simplicity.", "Q = np.eye(A.shape[0])\nR = np.eye(B.shape[1])\nS = solve_continuous_are(A, B, Q, R);\nK = np.dot(np.dot(np.linalg.inv(R), B.T), S)\nK", "The gains can now be used to define the required input during simulation to stabilize the system. 
The input $r$ is simply the gain vector multiplied by the error in the state vector from the equilibrium point, $r(t)=K(x_{eq} - x(t))$.", "def right_hand_side(x, t, args):\n \"\"\"Returns the derivatives of the states.\n\n Parameters\n ----------\n x : ndarray, shape(2 * (n + 1))\n The current state vector.\n t : float\n The current time.\n args : ndarray\n The constants.\n\n Returns\n -------\n dx : ndarray, shape(2 * (n + 1))\n The derivative of the state.\n \n \"\"\"\n r = np.dot(K, equilibrium_point - x) # The controller \n arguments = np.hstack((x, r, args)) # States, input, and parameters\n dx = np.array(solve(M_func(*arguments), # Solving for the derivatives\n f_func(*arguments))).T[0]\n \n return dx", "Now we can simulate and animate the system to see if the controller works.", "x0 = np.hstack((0,\n np.pi / 2 * np.ones(len(q) - 1),\n 1 * np.ones(len(u))))\nt = np.linspace(0.0, 10.0, num=500)\nx = odeint(right_hand_side, x0, t, args=(parameter_vals,))", "The plots show that we seem to have a stable system.", "lines = plt.plot(t, x[:, :x.shape[1] // 2])\nlab = plt.xlabel('Time [sec]')\nleg = plt.legend(dynamic[:x.shape[1] // 2])\n\nlines = plt.plot(t, x[:, x.shape[1] // 2:])\nlab = plt.xlabel('Time [sec]')\nleg = plt.legend(dynamic[x.shape[1] // 2:])\n\nanimate_pendulum(t, x, arm_length, filename=\"closed-loop.mp4\")\n\nfrom IPython.display import HTML\nhtml = \\\n\"\"\"\n<video width=\"640\" height=\"480\" controls>\n <source src=\"closed-loop.mp4\" type=\"video/mp4\">\nYour browser does not support the video tag, check out the YouTube version instead: http://youtu.be/SpgBHqW9om0\n</video>\n\"\"\"\nHTML(html)", "The video clearly shows that the controller can balance all $n$ of the pendulum links. The weightings in the lqr design can be tweaked to give different performance if needed.\nThis example shows that the free and open source scientific Python tools for dynamics are easily comparable in ability and quality to a commercial package such as Mathematica. \nThe IPython notebook for this example can be downloaded from https://github.com/pydy/pydy/tree/master/examples/npendulum. You can try out different $n$ values. I've gotten the equations of motion to compute for an open loop simulation of 10 links. My computer ran out of memory when I tried to compute for $n=50$. The controller weightings and initial conditions will probably have to be adjusted for better performance for $n>5$, but it should work.", "# Install with pip install version_information\n%load_ext version_information\n%version_information numpy, sympy, scipy, matplotlib, control" ]
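If the identity weightings turn out to be too timid for larger `n`, the LQR cost itself is the natural thing to adjust. The numbers below are only illustrative guesses to show where such tuning would go; the sketch reuses the `A`, `B`, `n` and `solve_continuous_are` names from the cells above.

```python
# Illustrative (untuned) re-weighting of the LQR cost.
Q = np.eye(A.shape[0])
Q[1:n + 1, 1:n + 1] *= 10.0    # penalize link-angle errors more heavily
R = 0.1 * np.eye(B.shape[1])   # permit larger control effort
S = solve_continuous_are(A, B, Q, R)
K = np.dot(np.dot(np.linalg.inv(R), B.T), S)
```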
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
zzsza/Datascience_School
10. 기초 확률론3 - 확률 분포 모형/13. 다변수 가우시안 정규 분포.ipynb
mit
[ "다변수 가우시안 정규 분포\n다변수 가우시안 정규 분포 혹은 간단히 다변수 정규 분포(MVN: Multivariate Normal)는 복수의 확률 변수를 모형화하는데 가장 많이 사용되는 분포이다. \n$D$차원 다변수 정규 분포의 확률 밀도 함수는 \n평균 벡터 $\\mu$ 와 공분산 행렬 $\\Sigma$ 라는 두 개의 모수를 가지며 다음과 같은 수식으로 정의된다. 이 때 공분산 행렬은 역행렬이 존재하는 대칭 행렬이어야 한다.\n$$ \\mathcal{N}(x ; \\mu, \\Sigma) = \\dfrac{1}{(2\\pi)^{D/2} |\\Sigma| ^{1/2}} \\exp \\left( -\\dfrac{1}{2} (x-\\mu)^T \\Sigma^{-1} (x-\\mu) \\right) $$\n이 식에서 각 기호의 의미는 다음과 같다.\n\n$x \\in \\mathbf{R}^D $ 확률 변수 벡터\n$\\mu \\in \\mathbf{R}^D $ 평균 벡터\n$\\Sigma \\in \\mathbf{R}^{D\\times D} $ 공분산 벡터\n$\\Sigma^{-1} \\in \\mathbf{R}^{D\\times D} $ 공분산 벡터의 역행렬\n\n공분산 벡터의 역행렬 $\\Sigma^{-1}$는 precision matrix 혹은 concentration matrix 라고도 한다.\nSciPy의 다변수 정규 분포 지원\nSciPy의 stats 서브패키지에는 다변수 정규 분포를 위한 multivariate_normal 클래스가 있다. mean 인수로 평균 벡터를, cov 인수로 공분산 행렬을 받는다.\n다변수 정규 분포의 예\n2차원($D=2$) 다변수 정규 분포의 예를 몇가지 살펴보자. \n우선 2차원이므로 확률 변수 벡터는 \n$$ \nx = \\begin{bmatrix}x_1 \\ x_2 \\end{bmatrix}\n$$\n이다.\n경우 1\n만약\n$$ \n\\mu = \\begin{bmatrix}2 \\ 3 \\end{bmatrix}. \\;\\;\\;\n\\Sigma = \\begin{bmatrix}1 & 0 \\ 0 & 1 \\end{bmatrix} \n$$\n이면 \n$$ \n; \\Sigma; ^{1/2} = 1. \\;\\;\\;\n\\Sigma^{-1} = \\begin{bmatrix}1 & 0 \\ 0 & 1 \\end{bmatrix}\n$$\n$$ \n(x-\\mu)^T \\Sigma^{-1} (x-\\mu) =\n\\begin{bmatrix}x_1 - 2 & x_2 - 3 \\end{bmatrix}\n\\begin{bmatrix}1 & 0 \\ 0 & 1 \\end{bmatrix} \n\\begin{bmatrix}x_1 - 2 \\ x_2 - 3 \\end{bmatrix}\n=\n(x_1 - 2)^2 + (x_2 - 3)^2\n$$\n$$\n\\mathcal{N}(x_1, x_2) = \\dfrac{1}{2\\pi}\n\\exp \\left( -\\dfrac{1}{2} \\left( (x_1 - 2)^2 + (x_2 - 3)^2 \\right) \\right)\n$$\n이 확률 밀도 함수의 모양은 다음과 같다.", "mu = [2, 3]\ncov = [[1, 0], [0, 1]]\nrv = sp.stats.multivariate_normal(mu, cov)\nxx = np.linspace(0, 4, 120)\nyy = np.linspace(1, 5, 150)\nXX, YY = np.meshgrid(xx, yy)\nplt.grid(False)\nplt.contourf(XX, YY, rv.pdf(np.dstack([XX, YY])))\nplt.axis(\"equal\")\nplt.show()", "경우 2\n만약\n$$ \n\\mu = \\begin{bmatrix}2 \\ 3 \\end{bmatrix}. \\;\\;\\;\n\\Sigma = \\begin{bmatrix}2 & 3 \\ 3 & 7 \\end{bmatrix} \n$$\n이면 \n$$ \n; \\Sigma ; = 5,\\;\\;\\;\n\\Sigma^{-1} = \\begin{bmatrix}1.4 & -0.6 \\ -0.6 & 0.4 \\end{bmatrix}\n$$\n$$ \n(x-\\mu)^T \\Sigma^{-1} (x-\\mu) =\n\\begin{bmatrix}x_1 - 2 & x_2 - 3 \\end{bmatrix}\n\\begin{bmatrix}1.4 & -0.6 \\ -0.6 & 0.4\\end{bmatrix}\n\\begin{bmatrix}x_1 - 2 \\ x_2 - 3 \\end{bmatrix}\n=\n\\dfrac{1}{10}\\left(14(x_1 - 2)^2 - 12(x_1 - 2)(x_2 - 3) + 4(x_2 - 3)^2\\right)\n$$\n$$\n\\mathcal{N}(x_1, x_2) = \\dfrac{1}{20\\pi}\n\\exp \\left( -\\dfrac{1}{10}\\left(7(x_1 - 2)^2 - 6(x_1 - 2)(x_2 - 3) + 2(x_2 - 3)^2\\right) \\right)\n$$\n이 확률 밀도 함수의 모양은 다음과 같다.", "mu = [2, 3]\ncov = [[2, 3],[3, 7]]\nrv = sp.stats.multivariate_normal(mu, cov)\nxx = np.linspace(0, 4, 120)\nyy = np.linspace(1, 5, 150)\nXX, YY = np.meshgrid(xx, yy)\nplt.grid(False)\nplt.contourf(XX, YY, rv.pdf(np.dstack([XX, YY])))\nplt.axis(\"equal\")\nplt.show()", "다변수 정규 분포의 최적화\n다변수 정규 분포를 모수에 대해 최적화하는 문제를 풀어보자. 어떤 함수에 로그를 취해도 최고점이나 최저점의 위치는 변하지 않기 때문에 문제를 쉽게 하기 위해 로그를 취한 분포함수를 최적화를 한다. 
\n$$ \\mathcal{N}(x ; \\mu, \\Sigma) = \\dfrac{1}{(2\\pi)^{D/2} |\\Sigma| ^{1/2}} \\exp \\left( -\\dfrac{1}{2} (x-\\mu)^T \\Sigma^{-1} (x-\\mu) \\right) $$\n$$ \\log \\mathcal{N}(x; \\mu, \\Sigma) = -\\dfrac{1}{2}\\log{2\\pi} -\\dfrac{1}{2} \\log|\\Sigma| - \\dfrac{1}{2} (x-\\mu)^T \\Sigma^{-1} (x-\\mu) $$\n여기에서 기호를 단순하게 하기 위해 precision matrix $\\Sigma ^{-1} $를 $ \\Lambda$ 로 표시하고 미분할 때 어차피 없어지는 상수항을 제외한 부분을 함수 $f$로 나타내자.\n$$ f = \\log| \\Lambda| - (x-\\mu)^T \\Lambda (x-\\mu) $$\n스칼라 함수 $f$ 를 평균 벡터 $\\mu$로 미분하면 다음과 같다.\n$$\n\\begin{eqnarray}\n\\dfrac{\\partial f}{\\partial \\mu} \n&=& \\dfrac{\\partial}{\\partial \\mu} \\big( \\log | \\Lambda| \\big) - \\dfrac{\\partial}{\\partial \\mu} \\big( (x-\\mu)^T \\Lambda (x-\\mu) \\big) \\\n&=& -(\\Lambda + \\Lambda^T)(x-\\mu) \\\n&=& 0\n\\end{eqnarray}\n$$\n$$\n\\therefore \\;\\; \\mu = x\n$$\n이번에는 두 개의 다변수 정규 분포 확률 변수 $X$, $Y$가 있을 경우를 생각해 보자. 두 확률 변수가 독립이며 모수 $\\mu$, $\\Lambda$가 같다고 가정하자.\n이 때 로그 정규 분포는 다음과 같다.\n$$ \n\\begin{eqnarray}\n\\dfrac{1}{2}f\n&=& \\log \\left( \\mathcal{N}(x; \\mu, \\Sigma)\\mathcal{N}(y; \\mu, \\Sigma)\\right) \\\n&=& \\log \\mathcal{N}(x; \\mu, \\Sigma) + \\log \\mathcal{N}(y; \\mu, \\Sigma) \\\n&=& - \\log|\\Sigma| - \\dfrac{1}{2} (x-\\mu)^T \\Lambda (x-\\mu) - \\dfrac{1}{2} (y-\\mu)^T \\Lambda (y-\\mu)\n\\end{eqnarray}\n$$\n이 함수 $f$를 평균 벡터 $\\mu$와 precision matrix $\\Lambda$ 로 미분하면 다음과 같다.\n$$\n\\begin{eqnarray}\n\\dfrac{\\partial f}{\\partial \\mu} \n&=& \\dfrac{\\partial}{\\partial \\mu} \\big( 2\\log | \\Lambda | \\big) \n- \\dfrac{\\partial}{\\partial \\mu} \\big( (x-\\mu)^T \\Lambda (x-\\mu) \\big) \n- \\dfrac{\\partial}{\\partial \\mu} \\big( (y-\\mu)^T \\Lambda (y-\\mu) \\big) \\\n&=& - (\\Lambda + \\Lambda^T)(x-\\mu) - (\\Lambda + \\Lambda^T)(y-\\mu) \\\n&=& 0\n\\end{eqnarray}\n$$\n$$\n\\therefore \\;\\; \\mu = \\dfrac{x + y}{2}\n$$\n$$\n\\begin{eqnarray}\n\\dfrac{\\partial f}{\\partial \\Lambda} \n&=& \\dfrac{\\partial}{\\partial \\Lambda} \\big( 2\\log | \\Lambda| \\big) \n- \\dfrac{\\partial}{\\partial \\Lambda} \\big( (x-\\mu)^T \\Lambda (x-\\mu) \\big) \n- \\dfrac{\\partial}{\\partial \\Lambda} \\big( (y-\\mu)^T \\Lambda (y-\\mu) \\big) \\\n&=& (2\\Lambda^{-1})^T \n- \\dfrac{\\partial}{\\partial \\Lambda} \\left( \\text{tr} \\left( (x-\\mu)(x-\\mu)^T \\Lambda \\right) \\right) \n- \\dfrac{\\partial}{\\partial \\Lambda} \\left( \\text{tr} \\left( (y-\\mu)(y-\\mu)^T \\Lambda \\right) \\right) \\\n&=& 2\\Sigma^T - ((x-\\mu)(x-\\mu)^T)^T - ((y-\\mu)(y-\\mu)^T)^T \\\n&=& 0\n\\end{eqnarray}\n$$\n$$\n\\therefore \\;\\; \\Sigma = \\dfrac{1}{2} \\left( (x-\\mu)(x-\\mu)^T + (y-\\mu)(y-\\mu)^T \\right)\n$$" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
saashimi/code_guild
wk9/notebooks/.ipynb_checkpoints/ch.1-getting-started-with-django-checkpoint.ipynb
mit
[ "Wk9.0\nCh. 1 Getting Django Set Up Using a Functional Test\nObey the testing goat! Do nothing until you have a test\nBefore even installing anything, we'll write a test.\nWriting our first test", "# Make a directory called examples\n#!mkdir ../examples\n%cd ../examples\n!ls\n\n\n# Write functional_tests.py\n#%%writefile functional_tests.py\n\nfrom selenium import webdriver\n\nbrowser = webdriver.Firefox()\nbrowser.get('http://localhost:8000')\n\nassert 'Django' in browser.title", "Installing django and selenium", "# Create a virtual env to load with selenium and django\n#!conda create -yn django_env django python=3 # y flag automatically selects yes to install\n#!source activate django_eng # activate virtual environment\n#!pip install --upgrade selenium # install selenium.", "Checking that our test correctly fails", "# Try running our tests. We're expecting an assertion error here.\n%run functional_tests.py", "Fixing our failure", "# Use django to create a project called 'superlists'\n#django-admin.py startproject superlists\n\n!tree ../examples/", "Let's fire up our new project on a django server", "!cd superlists/ && python3 manage.py runserver", "Do our tests pass now?", "%run functional_tests.py ", "Now that our test passed, let's turn this into a git repo.", "# First, move our tests into the main project dir.\n#!mv functional_tests.py superlists/\n# \n# Change directories and initialize our superlist into a new git repo\n#%cd superlists/\n#!git init .\n\n%ls\n\n# Don't add the database to git.\n#! echo \"db.sqlite3\" >> .gitignore # >> means concatenate to end of file.\n\n# Don't add .pyc files\n#!echo \"*.pyc\" >> .gitignore\n\n# Add everything else.\n#!git add .\n#!git status\n#!git commit -m \"Initial commit\"" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
hbwzhsh/pyDataScienceToolkits_Base
Visualization/.ipynb_checkpoints/(2)interesting_plot-checkpoint.ipynb
mit
[ "内容索引\n3hao\n4hao\n\n动画 --- 动画模块animation、FuncAnimation函数\ndsafa\n三维绘图 --- Axes3D对象、plot_surface函数\n等高线图 --- contour函数、contourf函数\n\ncpp\nint main()\n{\n return 0;\n}", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation", "1. 动画\nMatplotlib提供了动画功能,有专门的动画模块。我们需要定义一个回调函数,用于定期更新屏幕上的内容。", "fig = plt.figure()\nax = fig.add_subplot(111)\nN = 10\nx = np.random.rand(N)\ny = np.random.rand(N)\nz = np.random.rand(N)\n\ncircles, triangles, dots = ax.plot(x, 'ro', y, 'g^', z, 'b.')\nax.set_ylim(0,1)\nplt.axis('off')\n\ndef update(data):\n circles.set_ydata(data[0])\n triangles.set_ydata(data[1])\n return circles, triangles\n\ndef generate():\n while True:\n yield np.random.rand(2, N)\n \nanim = animation.FuncAnimation(fig, update, generate, interval=500)\nplt.show()", "2. 三维绘图\n对于3D作图,我们需要一个和三维投影相关的Axes3D对象。\n绘制简单的三维函数z = x^2 = y^2", "from mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n# 使用meshgrid函数创建二维的坐标网络\nu = np.linspace(-1,1,100)\nx, y = np.meshgrid(u, u)\nz = x**2 + y**2\n# 指定行和列的步幅,以及绘制曲面所用的色彩表(color map)\nax.plot_surface(x, y, z, rstride=4, cstride=4, cmap=cm.rainbow_r)\nplt.show()", "3. 绘制等高线图\nMatplotlib中的等高线3D绘图有两种风格,填充和非填充的。我们可以使用contour函数创建一般的等高线图。对于色彩填充的等高线图,可以使用contourf绘制。", "fig = plt.figure(figsize=(3,5))\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\n\nu = np.linspace(-1, 1, 100)\n\nx, y = np.meshgrid(u, u)\nz = x**2 + y**2\nax1.contour(x, y, z)\nax2.contourf(x, y, z)\n\nplt.show()", "4. 结合三维绘图和等高线图", "fig = plt.figure()\nax = fig.gca(projection='3d')\nX, Y, Z = axes3d.get_test_data(0.05)\nax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, cmap=cm.winter_r)\n# 绘制等高线\ncset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)\ncset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)\ncset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)\n\nax.set_xlabel('X')\nax.set_xlim(-40, 40)\nax.set_ylabel('Y')\nax.set_ylim(-40, 40)\nax.set_zlabel('Z')\nax.set_zlim(-100, 100)\n\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gengstrand/clojure-news-feed
client/ml/dt/accuracy.ipynb
epl-1.0
[ "Comparing Accuracy: scikit-learn vs tensorflow\nIn this notebook, we train then test the model in a 60 / 40 split for the decision tree algo on both scikit-learn and tensorflow. First, we start with scikit-learn where we predict cloud vendor based on throughput.", "import graphviz\nimport pandas\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\n\nclf = tree.DecisionTreeClassifier()\ninput = pandas.read_csv(\"/home/glenn/git/clojure-news-feed/client/ml/etl/throughput.csv\")\ndata = input[input.columns[6:9]]\ntarget = input['cloud']\nX_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.4, random_state=0)\nclf = clf.fit(X_train, y_train)\nclf.score(X_test, y_test)", "Next, we evaluate scikit-learn accuracy where we predict feed implementation based on latency.", "import graphviz\nimport pandas\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\n\nclf = tree.DecisionTreeClassifier()\ninput = pandas.read_csv(\"/home/glenn/git/clojure-news-feed/client/ml/etl/latency.csv\")\ndata = input[input.columns[7:9]]\ndata['cloud'] = input['cloud'].apply(lambda x: 1.0 if x == 'GKE' else 0.0)\ntarget = input['feed']\nX_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.4, random_state=0)\nclf = clf.fit(X_train, y_train)\nclf.score(X_test, y_test)", "As you can see, scikit-learn has a 99% accuracy rate. We now do the same thing with tensorflow.", "import tensorflow as tf\nimport numpy as np\nimport pandas\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.contrib.tensor_forest.python import tensor_forest\nfrom tensorflow.contrib.learn.python.learn.utils import input_fn_utils\nfrom sklearn.model_selection import train_test_split\n\ninput = pandas.read_csv(\"/home/glenn/git/clojure-news-feed/client/ml/etl/latency.csv\")\ndata = input[input.columns[7:9]]\ndata['cloud'] = input['cloud'].apply(lambda x: 1.0 if x == 'GKE' else 0.0)\nX_train, X_test, y_train, y_test = train_test_split(data, input['feed'], test_size=0.4, random_state=0)\nX_train_np = np.array(X_train, dtype=np.float32)\ny_train_np = np.array(y_train, dtype=np.int32)\nX_test_np = np.array(X_test, dtype=np.float32)\ny_test_np = np.array(y_test, dtype=np.int32)\nhparams = tensor_forest.ForestHParams(num_classes=7,\n num_features=3,\n num_trees=1,\n regression=False,\n max_nodes=500).fill()\nclassifier = tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator(hparams)\nc = classifier.fit(x=X_train_np, y=y_train_np)\nc.evaluate(x=X_test_np, y=y_test_np)\n", "Looks like tensorflow has a 98% accuracy rate which is 1% less than scikit-learn algo. 
Let us use Tensorflow to look at the accuracy of predicting cloud vendor based on throughput.", "import tensorflow as tf\nimport numpy as np\nimport pandas\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.contrib.tensor_forest.python import tensor_forest\nfrom tensorflow.contrib.learn.python.learn.utils import input_fn_utils\nfrom sklearn.model_selection import train_test_split\n\ninput = pandas.read_csv(\"/home/glenn/git/clojure-news-feed/client/ml/etl/throughput.csv\")\ndata = input[input.columns[6:9]]\ntarget = input['cloud'].apply(lambda x: 1.0 if x == 'GKE' else 0.0)\nX_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.4, random_state=0)\nX_train_np = np.array(X_train, dtype=np.float32)\ny_train_np = np.array(y_train, dtype=np.int32)\nX_test_np = np.array(X_test, dtype=np.float32)\ny_test_np = np.array(y_test, dtype=np.int32)\nhparams = tensor_forest.ForestHParams(num_classes=3,\n num_features=3,\n num_trees=1,\n regression=False,\n max_nodes=500).fill()\nclassifier = tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator(hparams)\nc = classifier.fit(x=X_train_np, y=y_train_np)\nc.evaluate(x=X_test_np, y=y_test_np)", "Looks like Tensorflow is 3% less accurate than Scikit-learn when it comes to predicting cloud vendor based on throughput." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
samstav/scipy_2015_sklearn_tutorial
notebooks/03.2 Methods - Unsupervised Preprocessing.ipynb
cc0-1.0
[ "Example from Image Processing", "%matplotlib inline\nimport matplotlib.pyplot as plt", "Using PCA to Plot Datasets\nPCA is a useful preprocessing technique for both visualizing data in 2 or 3 dimensions, and for improving the performance of downstream algorithms such as classifiers. We will see more details about using PCA as part of a machine learning pipeline in the net section, but here we will explore the intuition behind what PCA does, and why it is useful for certain tasks.\nThe goal of PCA is to find the dimensions of maximum variation in the data, and project onto them. This is helpful for data that is stretched in a particular dimension. Here we show an example in two dimensions, to get an understanding for how PCA can help classification.", "import numpy as np\nrandom_state = np.random.RandomState(1999)\nX = np.random.randn(500, 2)\nred_idx = np.where(X[:, 0] < 0)[0]\nblue_idx = np.where(X[:, 0] >= 0)[0]\n# Stretching\ns_matrix = np.array([[1, 0],\n [0, 20]])\n# Rotation\nr_angle = 33\nr_rad = np.pi * r_angle / 180\nr_matrix = np.array([[np.cos(r_rad), -np.sin(r_rad)],\n [np.sin(r_rad), np.cos(r_rad)]])\n\nX = np.dot(X, s_matrix).dot(r_matrix) \nplt.scatter(X[red_idx, 0], X[red_idx, 1], color=\"darkred\")\nplt.scatter(X[blue_idx, 0], X[blue_idx, 1], color=\"steelblue\")\n\n# Fix axes to show mismatched dimensions\nplt.axis('off')\nplt.title(\"Skewed Data\")\n\nfrom sklearn.decomposition import PCA\npca = PCA()\nX_t = pca.fit_transform(X)\nplt.scatter(X_t[red_idx, 0], X_t[red_idx, 1], color=\"darkred\")\nplt.scatter(X_t[blue_idx, 0], X_t[blue_idx, 1], color=\"steelblue\")\nplt.axis('off')\nplt.title(\"PCA Corrected Data\")", "We can also use PCA to visualize complex data in low dimensions in order to see how \"close\" and \"far\" different datapoints are in a 2D space. There are many different ways to do this visualization, and some common algorithms are found in sklearn.manifold. PCA is one of the simplest and most common methods for quickly visualizing a dataset.\nNow we'll take a look at unsupervised learning on a facial recognition example.\nThis uses a dataset available within scikit-learn consisting of a\nsubset of the Labeled Faces in the Wild\ndata. Note that this is a relatively large download (~200MB) so it may\ntake a while to execute.", "from sklearn import datasets\nlfw_people = datasets.fetch_lfw_people(min_faces_per_person=70, resize=0.4,\n data_home='datasets')\nlfw_people.data.shape", "If you're on a unix-based system such as linux or Mac OSX, these shell commands\ncan be used to see the downloaded dataset:", "!ls datasets\n\n!du -sh datasets/lfw_home", "Let's visualize these faces to see what we're working with:", "fig = plt.figure(figsize=(8, 6))\n# plot several images\nfor i in range(15):\n ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[])\n ax.imshow(lfw_people.images[i], cmap=plt.cm.bone)", "We'll do a typical train-test split on the images before performing unsupervised learning:", "from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(lfw_people.data, lfw_people.target, random_state=0)\n\nprint(X_train.shape, X_test.shape)", "Feature Reduction Using Principal Component Analysis\nWe can use PCA to reduce the original 1850 features of the face images to a manageable\nsize, while maintaining most of the information in the dataset. 
Here it is useful to use a variant\nof PCA called RandomizedPCA, which is an approximation of PCA that can be much faster for large\ndatasets.", "from sklearn import decomposition\npca = decomposition.RandomizedPCA(n_components=150, whiten=True)\npca.fit(X_train)", "One interesting part of PCA is that it computes the \"mean\" face, which can be\ninteresting to examine:", "plt.imshow(pca.mean_.reshape((50, 37)), cmap=plt.cm.bone)", "The principal components measure deviations about this mean along orthogonal axes.\nIt is also interesting to visualize these principal components:", "print(pca.components_.shape)\n\nfig = plt.figure(figsize=(16, 6))\nfor i in range(30):\n ax = fig.add_subplot(3, 10, i + 1, xticks=[], yticks=[])\n ax.imshow(pca.components_[i].reshape((50, 37)), cmap=plt.cm.bone)", "The components (\"eigenfaces\") are ordered by their importance from top-left to bottom-right.\nWe see that the first few components seem to primarily take care of lighting\nconditions; the remaining components pull out certain identifying features:\nthe nose, eyes, eyebrows, etc.\nWith this projection computed, we can now project our original training\nand test data onto the PCA basis:", "X_train_pca = pca.transform(X_train)\nX_test_pca = pca.transform(X_test)\n\nprint(X_train_pca.shape)\nprint(X_test_pca.shape)", "These projected components correspond to factors in a linear combination of\ncomponent images such that the combination approaches the original face." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
facaiy/book_notes
machine_learning/tree/decision_tree/presentation.ipynb
cc0-1.0
[ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(color_codes=True)\nsns.set(font='SimHei')\n#plt.rcParams['axes.grid'] = False\n\nimport numpy as np\n\nimport pandas as pd\npd.options.display.max_rows = 10\n\nfrom IPython.display import Image", "决策树原理与实现简介\n前言\n为什么讲决策树?\n\n原理简单,直白易懂。\n可解释性好。\n变种在工业上应用多:随机森林、GBDT。\n\n深化拓展\n\n理论,考古:ID3, C4.5, CART\n工程,实现细节:\ndemo\nscikit-learn\nspark\nxgboost\n应用,调参分析\n演示\n\n理论\n算法:\n\nID3\nC4.5\nC5.0\nCART\nCHAID\nMARS\n\n行业黑话\n\n\n分类问题 vs 回归问题\n\n\n样本 = (特征$x$,真实值$y$)\n\n\n目的:找到模型$h(\\cdot)$,使得预测值$\\hat{y} = h(x)$ $\\to$ 真实值$y$", "from sklearn.datasets import load_iris\ndata = load_iris()\n\n# 准备特征数据\nX = pd.DataFrame(data.data, \n columns=[\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"])\n\n# 准备标签数据\ny = pd.DataFrame(data.target, columns=['target'])\ny.replace(to_replace=range(3), value=data.target_names, inplace=True)\n\n# 组建样本 [特征,标签]\nsamples = pd.concat([X, y], axis=1, keys=[\"x\", \"y\"])\nsamples.head(5)\n\nsamples[\"y\", \"target\"].value_counts()\n\nsamples[\"x\"].describe()", "三分钟明白决策树", "Image(url=\"https://upload.wikimedia.org/wikipedia/commons/f/f3/CART_tree_titanic_survivors.png\")\n\nImage(url=\"http://scikit-learn.org/stable/_images/iris.svg\")\n\nImage(url=\"http://scikit-learn.org/stable/_images/sphx_glr_plot_iris_0011.png\")\n\nsamples = pd.concat([X, y], axis=1)\nsamples.head(3)", "工程\nDemo实现\n其主要问题是在每次决策时找到一个分割点,让生成的子集尽可能地纯净。这里涉及到四个问题:\n\n如何分割样本?\n如何评价子集的纯净度?\n如何找到单个最佳的分割点,其子集最为纯净?\n如何找到最佳的分割点序列,其最终分割子集总体最为纯净?", "Image(url=\"https://upload.wikimedia.org/wikipedia/commons/f/f3/CART_tree_titanic_survivors.png\")", "1.0 如何分割样本\n决策树的分割方法是取一个特征 $f$ 和阈值 $t$,以此为界将样本 $X$ 拆分为两个子集 $X_l, X_r$。其数学表达形同:\n\\begin{align}\n X = \\begin{cases}\n X_l, \\ \\text{if } X[f] < t \\\n X_r, \\ \\text{if } X[f] \\geq t\n \\end{cases}\n\\end{align}", "def splitter(samples, feature, threshold):\n # 按特征 f 和阈值 t 分割样本\n \n left_nodes = samples.query(\"{f} < {t}\".format(f=feature, t=threshold))\n right_nodes = samples.query(\"{f} >= {t}\".format(f=feature, t=threshold))\n \n return {\"left_nodes\": left_nodes, \"right_nodes\": right_nodes}\n\nsplit = splitter(samples, \"sepal_length\", 5)\n\n# 左子集\nx_l = split[\"left_nodes\"].loc[:, \"target\"].value_counts()\nx_l\n\n# 右子集\nx_r = split[\"right_nodes\"].loc[:, \"target\"].value_counts()\nx_r", "2. 如何评价子集的纯净度?\n常用的评价函数正是计算各标签 $c_k$ 在子集中的占比 $p_k = c_k / \\sum (c_k)$,并通过组合 $p_k$ 来描述占比集中或分散。", "def calc_class_proportion(node):\n # 计算各标签在集合中的占比\n \n y = node[\"target\"]\n return y.value_counts() / y.count()\n\ncalc_class_proportion(split[\"left_nodes\"])\n\ncalc_class_proportion(split[\"right_nodes\"])", "主要的评价函数有三种,它们评价的是集合的不纯度(值越大,集合越混杂)。\n先做些数学定义以便于描述: \n假设对于集合 $m$ 有 $N_m$ 个样本,可分割成 $R_m$ 子集。 \n若总的标签类别有 $K$ 种,则标签 $k$ 在此集合中的占比为:\n\\begin{equation}\n \\hat{p}{m k} = \\frac{1}{N_m} \\displaystyle \\sum{x_i \\in R_m} I(y_i = k)\n\\end{equation}\n且令标签 $k$ 是占比最大的标签,即 $k(m) = \\operatorname{arg max}k \\hat{p}{m k}$.\n1. Misclassification error\n我们一般把集合的分类结果定义为占比最大的标签,那么落在此集合中的其它标签就是误分类。其比率是 $1 - \\hat{p}_{m k}(m)$.\nscikit-learn实现简介\n应用\n评价:过拟合\n如何模型解释\n参数的含义\n演示\n使用sklearn的决策树,进行一个小样本的分析" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/ec-earth-consortium/cmip6/models/ec-earth3-veg/ocean.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Ocean\nMIP Era: CMIP6\nInstitute: EC-EARTH-CONSORTIUM\nSource ID: EC-EARTH3-VEG\nTopic: Ocean\nSub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. \nProperties: 133 (101 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:59\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-veg', 'ocean')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Seawater Properties\n3. Key Properties --&gt; Bathymetry\n4. Key Properties --&gt; Nonoceanic Waters\n5. Key Properties --&gt; Software Properties\n6. Key Properties --&gt; Resolution\n7. Key Properties --&gt; Tuning Applied\n8. Key Properties --&gt; Conservation\n9. Grid\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Discretisation --&gt; Horizontal\n12. Timestepping Framework\n13. Timestepping Framework --&gt; Tracers\n14. Timestepping Framework --&gt; Baroclinic Dynamics\n15. Timestepping Framework --&gt; Barotropic\n16. Timestepping Framework --&gt; Vertical Physics\n17. Advection\n18. Advection --&gt; Momentum\n19. Advection --&gt; Lateral Tracers\n20. Advection --&gt; Vertical Tracers\n21. Lateral Physics\n22. Lateral Physics --&gt; Momentum --&gt; Operator\n23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\n24. Lateral Physics --&gt; Tracers\n25. Lateral Physics --&gt; Tracers --&gt; Operator\n26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\n27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\n28. Vertical Physics\n29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\n30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n35. Uplow Boundaries --&gt; Free Surface\n36. Uplow Boundaries --&gt; Bottom Boundary Layer\n37. Boundary Forcing\n38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\n39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\n40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\n41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing \n1. Key Properties\nOcean key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. 
Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean model code (NEMO 3.6, MOM 5.0,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Family\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OGCM\" \n# \"slab ocean\" \n# \"mixed layer ocean\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBasic approximations made in the ocean.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Primitive equations\" \n# \"Non-hydrostatic\" \n# \"Boussinesq\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the ocean component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# \"Salinity\" \n# \"U-velocity\" \n# \"V-velocity\" \n# \"W-velocity\" \n# \"SSH\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Seawater Properties\nPhysical properties of seawater in ocean\n2.1. Eos Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Wright, 1997\" \n# \"Mc Dougall et al.\" \n# \"Jackett et al. 2006\" \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.2. Eos Functional Temp\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTemperature used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# TODO - please enter value(s)\n", "2.3. Eos Functional Salt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSalinity used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Practical salinity Sp\" \n# \"Absolute salinity Sa\" \n# TODO - please enter value(s)\n", "2.4. Eos Functional Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDepth or pressure used in EOS for sea water ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pressure (dbars)\" \n# \"Depth (meters)\" \n# TODO - please enter value(s)\n", "2.5. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.6. Ocean Specific Heat\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecific heat in ocean (cpocean) in J/(kg K)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "2.7. Ocean Reference Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBoussinesq reference density (rhozero) in kg / m3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Bathymetry\nProperties of bathymetry in ocean\n3.1. Reference Dates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nReference date of bathymetry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Present day\" \n# \"21000 years BP\" \n# \"6000 years BP\" \n# \"LGM\" \n# \"Pliocene\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the bathymetry fixed in time in the ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.3. Ocean Smoothing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any smoothing or hand editing of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Source\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe source of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.source') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Nonoceanic Waters\nNon oceanic waters treatement in ocean\n4.1. Isolated Seas\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how isolated seas is performed", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. River Mouth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how river mouth mixing or estuaries specific treatment is performed", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Software Properties\nSoftware properties of ocean code\n5.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Resolution\nResolution in the ocean grid\n6.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Range Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.5. Number Of Vertical Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.6. Is Adaptive Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.7. Thickness Level 1\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThickness of first surface ocean level (in meters)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Tuning Applied\nTuning methodology for ocean component\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. 
Key Properties --&gt; Conservation\nConservation in the ocean component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBrief description of conservation methodology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in the ocean by the numerical schemes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Enstrophy\" \n# \"Salt\" \n# \"Volume of ocean\" \n# \"Momentum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Consistency Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAny additional consistency properties (energy conversion, pressure gradient discretisation, ...)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Corrected Conserved Prognostic Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSet of variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Was Flux Correction Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDoes conservation involve flux correction ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9. Grid\nOcean grid\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of grid in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nProperties of vertical discretisation in ocean\n10.1. Coordinates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical coordinates in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Z-coordinate\" \n# \"Z*-coordinate\" \n# \"S-coordinate\" \n# \"Isopycnic - sigma 0\" \n# \"Isopycnic - sigma 2\" \n# \"Isopycnic - sigma 4\" \n# \"Isopycnic - other\" \n# \"Hybrid / Z+S\" \n# \"Hybrid / Z+isopycnic\" \n# \"Hybrid / other\" \n# \"Pressure referenced (P)\" \n# \"P*\" \n# \"Z**\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. 
Partial Steps\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nUsing partial steps with Z or Z vertical coordinate in ocean ?*", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11. Grid --&gt; Discretisation --&gt; Horizontal\nType of horizontal discretisation scheme in ocean\n11.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal grid type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Lat-lon\" \n# \"Rotated north pole\" \n# \"Two north poles (ORCA-style)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. Staggering\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal grid staggering type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa E-grid\" \n# \"N/a\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite difference\" \n# \"Finite volumes\" \n# \"Finite elements\" \n# \"Unstructured grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Timestepping Framework\nOcean Timestepping Framework\n12.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.2. Diurnal Cycle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiurnal cycle type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Via coupling\" \n# \"Specific treatment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Timestepping Framework --&gt; Tracers\nProperties of tracers time stepping in ocean\n13.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time stepping scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14. Timestepping Framework --&gt; Baroclinic Dynamics\nBaroclinic dynamics in ocean\n14.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Preconditioned conjugate gradient\" \n# \"Sub cyling\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBaroclinic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Timestepping Framework --&gt; Barotropic\nBarotropic time stepping in ocean\n15.1. Splitting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime splitting method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"split explicit\" \n# \"implicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBarotropic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Timestepping Framework --&gt; Vertical Physics\nVertical physics time stepping in ocean\n16.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of vertical time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Advection\nOcean advection\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of advection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. 
Advection --&gt; Momentum\nProperties of lateral momemtum advection scheme in ocean\n18.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of lateral momemtum advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flux form\" \n# \"Vector form\" \n# TODO - please enter value(s)\n", "18.2. Scheme Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean momemtum advection scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. ALE\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nUsing ALE for vertical advection ? (if vertical coordinates are sigma)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.ALE') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19. Advection --&gt; Lateral Tracers\nProperties of lateral tracer advection scheme in ocean\n19.1. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for lateral tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19.3. Effective Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEffective order of limited lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.5. Passive Tracers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nPassive tracers advected", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ideal age\" \n# \"CFC 11\" \n# \"CFC 12\" \n# \"SF6\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.6. Passive Tracers Advection\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs advection of passive tracers different than active ? 
if so, describe.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Advection --&gt; Vertical Tracers\nProperties of vertical tracer advection scheme in ocean\n20.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for vertical tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21. Lateral Physics\nOcean lateral physics\n21.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lateral physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of transient eddy representation in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Eddy active\" \n# \"Eddy admitting\" \n# TODO - please enter value(s)\n", "22. Lateral Physics --&gt; Momentum --&gt; Operator\nProperties of lateral physics operator for momentum in ocean\n22.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\nProperties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean\n23.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics momemtum eddy viscosity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "23.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.4. Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24. Lateral Physics --&gt; Tracers\nProperties of lateral physics for tracers in ocean\n24.1. Mesoscale Closure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a mesoscale closure in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24.2. Submesoscale Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "25. Lateral Physics --&gt; Tracers --&gt; Operator\nProperties of lateral physics operator for tracers in ocean\n25.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\nProperties of eddy diffusity coeff in lateral physics tracers scheme in the ocean\n26.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics tracers eddy diffusity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. 
Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\nProperties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean\n27.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV in lateral physics tracers in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"GM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Constant Val\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf EIV scheme for tracers is constant, specify coefficient value (M2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.3. Flux Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV flux (advective or skew)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Added Diffusivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV added diffusivity (constant, flow dependent or none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Vertical Physics\nOcean Vertical Physics\n28.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vertical physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\nProperties of vertical physics in ocean\n29.1. Langmuir Cells Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there Langmuir cells mixing in upper ocean ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n*Properties of boundary layer (BL) mixing on tracers in the ocean *\n30.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of tracers, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of tracers coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n*Properties of boundary layer (BL) mixing on momentum in the ocean *\n31.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of momentum, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of momentum coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n*Properties of interior mixing in the ocean *\n32.1. Convection Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical convection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Non-penetrative convective adjustment\" \n# \"Enhanced vertical diffusion\" \n# \"Included in turbulence closure\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.2. Tide Induced Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how tide induced mixing is modelled (barotropic, baroclinic, none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.3. Double Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there double diffusion", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.4. Shear Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there interior shear mixing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n*Properties of interior mixing on tracers in the ocean *\n33.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of tracers, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "33.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of tracers coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n*Properties of interior mixing on momentum in the ocean *\n34.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "34.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of momentum, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "34.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of momentum coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35. Uplow Boundaries --&gt; Free Surface\nProperties of free surface in ocean\n35.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of free surface in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFree surface scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear implicit\" \n# \"Linear filtered\" \n# \"Linear semi-explicit\" \n# \"Non-linear implicit\" \n# \"Non-linear filtered\" \n# \"Non-linear semi-explicit\" \n# \"Fully explicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "35.3. Embeded Seaice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the sea-ice embeded in the ocean model (instead of levitating) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36. Uplow Boundaries --&gt; Bottom Boundary Layer\nProperties of bottom boundary layer in ocean\n36.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.2. Type Of Bbl\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diffusive\" \n# \"Acvective\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36.3. Lateral Mixing Coef\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "36.4. Sill Overflow\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any specific treatment of sill overflows", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37. Boundary Forcing\nOcean boundary forcing\n37.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of boundary forcing in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.2. Surface Pressure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.3. Momentum Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.4. Tracers Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.5. Wave Effects\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how wave effects are modelled at ocean surface.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.6. River Runoff Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how river runoff from land surface is routed to ocean and any global adjustment done.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.7. Geothermal Heating\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how geothermal heating is present at ocean bottom.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\nProperties of momentum bottom friction in ocean\n38.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum bottom friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Non-linear\" \n# \"Non-linear (drag function of speed of tides)\" \n# \"Constant drag coefficient\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "39. 
Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\nProperties of momentum lateral friction in ocean\n39.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum lateral friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Free-slip\" \n# \"No-slip\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\nProperties of sunlight penetration scheme in ocean\n40.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of sunlight penetration scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"1 extinction depth\" \n# \"2 extinction depth\" \n# \"3 extinction depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40.2. Ocean Colour\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the ocean sunlight penetration scheme ocean colour dependent ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "40.3. Extinction Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe and list extinctions depths for sunlight penetration scheme (if applicable).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing\nProperties of surface fresh water forcing in ocean\n41.1. From Atmopshere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from atmos in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.2. From Sea Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from sea-ice in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Real salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.3. Forced Mode Restoring\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface salinity restoring in forced mode (OMIP)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
giacomov/3ML
examples/grb_multi_analysis.ipynb
bsd-3-clause
[ "# Scientific libraries\nimport numpy as np\n\n%matplotlib notebook\nimport matplotlib.pyplot as plt\nfrom jupyterthemes import jtplot\n\njtplot.style(context='notebook', fscale=1, ticks=True, grid=False)\nplt.style.use('mike')\n\nfrom threeML import *\n\nimport warnings\nwarnings.simplefilter('ignore')\n", "Data setup\nWe are going to use data from Fermi-LAT, Fermi-GBM and Swift-XRT. Let's go through the process of setting up the data from each instrument. We will work from high energy to low energy.\nFermi-LAT\nOnce we have obtained the Fermi-LAT data, in this case, the LAT Low Energy (LLE) data, we can reduce the data into an plugin using the light curve tools provided in 3ML. LLE data is in the format of FITS event files with an associated spacecraft point history file and energy dispersion response. The TimeSeriesBuilder class has special methods for dealing with the LLE data.", "\n\nlle = TimeSeriesBuilder.from_lat_lle('lle',ft2_file=\"lle_pt.fit\",\n lle_file=\"lle.fit\",\n rsp_file=\"lle.rsp\")\nlle.set_background_interval('-100--10','150-500')\n\nlle.set_active_time_interval('68-110')\nlle.view_lightcurve(-200,500);\n\n\nlle_plugin = lle.to_spectrumlike()\nlle_plugin.use_effective_area_correction()\n\nlle_plugin.display()\n\nlle_plugin.view_count_spectrum();", "Fermi-GBM", "gbm_detectors = [\"n4\", \"n7\", \"n8\", \"b0\"]\n\nfor det in gbm_detectors:\n ts_cspec = TimeSeriesBuilder.from_gbm_cspec_or_ctime(\n det, cspec_or_ctime_file=f\"cspec_{det}.pha\", rsp_file=f\"cspec_{det}.rsp2\"\n )\n ts_cspec.set_background_interval(\"-400--10\", \"700-1200\")\n\n ts_cspec.save_background(filename=f\"{det}_bkg.h5\", overwrite=True)\n\ngbm_time_series = {}\ngbm_plugins = {}\nfor det in gbm_detectors:\n\n\n ts = TimeSeriesBuilder.from_gbm_tte(\n det,\n tte_file=f\"tte_{det}.fit.gz\",\n rsp_file=f\"cspec_{det}.rsp2\",\n restore_background=f\"{det}_bkg.h5\",\n )\n\n gbm_time_series[det] = ts\n\n ts.view_lightcurve(-10, 200)\n ts.set_active_time_interval(\"68-110\")\n gbm_plugins[det] = ts.to_spectrumlike()\n\nfor det, plugin in gbm_plugins.items():\n\n if det.startswith(\"b\"):\n\n plugin.set_active_measurements(\"250-30000\")\n\n else:\n\n plugin.set_active_measurements(\"10-900\")\n \n if det != \"n3\":\n \n plugin.use_effective_area_correction()\n\n plugin.rebin_on_background(1)\n plugin.view_count_spectrum()", "Swift-XRT\nFor Swift-XRT, we can use the normal OGIPLike plugin, but the energy resolution of the instrument is so fine that we would waste time integrating over the photon bins during forward-folding. 
Thus, there is a special plugin that overrides the computation of the photon integrals with a simple sum.", "xrt = SwiftXRTLike(\n \"xrt\",\n observation=\"awt.pi\",\n background=\"awtback.pi\",\n arf_file=\"awt.arf\",\n response=\"awt.rmf\",\n)\nxrt.display()\n\nxrt.remove_rebinning()\nxrt.set_active_measurements('4-10')\n\nxrt.rebin_on_background(1.)\nxrt.use_effective_area_correction()\nxrt.view_count_spectrum();\n", "Combining all the plugins", "all_plugins = [lle_plugin, xrt]\nfor _ , plugin in gbm_plugins.items():\n \n all_plugins.append(plugin)\n\n\ndatalist = DataList(*all_plugins)", "Fitting\nModel setup\nBand Function", "sbpl = SmoothlyBrokenPowerLaw(pivot=1E3)\n\nsbpl.alpha.prior = Truncated_gaussian(lower_bound=-1.5, upper_bound=0, mu=-1, sigma=.5)\nsbpl.beta.prior = Truncated_gaussian(lower_bound=-3., upper_bound=-1.6, mu=-2, sigma=.5)\nsbpl.break_energy.prior = Log_uniform_prior(lower_bound=1, upper_bound=1E3)\nsbpl.break_scale.prior = Log_uniform_prior(lower_bound=1E-4, upper_bound=10.)\nsbpl.K.prior = Log_uniform_prior(lower_bound=1E-2, upper_bound=1E2)\nsbpl.K = 1E-1\nsbpl.break_energy.bounds = (0, None)\n\nsbpl.break_scale.free=True\n\nps = PointSource('grb',0,0,spectral_shape=sbpl)\n\nmodel = Model(ps)\n\nbayes = BayesianAnalysis(model,datalist)\nbayes.set_sampler('multinest')\n\nfor k,v in model.free_parameters.items():\n if \"cons\" in k:\n \n v.prior = Truncated_gaussian(lower_bound=.8, upper_bound=1.2, mu=1, sigma=.1)\n \n \n\n\n#bayes.sampler.setup(dlogz=10.,frac_remain=.5)\nbayes.sampler.setup(n_live_points=1000)\n\nbayes.sample()\n\n\nbayes.restore_median_fit()\n#sbpl.K = 1E-1\ndisplay_spectrum_model_counts(bayes, min_rate=[-1,5,5,5,5,5]);\n\nbayes.results.corner_plot();\n\nplot_point_source_spectra(bayes.results, flux_unit='erg2/(cm2 s keV)',ene_max =1E5);", "Fit" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dchud/warehousing-course
lectures/week-03/sql-demo.ipynb
cc0-1.0
[ "Sqlite3 and MySQL demo\nWith the excellent ipython-sql jupyter extension installed, it becomes very easy to connect to SQL database backends. This notebook demonstrates how to do this.\nNote that this is a Python 2 notebook.\nFirst, we need to activate the extension:", "%load_ext sql", "There are warnings, but that's okay - this happens a lot these days due to the whole ipython/jupyter renaming process. You can ignore them.\nGet a database\nUsing the bash shell (not a notebook!), follow the instructions at the SW Carpentry db lessons discussion page to get the survey.db file. This is a sqlite3 database.\nI recommend following up with the rest of the instructions on that page to explore sqlite3.\nConnecting to a Sqlite3 database\nThis part is easy, just connect like so (assuming the survey.db file is in the same directory as this notebook):", "%sql sqlite:///survey.db\n\n%sql SELECT * FROM Person;", "You should be able to execute all the standard SQL queries from the lesson here now. Note that you can also do this on the command line.\nNote specialized sqlite3 commands like \".schema\" might not work.\nConnecting to a MySQL database\nNow that you've explored the survey.db sample database with sqlite3, let's try working with mysql:", "%sql mysql://mysqluser:mysqlpass@localhost/", "note if you get an error about MySQLdb not being installed here, enter this back in your bash shell:\n% sudo pip install mysql-python\nIf it asks for your password, it's \"vagrant\".\nAfter doing this, try executing the above cell again. You should see:\nu'Connected: mysqluser@'\n...if it works.\nCreating a database\nNow that we're connected, let's create a database.", "%sql CREATE DATABASE week3demo;", "Now that we've created the database week3demo, we need to tell MySQL that we want to use it:", "%sql USE week3demo;", "But there's nothing in it:", "%sql SHOW TABLES;", "Creating a table\nFrom here we need to create a first table. Let's recreate the Person table from the SW Carpentry db lesson, topic 1.", "%%sql \nCREATE TABLE Person\n(ident CHAR(10),\n personal CHAR(25),\n family CHAR(25));\n\n%sql SHOW TABLES;\n\n%sql DESCRIBE Person;", "Inserting data\nOkay then, let's insert the sample data:", "%%sql\nINSERT INTO Person VALUES\n(\"dyer\", \"William\", \"Dyer\"),\n(\"pb\", \"Frank\", \"Pabodie\"),\n(\"lake\", \"Anderson\", \"Lake\"),\n(\"roe\", \"Valentina\", \"Roerich\"),\n(\"danforth\", \"Frank\", \"Danforth\")\n;", "Selecting data\nOkay, now we're cooking. There's data in the Person table, so we can start to SELECT it.", "%sql SELECT * FROM Person;\n\n%sql SELECT * FROM Person WHERE personal = \"Frank\";", "Accessing data from Python\nOne of the great things about ipython-sql is it marshalls all the data into Python objects for you. For example, to get the result data into a Python object, grab it from _:", "result = _\nprint result", "You can even assign it to a Pandas dataframe:", "df = result.DataFrame()\n\ndf", "Cleaning up\nIf you were just doing a little exploring and wish to clean up, it's easy to get rid of tables and databases.\nNOTE: these are permanent actions. Only do them if you know you don't need them any longer.\nTo get rid of a table, use DROP TABLE:", "%sql DROP TABLE Person;\n\n%sql SHOW TABLES;", "And to get rid of a whole database, use DROP DATABASE:", "%sql DROP DATABASE week3demo;\n\n%sql SHOW DATABASES;" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ptpro3/ptpro3.github.io
Projects/Project5/NeuralNetSum.ipynb
mit
[ "Neural Networks\nCredits:\n- Metis\n- Harsh Pokharna: For Dummies — The Introduction to Neural Networks we all need !\nArtificial Neural Networks are a computational approach that mimics brain function: a large collection of linked neural units.\n\n\nA perceptron is the digital equivalent of a neuron, firing if strength of inputs exceeds its threshold theta\n\nGeneral Neural Network with Hidden Layer\nDerivatives / Gradient Descent to optimize the \"weights\"", "# plot y = x-squared\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n%matplotlib inline\n\nx = np.linspace(-5,5,1000)\ny = x**2\nplt.plot(x,y);\n\n# create our function\ndef f(x):\n return x**2\n\n# define values\nepsilon = 1e-5\nx = 3\n\n# calculate delta y / delta x\ngradient = (f(x+epsilon) - f(x-epsilon)) / (2*epsilon)\n\n# compare with our known calculus solution\ngradient", "We can use gradient descent to minimize a cost function, thereby optimizing our weights.\nANNs in Sklearn\nMulti-layer Perceptron (MLP) models in sklearn\nThe advantages of MLP are:\n- Capability to learn non-linear models.\n- Capability to learn models in real-time (on-line learning) using partial_fit.\nThe disadvantages of MLP include:\n- MLP with hidden layers have a non-convex loss function where there exists more than one local minimum. Therefore different random weight initializations can lead to different validation accuracy.\n- MLP requires tuning a number of hyperparameters such as the number of hidden neurons, layers, and iterations.\n- MLP is sensitive to feature scaling.", "# build simple neural net with sklearn: An \"OR\" gate\nfrom sklearn.neural_network import MLPClassifier\n\nX = [[0., 0.], [1., 1.], [1., 0.], [0., 1.]]\ny = [0, 1, 1, 1]\n\nclf = MLPClassifier(hidden_layer_sizes=(5,2),\n solver='lbfgs',\n random_state=42)\nclf.fit(X,y)\n\n# predict new observations\nclf.predict([[0,1]])\n\n# find parameters\nprint([coef.shape for coef in clf.coefs_])\nclf.coefs_\n\nclf.predict([[2,2]])\n\nclf.predict([[-2,2]])\n\nclf.predict([[-2,-2]])", "Scaling\nMulti-layer Perceptron is sensitive to feature scaling, so it is highly recommended to scale your data.\nSolver options\nL-BFGS converges faster and with better solutions on small datasets. For relatively large datasets, Adam is performant and robust. SGD with momentum or nesterov’s momentum, on the other hand, can perform better than those two algorithms if learning rate is correctly tuned." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
joaoandre/algorithms
intro-python-data-science/course1_downloads/Assignment 2.ipynb
mit
[ "You are currently looking at version 1.1 of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the Jupyter Notebook FAQ course resource.\n\nAssignment 2 - Pandas Introduction\nAll questions are weighted the same in this assignment.\nPart 1\nThe following code loads the olympics dataset (olympics.csv), which was derrived from the Wikipedia entry on All Time Olympic Games Medals, and does some basic data cleaning. \nThe columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # number of games, total # of medals. Use this dataset to answer the questions below.", "import pandas as pd\n\ndf = pd.read_csv('olympics.csv', index_col=0, skiprows=1)\n\nfor col in df.columns:\n if col[:2]=='01':\n df.rename(columns={col:'Gold'+col[4:]}, inplace=True)\n if col[:2]=='02':\n df.rename(columns={col:'Silver'+col[4:]}, inplace=True)\n if col[:2]=='03':\n df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)\n if col[:1]=='№':\n df.rename(columns={col:'#'+col[1:]}, inplace=True)\n\nnames_ids = df.index.str.split('\\s\\(') # split the index by '('\n\ndf.index = names_ids.str[0] # the [0] element is the country name (new index) \ndf['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)\n\ndf = df.drop('Totals')\ndf.head()", "Question 0 (Example)\nWhat is the first country in df?\nThis function should return a Series.", "# You should write your whole answer within the function provided. The autograder will call\n# this function and compare the return value against the correct solution value\ndef answer_zero():\n # This function returns the row for Afghanistan, which is a Series object. The assignment\n # question description will tell you the general format the autograder is expecting\n return df.iloc[0]\n\n# You can examine what your function returns by calling it in the cell. If you have questions\n# about the assignment formats, check out the discussion forums for any FAQs\nanswer_zero() ", "Question 1\nWhich country has won the most gold medals in summer games?\nThis function should return a single string value.", "def answer_one():\n return \"YOUR ANSWER HERE\"", "Question 2\nWhich country had the biggest difference between their summer and winter gold medal counts?\nThis function should return a single string value.", "def answer_two():\n return \"YOUR ANSWER HERE\"", "Question 3\nWhich country has the biggest difference between their summer gold medal counts and winter gold medal counts relative to their total gold medal count? \n$$\\frac{Summer~Gold - Winter~Gold}{Total~Gold}$$\nOnly include countries that have won at least 1 gold in both summer and winter.\nThis function should return a single string value.", "def answer_three():\n return \"YOUR ANSWER HERE\"", "Question 4\nWrite a function to update the dataframe to include a new column called \"Points\" which is a weighted value where each gold medal counts for 3 points, silver medals for 2 points, and bronze mdeals for 1 point. The function should return only the column (a Series object) which you created.\nThis function should return a Series named Points of length 146", "def answer_four():\n return \"YOUR ANSWER HERE\"", "Part 2\nFor the next set of questions, we will be using census data from the United States Census Bureau. Counties are political and geographic subdivisions of states in the United States. 
This dataset contains population data for counties and states in the US from 2010 to 2015. See this document for a description of the variable names.\nThe census dataset (census.csv) should be loaded as census_df. Answer questions using this as appropriate.\nQuestion 5\nWhich state has the most counties in it? (hint: consider the sumlevel key carefully! You'll need this for future questions too...)\nThis function should return a single string value.", "census_df = pd.read_csv('census.csv')\ncensus_df.head()\n\ndef answer_five():\n return \"YOUR ANSWER HERE\"", "Question 6\nOnly looking at the three most populous counties for each state, what are the three most populous states (in order of highest population to lowest population)?\nThis function should return a list of string values.", "def answer_six():\n return \"YOUR ANSWER HERE\"", "Question 7\nWhich county has had the largest absolute change in population within the period 2010-2015? (Hint: population values are stored in columns POPESTIMATE2010 through POPESTIMATE2015, you need to consider all six columns.)\ne.g. If County Population in the 5 year period is 100, 120, 80, 105, 100, 130, then its largest change in the period would be |130-80| = 50.\nThis function should return a single string value.", "def answer_seven():\n return \"YOUR ANSWER HERE\"", "Question 8\nIn this datafile, the United States is broken up into four regions using the \"REGION\" column. \nCreate a query that finds the counties that belong to regions 1 or 2, whose name starts with 'Washington', and whose POPESTIMATE2015 was greater than their POPESTIMATE 2014.\nThis function should return a 5x2 DataFrame with the columns = ['STNAME', 'CTYNAME'] and the same index ID as the census_df (sorted ascending by index).", "def answer_eight():\n return \"YOUR ANSWER HERE\"" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
openfisca/openfisca-france-indirect-taxation
openfisca_france_indirect_taxation/examples/notebooks/consommations_coicop_par_decile.ipynb
agpl-3.0
[ "Le but de cet exemple est de calculer, pour chaque décile de revenu, la part de leur consommation que les ménages accordent à chaque catégorie de bien. Les catégories suivent le niveau le plus agrégé de la nomenclature COICOP.\nImport de modules généraux", "from __future__ import division\n\nimport pandas\nimport seaborn\n", "Import de modules spécifiques à Openfisca", "from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_bar\nfrom openfisca_france_indirect_taxation.surveys import SurveyScenario\n", "Import d'une nouvelle palette de couleurs", "seaborn.set_palette(seaborn.color_palette(\"Set2\", 12))\n%matplotlib inline", "Construction de la dataframe et réalisation des graphiques", " simulated_variables = ['coicop12_{}'.format(coicop12_index) for coicop12_index in range(1, 13)]\n for year in [2000, 2005, 2011]:\n survey_scenario = SurveyScenario.create(year = year)\n pivot_table = pandas.DataFrame()\n for values in simulated_variables:\n pivot_table = pandas.concat([\n pivot_table,\n survey_scenario.compute_pivot_table(values = [values], columns = ['niveau_vie_decile'])\n ])\n df = pivot_table.T\n df['depenses_tot'] = df[['coicop12_{}'.format(i) for i in range(1, 13)]].sum(axis = 1)\n\n for i in range(1, 13):\n df['part_coicop12_{}'.format(i)] = \\\n df['coicop12_{}'.format(i)] / df['depenses_tot']\n\n print 'Profil de la consommation des ménages en {}'.format(year)\n graph_builder_bar(df[['part_coicop12_{}'.format(i) for i in range(1, 13)]])\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/ko/guide/extension_type.ipynb
apache-2.0
[ "Copyright 2021 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "확장 유형\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/guide/extension_type\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org에서 보기</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/extension_type.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab에서 실행</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/extension_type.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub에서 소스 보기</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/extension_type.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">노트북 다운로드</a></td>\n</table>\n\n설정", "!pip install -q tf_nightly\nimport tensorflow as tf\nimport numpy as np\nfrom typing import Tuple, List, Mapping, Union, Optional\nimport tempfile", "확장 유형\n사용자 정의 유형을 사용하면 프로젝트를 더 읽기 쉽고 모듈식으로 유지 관리할 수 있습니다. 그러나 대부분의 TensorFlow API는 사용자 정의 Python 유형에 대한 지원이 매우 제한적입니다. 이것은 (예 모두 높은 수준의 API를 포함 Keras , tf.function , tf.SavedModel (예로서 하위 레벨의 API) tf.while_loop 및 tf.concat ). TensorFlow 확장 유형 을 사용하여 TensorFlow의 API와 원활하게 작동하는 사용자 정의 객체 지향 유형을 생성할 수 있습니다. tf.experimental.ExtensionType 을 기본으로 하는 Python 클래스를 정의하고 유형 주석 을 사용하여 각 필드의 유형을 지정하면 됩니다.", "class TensorGraph(tf.experimental.ExtensionType):\n \"\"\"A collection of labeled nodes connected by weighted edges.\"\"\"\n edge_weights: tf.Tensor # shape=[num_nodes, num_nodes]\n node_labels: Mapping[str, tf.Tensor] # shape=[num_nodes]; dtype=any\n\nclass MaskedTensor(tf.experimental.ExtensionType):\n \"\"\"A tensor paired with a boolean mask, indicating which values are valid.\"\"\"\n values: tf.Tensor\n mask: tf.Tensor # shape=values.shape; false for missing/invalid values.\n\nclass CSRSparseMatrix(tf.experimental.ExtensionType):\n \"\"\"Compressed sparse row matrix (https://en.wikipedia.org/wiki/Sparse_matrix).\"\"\"\n values: tf.Tensor # shape=[num_nonzero]; dtype=any\n col_index: tf.Tensor # shape=[num_nonzero]; dtype=int64\n row_index: tf.Tensor # shape=[num_rows+1]; dtype=int64", "tf.experimental.ExtensionType 기본 클래스는 표준 Python 라이브러리의 typing.NamedTuple 및 @dataclasses.dataclass 와 유사하게 작동합니다. 특히 필드 유형 주석을 기반으로 생성자와 특수 메서드(예: __repr__ 및 __eq__\n일반적으로 확장 유형은 다음 두 가지 범주 중 하나로 분류되는 경향이 있습니다.\n\n\n관련 값의 컬렉션을 그룹화하고 해당 값을 기반으로 유용한 작업을 제공할 수 있는 데이터 구조. 데이터 구조는 상당히 일반적일 수 있습니다(예 TensorGraph 예). 또는 특정 모델에 고도로 맞춤화될 수 있습니다.\n\n\n\"Tensor\"의 개념을 전문화하거나 확장하는 Tensor와 유사한 유형입니다. 이 범주의 유형에는 rank , shape 및 일반적으로 dtype . tf.stack , tf.add 또는 tf.matmul )과 함께 사용하는 것이 좋습니다. 
MaskedTensor 및 CSRSparseMatrix 는 텐서 유사 유형의 예입니다.\n\n\n지원되는 API\n확장 유형은 다음 TensorFlow API에서 지원됩니다.\n\nKeras Models 및 Layers 대한 입력 및 출력으로 사용할 수 있습니다.\ntf.data.Dataset : 확장 유형은 Datasets Iterators 의해 반환됩니다.\nTensorflow 허브 tf.hub 모듈의 입력 및 출력으로 사용할 수 있습니다.\nSavedModel SavedModel 함수에 대한 입력 및 출력으로 사용할 수 있습니다.\ntf.function @tf.function 데코레이터로 래핑된 함수의 인수 및 반환 값으로 사용할 수 있습니다.\nwhile 루프 : 확장 유형은 tf.while_loop 에서 루프 변수로 사용할 수 있으며 while 루프 본문에 대한 인수 및 반환 값으로 사용할 수 있습니다.\nconditionals tf.cond 및 tf.case 사용하여 조건부로 선택할 수 있습니다.\npy_function : 확장 유형을 인수로 사용할 수 있고 func 인수에 tf.py_function 반환할 수 있습니다.\nTensor ops tf.matmul , tf.gather 및 tf.reduce_sum )을 허용하는 대부분의 TensorFlow 작업을 지원하도록 확장될 수 있습니다. 자세한 내용은 아래의 \" 디스패치 \" 섹션을 참조하십시오.\n배포 전략 : 확장 유형을 복제본당 값으로 사용할 수 있습니다.\n\n자세한 내용은 아래 \"ExtensionTypes를 지원하는 TensorFlow API\" 섹션을 참조하세요.\n요구 사항\n필드 유형\n모든 필드(일명 인스턴스 변수)를 선언해야 하며 각 필드에 유형 주석을 제공해야 합니다. 다음 유형 주석이 지원됩니다.\n유형 | 예시\n--- | ---\n파이썬 정수 | i: int\n파이썬 수레 | f: float\n파이썬 문자열 | s: str\n파이썬 부울 | b: bool\n파이썬 없음 | n: None\n텐서 모양 | shape: tf.TensorShape\n텐서 dtypes | dtype: tf.DType\n텐서 | t: tf.Tensor\n확장 유형 | mt: MyMaskedTensor\n비정형 텐서 | rt: tf.RaggedTensor\n희소 텐서 | st: tf.SparseTensor\n인덱싱된 슬라이스 | s: tf.IndexedSlices\n선택적 텐서 | o: tf.experimental.Optional\n유형 조합 | int_or_float: typing.Union[int, float]\n튜플 | params: typing.Tuple[int, float, tf.Tensor, int]\n가변 길이 튜플 | lengths: typing.Tuple[int, ...]\n매핑 | tags: typing.Mapping[str, tf.Tensor]\n선택적 값 | weight: typing.Optional[tf.Tensor]\n가변성\n확장 유형은 변경 불가능해야 합니다. 이렇게 하면 TensorFlow의 그래프 추적 메커니즘으로 적절하게 추적할 수 있습니다. 확장 유형 값을 변경하려는 경우 값을 변환하는 메서드를 대신 정의하는 것이 좋습니다. 예를 들어 MaskedTensor 를 변경하기 위해 set_mask MaskedTensor 를 반환하는 replace_mask 메서드를 정의할 수 있습니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n def replace_mask(self, new_mask):\n self.values.shape.assert_is_compatible_with(new_mask.shape)\n return MaskedTensor(self.values, new_mask)", "ExtensionType 추가한 기능\nExtensionType 기본 클래스는 다음 기능을 제공합니다.\n\n생성자( __init__ ).\n인쇄 가능한 표현 방법( __repr__ ).\n등식 및 부등식 연산자( __eq__ ).\n유효성 검사 방법( __validate__ ).\n강제 불변성.\n중첩된 TypeSpec .\n텐서 API 디스패치 지원.\n\n이 기능을 사용자 정의하는 방법에 대한 자세한 내용은 아래의 \"ExtensionType 사용자 정의\" 섹션을 참조하십시오.\n건설자\nExtensionType 에 의해 추가된 생성자는 각 필드를 명명된 인수로 사용합니다(클래스 정의에 나열된 순서대로). 이 생성자는 각 매개변수를 유형 검사하고 필요한 경우 변환합니다. 특히, Tensor tf.convert_to_tensor 사용하여 변환됩니다. 
Tuple 필드로 변환됩니다 tuple 의; Mapping 필드는 변경할 수 없는 사전으로 변환됩니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n# Constructor takes one parameter for each field.\nmt = MaskedTensor(values=[[1, 2, 3], [4, 5, 6]],\n mask=[[True, True, False], [True, False, True]])\n\n# Fields are type-checked and converted to the declared types.\n# E.g., mt.values is converted to a Tensor.\nprint(mt.values)", "필드 값을 선언된 유형으로 변환할 수 없는 경우 생성자는 TypeError", "try:\n MaskedTensor([1, 2, 3], None)\nexcept TypeError as e:\n print(f\"Got expected TypeError: {e}\")", "필드의 기본값은 클래스 수준에서 값을 설정하여 지정할 수 있습니다.", "class Pencil(tf.experimental.ExtensionType):\n color: str = \"black\"\n has_erasor: bool = True\n length: tf.Tensor = 1.0\n\nPencil()\n\nPencil(length=0.5, color=\"blue\")", "인쇄 가능한 표현\nExtensionType 은 클래스 이름과 각 필드의 값을 포함하는 기본 인쇄 가능한 표현 방법( __repr__", "print(MaskedTensor(values=[1, 2, 3], mask=[True, True, False]))", "등호 연산자\nExtensionType 은 유형이 동일하고 모든 필드가 동일한 경우 두 값을 동일하게 간주하는 기본 동등 연산자( __eq__ 및 __ne__ 텐서 필드는 모양이 동일하고 모든 요소에 대해 요소별로 동일한 경우 동일한 것으로 간주됩니다.", "a = MaskedTensor([1, 2], [True, False])\nb = MaskedTensor([[3, 4], [5, 6]], [[False, True], [True, True]])\nprint(f\"a == a: {a==a}\")\nprint(f\"a == b: {a==b}\")\nprint(f\"a == a.values: {a==a.values}\")", "참고: Tensor 가 포함된 경우 __eq__ 는 (Python 부울 값 대신) Tensor 반환할 수 있습니다.\n검증 방법\nExtensionType 은 필드에 대한 유효성 검사를 수행하기 위해 재정의할 수 있는 __validate__ 생성자가 호출되고 필드가 유형 검사되고 선언된 유형으로 변환된 후에 실행되므로 모든 필드에 선언된 유형이 있다고 가정할 수 있습니다.\n다음 예제는 MaskedTensor 를 업데이트하여 해당 필드의 shape s 및 dtype 을 확인합니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n \"\"\"A tensor paired with a boolean mask, indicating which values are valid.\"\"\"\n values: tf.Tensor\n mask: tf.Tensor\n def __validate__(self):\n self.values.shape.assert_is_compatible_with(self.mask.shape)\n assert self.mask.dtype.is_bool, 'mask.dtype must be bool'\n\ntry:\n MaskedTensor([1, 2, 3], [0, 1, 0]) # wrong dtype for mask.\nexcept AssertionError as e:\n print(f\"Got expected AssertionError: {e}\")\n\ntry:\n MaskedTensor([1, 2, 3], [True, False]) # shapes don't match.\nexcept ValueError as e:\n print(f\"Got expected ValueError: {e}\")", "강제 불변성\nExtensionType __setattr__ 및 __delattr__ 메서드를 재정의하여 변형을 방지하여 확장 유형 값을 변경할 수 없도록 합니다.", "mt = MaskedTensor([1, 2, 3], [True, False, True])\n\ntry:\n mt.mask = [True, True, True]\nexcept AttributeError as e:\n print(f\"Got expected AttributeError: {e}\")\n\ntry:\n mt.mask[0] = False\nexcept TypeError as e:\n print(f\"Got expected TypeError: {e}\")\n\ntry:\n del mt.mask\nexcept AttributeError as e:\n print(f\"Got expected AttributeError: {e}\")", "중첩된 유형 사양\n각 ExtensionType 클래스에는 자동으로 생성되고 &lt;extension_type_name&gt;.Spec TypeSpec 클래스가 있습니다.\n이 클래스는 중첩된 텐서의 값을 제외한 값에서 모든 정보를 캡처합니다. 
특히 TypeSpec 은 중첩된 Tensor, ExtensionType 또는 CompositeTensor를 TypeSpec 으로 대체하여 생성됩니다.", "class Player(tf.experimental.ExtensionType):\n name: tf.Tensor\n attributes: Mapping[str, tf.Tensor]\n\nanne = Player(\"Anne\", {\"height\": 8.3, \"speed\": 28.1})\nanne_spec = tf.type_spec_from_value(anne)\nprint(anne_spec.name) # Records dtype and shape, but not the string value.\nprint(anne_spec.attributes) # Records keys and TensorSpecs for values.", "TypeSpec 값은 명시적으로 구성하거나 tf.type_spec_from_value 사용하여 ExtensionType 값에서 빌드할 수 있습니다.", "spec1 = Player.Spec(name=tf.TensorSpec([], tf.float32), attributes={})\nspec2 = tf.type_spec_from_value(anne)", "TypeSpec 은 TensorFlow에서 값을 정적 구성 요소 와 동적 구성 요소로 나누는 데 사용됩니다.\n\n그래프 생성 시 고정되는 정적 구성 요소 tf.TypeSpec 인코딩됩니다.\n그래프가 실행될 때마다 다를 수 있는 동적 구성 요소 tf.Tensor 목록으로 인코딩됩니다.\n\n예를 들어, tf.function은 인수에 이전에 볼 수 없는 TypeSpec tf.function .", "@tf.function\ndef anonymize_player(player):\n print(\"<<TRACING>>\")\n return Player(\"<anonymous>\", player.attributes)\n\n# Function gets traced (first time the function has been called):\nanonymize_player(Player(\"Anne\", {\"height\": 8.3, \"speed\": 28.1}))\n\n# Function does NOT get traced (same TypeSpec: just tensor values changed)\nanonymize_player(Player(\"Bart\", {\"height\": 8.1, \"speed\": 25.3}))\n\n# Function gets traced (new TypeSpec: keys for attributes changed):\nanonymize_player(Player(\"Chuck\", {\"height\": 11.0, \"jump\": 5.3}))", "자세한 내용은 tf.function 가이드를 참조하십시오.\nExtensionType 사용자 정의\n단순히 필드와 해당 유형을 선언하는 것 외에도 확장 유형은 다음을 수행할 수 있습니다.\n\n기본 인쇄 가능한 표현( __repr__ )을 재정의합니다.\n방법을 정의합니다.\n클래스 메서드와 정적 메서드를 정의합니다.\n속성을 정의합니다.\n기본 생성자( __init__ )를 재정의합니다.\n기본 항등 연산자( __eq__ )를 재정의합니다.\n연산자를 정의합니다(예: __add__ 및 __lt__ ).\n필드의 기본값을 선언합니다.\n하위 클래스를 정의합니다.\n\n기본 인쇄 가능한 표현 재정의\n확장 유형에 대해 이 기본 문자열 변환 연산자를 재정의할 수 있습니다. 다음 예제에서는 값이 Eager 모드에서 인쇄될 때 더 읽기 쉬운 문자열 표현을 생성 MaskedTensor", "class MaskedTensor(tf.experimental.ExtensionType):\n \"\"\"A tensor paired with a boolean mask, indicating which values are valid.\"\"\"\n values: tf.Tensor\n mask: tf.Tensor # shape=values.shape; false for invalid values.\n\n def __repr__(self):\n return masked_tensor_str(self.values, self.mask)\n\ndef masked_tensor_str(values, mask):\n if isinstance(values, tf.Tensor):\n if hasattr(values, 'numpy') and hasattr(mask, 'numpy'):\n return f'<MaskedTensor {masked_tensor_str(values.numpy(), mask.numpy())}>'\n else:\n return f'MaskedTensor(values={values}, mask={mask})'\n if len(values.shape) == 1:\n items = [repr(v) if m else '_' for (v, m) in zip(values, mask)]\n else:\n items = [masked_tensor_str(v, m) for (v, m) in zip(values, mask)]\n return '[%s]' % ', '.join(items)\n\nmt = MaskedTensor(values=[[1, 2, 3], [4, 5, 6]],\n mask=[[True, True, False], [True, False, True]])\nprint(mt)", "메소드 정의\n확장 유형은 일반 Python 클래스와 마찬가지로 메서드를 정의할 수 있습니다. 예를 들어 MaskedTensor default 대체된 마스킹된 값 self 의 복사본을 반환하는 with_default 메서드를 정의할 수 있습니다. @tf.function 데코레이터로 주석을 달 수 있습니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n def with_default(self, default):\n return tf.where(self.mask, self.values, default)\n\nMaskedTensor([1, 2, 3], [True, False, True]).with_default(0)", "클래스 메서드 및 정적 메서드 정의\n@classmethod 및 @staticmethod 데코레이터를 사용하여 메소드를 정의할 수 있습니다. 
예를 들어 MaskedTensor 유형은 주어진 값으로 모든 요소를 마스킹하는 팩토리 메소드를 정의할 수 있습니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n def __repr__(self):\n return masked_tensor_str(self.values, self.mask)\n\n @staticmethod\n def from_tensor_and_value_to_mask(values, value_to_mask):\n return MaskedTensor(values, values == value_to_mask)\n\nx = tf.constant([[1, 0, 2], [3, 0, 0]])\nMaskedTensor.from_tensor_and_value_to_mask(x, 0)", "속성 정의\n확장 유형은 일반 Python 클래스와 마찬가지로 @property 데코레이터를 사용하여 속성을 정의할 수 있습니다. 예를 들어 MaskedTensor 유형은 값의 dtype에 대한 약칭인 dtype 속성을 정의할 수 있습니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n @property\n def dtype(self):\n return self.values.dtype\n\nMaskedTensor([1, 2, 3], [True, False, True]).dtype", "기본 생성자 재정의\n확장 유형에 대한 기본 생성자를 재정의할 수 있습니다. 사용자 정의 생성자는 선언된 모든 필드에 대해 값을 설정해야 합니다. 사용자 정의 생성자가 반환된 후 모든 필드가 유형 검사되고 위에서 설명한 대로 값이 변환됩니다.", "class Toy(tf.experimental.ExtensionType):\n name: str\n price: tf.Tensor\n def __init__(self, name, price, discount=0):\n self.name = name\n self.price = price * (1 - discount)\n\nprint(Toy(\"ball\", 5.0, discount=0.2)) # On sale -- 20% off!", "또는 기본 생성자를 그대로 두고 하나 이상의 팩토리 메소드를 추가하는 것을 고려할 수 있습니다. 예:", "class Toy(tf.experimental.ExtensionType):\n name: str\n price: tf.Tensor\n\n @staticmethod\n def new_toy_with_discount(name, price, discount):\n return Toy(name, price * (1 - discount))\n\nprint(Toy.new_toy_with_discount(\"ball\", 5.0, discount=0.2))", "기본 항등 연산자 재정의( __eq__ )\n확장 유형에 대한 __eq__ 연산자를 재정의할 수 있습니다. 다음 예제에서는 MaskedTensor 비교할 때 마스크된 요소를 무시하도록 MaskedTensor를 업데이트합니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n def __repr__(self):\n return masked_tensor_str(self.values, self.mask)\n\n def __eq__(self, other):\n result = tf.math.equal(self.values, other.values)\n result = result | ~(self.mask & other.mask)\n return tf.reduce_all(result)\n\nx = MaskedTensor([1, 2, 3, 4], [True, True, False, True])\ny = MaskedTensor([5, 2, 0, 4], [False, True, False, True])\nprint(x == y)", "참고: 기본 구현은 단순히 __eq__ 를 호출하고 결과를 무효화하기 __ne__ 를 재정의할 필요가 없습니다.\n정방향 참조 사용\n필드 유형이 아직 정의되지 않은 경우 유형 이름이 포함된 문자열을 대신 사용할 수 있습니다. 다음 예제에서는 Node 유형이 아직 (완전히) 정의되지 않았기 때문에 \"Node\" children 필드에 주석을 다는 데 사용됩니다.", "class Node(tf.experimental.ExtensionType):\n value: tf.Tensor\n children: Tuple[\"Node\", ...] = ()\n\nNode(3, [Node(5), Node(2)])", "서브클래스 정의\n확장 유형은 표준 Python 구문을 사용하여 하위 분류될 수 있습니다. 확장 유형 하위 클래스는 새 필드, 메서드 및 속성을 추가할 수 있습니다. 생성자, 인쇄 가능한 표현 및 등호 연산자를 재정의할 수 있습니다. 다음 예제는 세 개의 Tensor 필드를 사용하여 노드 사이의 가장자리 집합을 인코딩하는 TensorGraph 그런 다음 각 노드에 대한 \"기능 값\"을 기록하기 위해 Tensor 필드를 추가하는 하위 클래스를 정의합니다. 
또한 하위 클래스는 가장자리를 따라 특성 값을 전파하는 방법을 정의합니다.", "class TensorGraph(tf.experimental.ExtensionType):\n num_nodes: tf.Tensor\n edge_src: tf.Tensor # edge_src[e] = index of src node for edge e.\n edge_dst: tf.Tensor # edge_dst[e] = index of dst node for edge e.\n\nclass TensorGraphWithNodeFeature(TensorGraph):\n node_features: tf.Tensor # node_features[n] = feature value for node n.\n\n def propagate_features(self, weight=1.0) -> 'TensorGraphWithNodeFeature':\n updates = tf.gather(self.node_features, self.edge_src) * weight\n new_node_features = tf.tensor_scatter_nd_add(\n self.node_features, tf.expand_dims(self.edge_dst, 1), updates)\n return TensorGraphWithNodeFeature(\n self.num_nodes, self.edge_src, self.edge_dst, new_node_features)\n\ng = TensorGraphWithNodeFeature( # Edges: 0->1, 4->3, 2->2, 2->1\n num_nodes=5, edge_src=[0, 4, 2, 2], edge_dst=[1, 3, 2, 1],\n node_features=[10.0, 0.0, 2.0, 5.0, -1.0, 0.0])\n\nprint(\"Original features:\", g.node_features)\nprint(\"After propagating:\", g.propagate_features().node_features)", "개인 필드 정의\n확장 유형의 필드는 접두사에 밑줄을 붙여 비공개로 표시할 수 있습니다(표준 Python 규칙에 따라). 이것은 TensorFlow가 어떤 식으로든 필드를 처리하는 방식에 영향을 미치지 않습니다. 그러나 단순히 확장 유형의 모든 사용자에게 해당 필드가 비공개라는 신호 역할을 합니다.\nExtensionType의 TypeSpec\n각 ExtensionType 클래스에는 자동으로 생성되고 &lt;extension_type_name&gt;.Spec TypeSpec 클래스가 있습니다. 자세한 내용은 위의 \"중첩된 TypeSpec\" 섹션을 참조하세요.\nTypeSpec 을 사용자 정의하려면 Spec 이라는 자체 중첩 클래스를 정의하기만 하면 ExtensionType 이 이를 자동으로 생성된 TypeSpec 의 기초로 사용합니다. Spec 클래스를 사용자 정의할 수 있습니다.\n\n기본 인쇄 가능한 표현을 재정의합니다.\n기본 생성자를 재정의합니다.\n메서드, 클래스 메서드, 정적 메서드 및 속성을 정의합니다.\n\n다음 예제에서는 사용하기 쉽도록 MaskedTensor.Spec 클래스를 사용자 지정합니다.", "class MaskedTensor(tf.experimental.ExtensionType):\n values: tf.Tensor\n mask: tf.Tensor\n\n shape = property(lambda self: self.values.shape)\n dtype = property(lambda self: self.values.dtype)\n\n def __repr__(self):\n return masked_tensor_str(self.values, self.mask)\n\n def with_values(self, new_values):\n return MaskedTensor(new_values, self.mask)\n\n class Spec:\n def __init__(self, shape, dtype=tf.float32):\n self.values = tf.TensorSpec(shape, dtype)\n self.mask = tf.TensorSpec(shape, tf.bool)\n\n def __repr__(self):\n return f\"MaskedTensor.Spec(shape={self.shape}, dtype={self.dtype})\"\n\n shape = property(lambda self: self.values.shape)\n dtype = property(lambda self: self.values.dtype)", "참고 : 사용자 정의 Spec ExtensionType 선언되지 않은 인스턴스 변수를 사용할 수 없습니다.\n텐서 API 디스패치\ntf.Tensor 유형에 의해 정의된 인터페이스를 전문화하거나 확장한다는 점에서 \"텐서와 유사\"할 수 있습니다. 텐서와 유사한 확장 유형의 예로는 RaggedTensor , SparseTensor 및 MaskedTensor 있습니다. 디스패치 데코레이터 는 텐서와 유사한 확장 유형에 적용될 때 TensorFlow 작업의 기본 동작을 재정의하는 데 사용할 수 있습니다. TensorFlow는 현재 세 가지 디스패치 데코레이터를 정의합니다.\n\n@tf.experimental.dispatch_for_api(tf_api)\n@tf.experimental.dispatch_for_unary_elementwise_api(x_type)\n@tf.experimental.dispatch_for_binary_elementwise_apis(x_type, y_type)\n\n단일 API에 대한 디스패치\ntf.experimental.dispatch_for_api 데코레이터는 지정된 서명으로 호출될 때 지정된 TensorFlow 작업의 기본 동작을 재정의합니다. 
예를 들어 이 데코레이터를 사용하여 tf.stack 이 MaskedTensor 값을 처리하는 방법을 지정할 수 있습니다.", "@tf.experimental.dispatch_for_api(tf.stack)\ndef masked_stack(values: List[MaskedTensor], axis = 0):\n return MaskedTensor(tf.stack([v.values for v in values], axis),\n tf.stack([v.mask for v in values], axis))", "MaskedTensor 값 목록과 함께 호출될 때마다 tf.stack 대한 기본 구현을 재정의 values typing.List[MaskedTensor] 주석으로 지정되어 있기 때문입니다):", "x = MaskedTensor([1, 2, 3], [True, True, False])\ny = MaskedTensor([4, 5, 6], [False, True, True])\ntf.stack([x, y])", "tf.stack 이 혼합된 MaskedTensor 및 Tensor 값 목록을 처리할 수 있도록 하려면 values 매개변수에 대한 유형 주석을 구체화하고 함수 본문을 적절하게 업데이트할 수 있습니다.", "tf.experimental.unregister_dispatch_for(masked_stack)\n\ndef convert_to_masked_tensor(x):\n if isinstance(x, MaskedTensor):\n return x\n else:\n return MaskedTensor(x, tf.ones_like(x, tf.bool))\n\n@tf.experimental.dispatch_for_api(tf.stack)\ndef masked_stack_v2(values: List[Union[MaskedTensor, tf.Tensor]], axis = 0):\n values = [convert_to_masked_tensor(v) for v in values]\n return MaskedTensor(tf.stack([v.values for v in values], axis),\n tf.stack([v.mask for v in values], axis))\nx = MaskedTensor([1, 2, 3], [True, True, False])\ny = tf.constant([4, 5, 6])\ntf.stack([x, y, x])", "재정의할 수 있는 API 목록은 tf.experimental.dispatch_for_api 대한 API 설명서를 참조하세요.\n모든 단항 요소별 API에 대한 디스패치\ntf.experimental.dispatch_for_unary_elementwise_apis 데코레이터는 첫 번째 인수(일반적으로 이름이 x )에 대한 값이 유형 주석 x_type 과 일치할 때마다 모든 단항 요소별 연산(예: tf.math.cos )의 기본 동작을 재정의합니다. 데코레이팅된 함수는 두 개의 인수를 취해야 합니다.\n\napi_func : 단일 매개변수를 취하고 요소별 연산을 수행하는 함수(예: tf.abs ).\nx : 요소별 연산의 첫 번째 인수입니다.\n\nMaskedTensor 유형을 처리하기 위해 모든 단항 요소별 연산을 업데이트합니다.", " @tf.experimental.dispatch_for_unary_elementwise_apis(MaskedTensor)\n def masked_tensor_unary_elementwise_api_handler(api_func, x):\n return MaskedTensor(api_func(x.values), x.mask)", "MaskedTensor 에서 단항 요소별 연산이 호출될 때마다 사용됩니다.", " x = MaskedTensor([1, -2, -3], [True, False, True])\n print(tf.abs(x))\n\nprint(tf.ones_like(x, dtype=tf.float32))", "바이너리 모든 요소별 API에 대한 디스패치\n마찬가지로 tf.experimental.dispatch_for_binary_elementwise_apis MaskedTensor 유형을 처리하기 위해 모든 바이너리 요소별 연산을 업데이트하는 데 사용할 수 있습니다.", "@tf.experimental.dispatch_for_binary_elementwise_apis(MaskedTensor, MaskedTensor)\ndef masked_tensor_binary_elementwise_api_handler(api_func, x, y):\n return MaskedTensor(api_func(x.values, y.values), x.mask & y.mask)\n\nx = MaskedTensor([1, -2, -3], [True, False, True])\ny = MaskedTensor([[4], [5]], [[True], [False]])\ntf.math.add(x, y)", "재정의되는 요소별 API 목록은 tf.experimental.dispatch_for_unary_elementwise_apis 및 tf.experimental.dispatch_for_binary_elementwise_apis 대한 API 문서를 참조하세요.\n일괄 처리 가능한 확장 유형\nExtensionType 단일 인스턴스 값의 배치를 나타내는 데 사용할 수있는 경우 batchable이다. Tensor 배치 차원을 추가하여 수행됩니다. 다음 TensorFlow API를 사용하려면 모든 확장 유형 입력이 일괄 처리 가능해야 합니다.\n\ntf.data.Dataset ( batch , unbatch , from_tensor_slices )\ntf.Keras ( fit , evaluate , predict )\ntf.map_fn\n\n기본적으로 BatchableExtensionType Tensor , CompositeTensor 및 ExtensionType 일괄 처리하여 일괄 처리된 값을 생성합니다. 이것이 클래스에 적합하지 않은 경우 tf.experimental.ExtensionTypeBatchEncoder 를 사용하여 이 기본 동작을 재정의해야 합니다. 예를 들어, 개별 희소 텐서의 values , indices 및 dense_shape tf.SparseTensor 값의 배치를 만드는 것은 적절하지 않습니다. 대부분의 경우 이러한 텐서는 호환되지 않는 모양을 가지고 있기 때문에 스택할 수 없습니다. ; 가능하더라도 결과는 유효한 SparseTensor .\n참고 : BatchableExtensionType tf.stack , tf.concat , tf.slice 등에 대한 디스패처를 자동으로 정의하지 않습니다 . 이러한 API에서 클래스를 지원해야 하는 경우 위에서 설명한 디스패치 데코레이터를 사용하세요.\nBatchableExtensionType 예: 네트워크\nNetwork 클래스를 생각해 보십시오. 
이 클래스는 각 노드에서 수행해야 할 작업의 양과 노드 간에 작업을 이동하는 데 사용할 수 있는 대역폭을 추적합니다.", "class Network(tf.experimental.ExtensionType): # This version is not batchable.\n work: tf.Tensor # work[n] = work left to do at node n\n bandwidth: tf.Tensor # bandwidth[n1, n2] = bandwidth from n1->n2\n\nnet1 = Network([5., 3, 8], [[0., 2, 0], [2, 0, 3], [0, 3, 0]])\nnet2 = Network([3., 4, 2], [[0., 2, 2], [2, 0, 2], [2, 2, 0]])", "이 유형을 일괄 처리 가능하게 만들려면 기본 유형을 BatchableExtensionType 변경하고 선택적 일괄 처리 차원을 포함하도록 각 필드의 모양을 조정합니다. 다음 예제에서는 배치 모양을 추적하기 shape 필드도 추가합니다. 이 shape 필드는 필요로하지 않는 tf.data.Dataset 또는 tf.map_fn 있지만 요구하는 tf.Keras .", "class Network(tf.experimental.BatchableExtensionType):\n shape: tf.TensorShape # batch shape. A single network has shape=[].\n work: tf.Tensor # work[*shape, n] = work left to do at node n\n bandwidth: tf.Tensor # bandwidth[*shape, n1, n2] = bandwidth from n1->n2\n\n def __init__(self, work, bandwidth):\n self.work = tf.convert_to_tensor(work)\n self.bandwidth = tf.convert_to_tensor(bandwidth)\n work_batch_shape = self.work.shape[:-1]\n bandwidth_batch_shape = self.bandwidth.shape[:-2]\n self.shape = work_batch_shape.merge_with(bandwidth_batch_shape)\n\n def __repr__(self):\n return network_repr(self)\n\ndef network_repr(network):\n work = network.work\n bandwidth = network.bandwidth\n if hasattr(work, 'numpy'):\n work = ' '.join(str(work.numpy()).split())\n if hasattr(bandwidth, 'numpy'):\n bandwidth = ' '.join(str(bandwidth.numpy()).split())\n return (f\"<Network shape={network.shape} work={work} bandwidth={bandwidth}>\")\n\nnet1 = Network([5., 3, 8], [[0., 2, 0], [2, 0, 3], [0, 3, 0]])\nnet2 = Network([3., 4, 2], [[0., 2, 2], [2, 0, 2], [2, 2, 0]])\nbatch_of_networks = Network(\n work=tf.stack([net1.work, net2.work]),\n bandwidth=tf.stack([net1.bandwidth, net2.bandwidth]))\nprint(f\"net1={net1}\")\nprint(f\"net2={net2}\")\nprint(f\"batch={batch_of_networks}\")", "tf.data.Dataset 을 사용하여 네트워크 배치를 반복할 수 있습니다.", "dataset = tf.data.Dataset.from_tensor_slices(batch_of_networks)\nfor i, network in enumerate(dataset):\n print(f\"Batch element {i}: {network}\")", "map_fn 을 사용하여 각 배치 요소에 함수를 적용할 수도 있습니다.", "def balance_work_greedy(network):\n delta = (tf.expand_dims(network.work, -1) - tf.expand_dims(network.work, -2))\n delta /= 4\n delta = tf.maximum(tf.minimum(delta, network.bandwidth), -network.bandwidth)\n new_work = network.work + tf.reduce_sum(delta, -1)\n return Network(new_work, network.bandwidth)\n\ntf.map_fn(balance_work_greedy, batch_of_networks)", "ExtensionTypes를 지원하는 TensorFlow API\n@tf.function\ntf.function 은 TensorFlow 코드의 성능을 크게 향상시킬 수 있는 Python 함수용 TensorFlow 그래프를 미리 계산하는 데코레이터입니다. @tf.function 함수와 함께 투명하게 사용할 수 있습니다.", "class Pastry(tf.experimental.ExtensionType):\n sweetness: tf.Tensor # 2d embedding that encodes sweetness\n chewiness: tf.Tensor # 2d embedding that encodes chewiness\n\n@tf.function\ndef combine_pastry_features(x: Pastry):\n return (x.sweetness + x.chewiness) / 2\n\ncookie = Pastry(sweetness=[1.2, 0.4], chewiness=[0.8, 0.2])\ncombine_pastry_features(cookie)", "input_signature 대해 tf.function 를 명시적으로 지정 TypeSpec 사용하여 지정할 수 있습니다.", "pastry_spec = Pastry.Spec(tf.TensorSpec([2]), tf.TensorSpec(2))\n\n@tf.function(input_signature=[pastry_spec])\ndef increase_sweetness(x: Pastry, delta=1.0):\n return Pastry(x.sweetness + delta, x.chewiness)\n\nincrease_sweetness(cookie)", "구체적인 기능\ntf.function 의해 구축된 개별 추적 그래프를 캡슐화합니다. 
확장 유형은 구체적인 기능과 함께 투명하게 사용할 수 있습니다.", "cf = combine_pastry_features.get_concrete_function(pastry_spec)\ncf(cookie)", "제어 흐름 작업\n확장 유형은 TensorFlow의 제어 흐름 작업에서 지원됩니다.\n\ntf.cond\ntf.case\ntf.while_loop\ntf.identity", "# Example: using tf.cond to select between two MaskedTensors. Note that the\n# two MaskedTensors don't need to have the same shape.\na = MaskedTensor([1., 2, 3], [True, False, True])\nb = MaskedTensor([22., 33, 108, 55], [True, True, True, False])\ncondition = tf.constant(True)\nprint(tf.cond(condition, lambda: a, lambda: b))\n\n# Example: using tf.while_loop with MaskedTensor.\ncond = lambda i, _: i < 10\ndef body(i, mt):\n return i + 1, mt.with_values(mt.values + 3 / 7)\nprint(tf.while_loop(cond, body, [0, b])[1])", "사인 제어 흐름\n확장 유형은 tf.function의 제어 흐름 문에서도 지원됩니다(autograph 사용). 다음 예에서 if 문과 for 문은 확장 유형을 지원 tf.cond 및 tf.while_loop 작업으로 자동 변환됩니다.", "@tf.function\ndef fn(x, b):\n if b:\n x = MaskedTensor(x, tf.less(x, 0))\n else:\n x = MaskedTensor(x, tf.greater(x, 0))\n for i in tf.range(5 if b else 7):\n x = x.with_values(x.values + 1 / 2)\n return x\n\nprint(fn(tf.constant([1., -2, 3]), tf.constant(True)))\nprint(fn(tf.constant([1., -2, 3]), tf.constant(False)))", "케라스\ntf.keras 는 딥 러닝 모델을 구축하고 훈련하기 위한 TensorFlow의 고급 API입니다. 확장 유형은 Keras 모델에 대한 입력으로 전달되고, Keras 계층 간에 전달되고, Keras 모델에서 반환될 수 있습니다. Keras는 현재 확장 유형에 두 가지 요구 사항을 적용합니다.\n\n배치 가능해야 합니다(위의 \"배치 가능한 ExtensionType\" 참조).\nshape 이라는 필드 또는 속성이 있어야 합니다. shape[0] 은 배치 차원으로 간주됩니다.\n\n다음 두 하위 섹션에서는 확장 유형을 Keras와 함께 사용하는 방법을 보여주는 예를 제공합니다.\nKeras 예: Network\n첫 번째 예에서는 노드 간의 부하 분산 작업에 사용할 수 있는 위의 \"Batchable ExtensionTypes\" 섹션에 정의된 Network 그 정의는 여기에서 반복됩니다.", "class Network(tf.experimental.BatchableExtensionType):\n shape: tf.TensorShape # batch shape. A single network has shape=[].\n work: tf.Tensor # work[*shape, n] = work left to do at node n\n bandwidth: tf.Tensor # bandwidth[*shape, n1, n2] = bandwidth from n1->n2\n\n def __init__(self, work, bandwidth):\n self.work = tf.convert_to_tensor(work)\n self.bandwidth = tf.convert_to_tensor(bandwidth)\n work_batch_shape = self.work.shape[:-1]\n bandwidth_batch_shape = self.bandwidth.shape[:-2]\n self.shape = work_batch_shape.merge_with(bandwidth_batch_shape)\n\n def __repr__(self):\n return network_repr(self)\n\nsingle_network = Network( # A single network w/ 4 nodes.\n work=[8.0, 5, 12, 2],\n bandwidth=[[0.0, 1, 2, 2], [1, 0, 0, 2], [2, 0, 0, 1], [2, 2, 1, 0]])\n\nbatch_of_networks = Network( # Batch of 2 networks, each w/ 2 nodes.\n work=[[8.0, 5], [3, 2]],\n bandwidth=[[[0.0, 1], [1, 0]], [[0, 2], [2, 0]]])", "Network 를 처리하는 새로운 Keras 계층을 정의할 수 있습니다.", "class BalanceNetworkLayer(tf.keras.layers.Layer):\n \"\"\"Layer that balances work between nodes in a network.\n\n Shifts work from more busy nodes to less busy nodes, constrained by bandwidth.\n \"\"\"\n def call(self, inputs):\n # This function is defined above, in \"Batchable ExtensionTypes\" section.\n return balance_work_greedy(inputs)", "그런 다음 이 레이어를 사용하여 간단한 모델을 만들 수 있습니다. ExtensionType 을 모델에 제공하려면 type_spec 이 확장 유형의 TypeSpec tf.keras.layer.Input 레이어를 사용할 수 있습니다. Keras 모델을 사용하여 배치를 처리하는 경우 type_spec 에 배치 차원이 포함되어야 합니다.", "input_spec = Network.Spec(shape=None,\n work=tf.TensorSpec(None, tf.float32),\n bandwidth=tf.TensorSpec(None, tf.float32))\nmodel = tf.keras.Sequential([\n tf.keras.layers.Input(type_spec=input_spec),\n BalanceNetworkLayer(),\n ])", "마지막으로 단일 네트워크와 네트워크 배치에 모델을 적용할 수 있습니다.", "model(single_network)\n\nmodel(batch_of_networks)", "케라스 예시: MaskedTensor\n이 예에서 MaskedTensor Keras 를 지원하도록 확장되었습니다. 
shape values 필드에서 계산되는 속성으로 정의됩니다. TypeSpec 모두에 이 속성을 추가해야 합니다. MaskedTensor SavedModel 직렬화에 필요한 __name__ 변수도 정의합니다(아래 참조).", "class MaskedTensor(tf.experimental.BatchableExtensionType):\n # __name__ is required for serialization in SavedModel; see below for details.\n __name__ = 'extension_type_colab.MaskedTensor'\n\n values: tf.Tensor\n mask: tf.Tensor\n\n shape = property(lambda self: self.values.shape)\n dtype = property(lambda self: self.values.dtype)\n\n def with_default(self, default):\n return tf.where(self.mask, self.values, default)\n\n def __repr__(self):\n return masked_tensor_str(self.values, self.mask)\n\n class Spec:\n def __init__(self, shape, dtype=tf.float32):\n self.values = tf.TensorSpec(shape, dtype)\n self.mask = tf.TensorSpec(shape, tf.bool)\n\n shape = property(lambda self: self.values.shape)\n dtype = property(lambda self: self.values.dtype)\n\n def with_shape(self):\n return MaskedTensor.Spec(tf.TensorSpec(shape, self.values.dtype),\n tf.TensorSpec(shape, self.mask.dtype))", "다음으로 디스패치 데코레이터는 여러 TensorFlow API의 기본 동작을 재정의하는 데 사용됩니다. 이러한 API는 표준 Keras 레이어(예: Dense MaskedTensor 와 함께 해당 레이어를 사용할 수 있습니다. 이 예의 목적을 matmul 은 마스킹된 값을 0으로 처리하도록 정의됩니다(즉, 제품에 포함하지 않기 위해).", "@tf.experimental.dispatch_for_unary_elementwise_apis(MaskedTensor)\ndef unary_elementwise_op_handler(op, x):\n return MaskedTensor(op(x.values), x.mask)\n\n@tf.experimental.dispatch_for_binary_elementwise_apis(\n Union[MaskedTensor, tf.Tensor],\n Union[MaskedTensor, tf.Tensor])\ndef binary_elementwise_op_handler(op, x, y):\n x = convert_to_masked_tensor(x)\n y = convert_to_masked_tensor(y)\n return MaskedTensor(op(x.values, y.values), x.mask & y.mask)\n\n@tf.experimental.dispatch_for_api(tf.matmul)\ndef masked_matmul(a: MaskedTensor, b,\n transpose_a=False, transpose_b=False,\n adjoint_a=False, adjoint_b=False,\n a_is_sparse=False, b_is_sparse=False,\n output_type=None):\n if isinstance(a, MaskedTensor):\n a = a.with_default(0)\n if isinstance(b, MaskedTensor):\n b = b.with_default(0)\n return tf.matmul(a, b, transpose_a, transpose_b, adjoint_a,\n adjoint_b, a_is_sparse, b_is_sparse, output_type)", "그런 다음 표준 Keras 레이어를 사용하여 MaskedTensor 입력을 허용하는 Keras 모델을 구성할 수 있습니다.", "input_spec = MaskedTensor.Spec([None, 2], tf.float32)\n\nmasked_tensor_model = tf.keras.Sequential([\n tf.keras.layers.Input(type_spec=input_spec),\n tf.keras.layers.Dense(16, activation=\"relu\"),\n tf.keras.layers.Dense(1)])\nmasked_tensor_model.compile(loss='binary_crossentropy', optimizer='rmsprop')\n\na = MaskedTensor([[1., 2], [3, 4], [5, 6]],\n [[True, False], [False, True], [True, True]])\nmasked_tensor_model.fit(a, tf.constant([[1], [0], [1]]), epochs=3)\nprint(masked_tensor_model(a))", "저장된 모델\nSavedModel 은 가중치와 계산을 모두 포함하는 직렬화된 TensorFlow 프로그램입니다. Keras 모델 또는 사용자 지정 모델에서 구축할 수 있습니다. 두 경우 모두 확장 유형은 SavedModel에 의해 정의된 함수 및 메소드와 함께 투명하게 사용될 수 있습니다.\n__name__ 필드가 있는 한 확장 유형을 처리하는 모델, 계층 및 함수를 저장할 수 있습니다. 
이 이름은 확장 유형을 등록하는 데 사용되므로 모델을 로드할 때 찾을 수 있습니다.\n예: Keras 모델 저장\n확장 유형을 사용하는 SavedModel 사용하여 저장할 수 있습니다.", "masked_tensor_model_path = tempfile.mkdtemp()\ntf.saved_model.save(masked_tensor_model, masked_tensor_model_path)\nimported_model = tf.saved_model.load(masked_tensor_model_path)\nimported_model(a)", "예: 사용자 정의 모델 저장\nSavedModel은 확장 유형을 처리하는 함수로 tf.Module 하위 클래스를 저장하는 데 사용할 수도 있습니다.", "class CustomModule(tf.Module):\n def __init__(self, variable_value):\n super().__init__()\n self.v = tf.Variable(variable_value)\n\n @tf.function\n def grow(self, x: MaskedTensor):\n \"\"\"Increase values in `x` by multiplying them by `self.v`.\"\"\"\n return MaskedTensor(x.values * self.v, x.mask)\n\nmodule = CustomModule(100.0)\n\nmodule.grow.get_concrete_function(MaskedTensor.Spec(shape=None,\n dtype=tf.float32))\ncustom_module_path = tempfile.mkdtemp()\ntf.saved_model.save(module, custom_module_path)\nimported_model = tf.saved_model.load(custom_module_path)\nimported_model.grow(MaskedTensor([1., 2, 3], [False, True, False]))", "ExtensionType을 사용할 수 없을 때 저장된 모델 로드\nExtensionType 을 사용하는 SavedModel 을 로드하지만 해당 ExtensionType 사용할 수 없는 경우(즉, 가져오지 않은 경우) 경고가 표시되고 TensorFlow는 \"익명 확장 유형\" 개체를 사용하도록 대체합니다. 이 개체는 원래 유형과 동일한 필드를 갖지만 사용자 정의 메소드 또는 속성과 같이 유형에 추가한 추가 사용자 정의가 부족합니다.\nTensorFlow 제공과 함께 ExtensionType 사용\n현재 TensorFlow 서비스 (및 SavedModel \"서명\" 사전의 다른 소비자)는 모든 입력 및 출력이 원시 텐서가 되어야 합니다. 확장 유형을 사용하는 모델과 함께 TensorFlow 서비스를 사용하려는 경우 텐서에서 확장 유형 값을 구성하거나 분해하는 래퍼 메서드를 추가할 수 있습니다. 예:", "class CustomModuleWrapper(tf.Module):\n def __init__(self, variable_value):\n super().__init__()\n self.v = tf.Variable(variable_value)\n\n @tf.function\n def var_weighted_mean(self, x: MaskedTensor):\n \"\"\"Mean value of unmasked values in x, weighted by self.v.\"\"\"\n x = MaskedTensor(x.values * self.v, x.mask)\n return (tf.reduce_sum(x.with_default(0)) /\n tf.reduce_sum(tf.cast(x.mask, x.dtype)))\n\n @tf.function()\n def var_weighted_mean_wrapper(self, x_values, x_mask):\n \"\"\"Raw tensor wrapper for var_weighted_mean.\"\"\"\n return self.var_weighted_mean(MaskedTensor(x_values, x_mask))\n\nmodule = CustomModuleWrapper([3., 2., 8., 5.])\n\nmodule.var_weighted_mean_wrapper.get_concrete_function(\n tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.bool))\ncustom_module_path = tempfile.mkdtemp()\ntf.saved_model.save(module, custom_module_path)\nimported_model = tf.saved_model.load(custom_module_path)\nx = MaskedTensor([1., 2., 3., 4.], [False, True, False, True])\nimported_model.var_weighted_mean_wrapper(x.values, x.mask)", "데이터세트\ntf.data 는 간단하고 재사용 가능한 부분으로 복잡한 입력 파이프라인을 구축할 수 있는 API입니다. 핵심 데이터 구조는 tf.data.Dataset 이며, 이는 각 요소가 하나 이상의 구성 요소로 구성된 일련의 요소를 나타냅니다.\n확장 유형으로 데이터세트 빌드\nDataset.from_tensors , Dataset.from_tensor_slices 또는 Dataset.from_generator 사용하여 확장 유형 값에서 데이터 세트를 빌드할 수 있습니다.", "ds = tf.data.Dataset.from_tensors(Pastry(5, 5))\niter(ds).next()\n\nmt = MaskedTensor(tf.reshape(range(20), [5, 4]), tf.ones([5, 4]))\nds = tf.data.Dataset.from_tensor_slices(mt)\nfor value in ds:\n print(value)\n\ndef value_gen():\n for i in range(2, 7):\n yield MaskedTensor(range(10), [j%i != 0 for j in range(10)])\n\nds = tf.data.Dataset.from_generator(\n value_gen, output_signature=MaskedTensor.Spec(shape=[10], dtype=tf.int32))\nfor value in ds:\n print(value)", "확장 유형이 있는 데이터 세트 일괄 처리 및 일괄 해제\n확장 유형의 데이터 세트를 사용하여 batchand 및 unbatched 수 있습니다 Dataset.batch ADN Dataset.unbatch .", "batched_ds = ds.batch(2)\nfor value in batched_ds:\n print(value)\n\nunbatched_ds = batched_ds.unbatch()\nfor value in unbatched_ds:\n print(value)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
hethapu/big-data-python-class
Lectures/Lecture6-Streams/SGD TESTING.ipynb
mit
[ "Stochastic Gradient Descent\ndiscriminative learning of linear classifiers under convex loss functions such as (linear) Support Vector Machines and Logistic Regression. \nSGD has been successfully applied to large-scale and sparse machine learning problems often encountered in text classification and natural language processing. \nGiven that the data is sparse, the classifiers in this module easily scale to problems with more than 10^5 training examples and more than 10^5 features.\nThe advantages of Stochastic Gradient Descent are:\n<ol>\n<li>Efficiency.\n<li>Ease of implementation (lots of opportunities for code tuning).\n</ol>\n\nThe disadvantages of Stochastic Gradient Descent include:\n<ol>\n<li>SGD requires a number of hyperparameters such as the regularization parameter and the number of iterations.\n<li>SGD is sensitive to feature scaling.\n</ol>\n\nhttp://scikit-learn.org/stable/modules/sgd.html\nThe class SGDClassifier implements a plain stochastic gradient descent learning routine which supports different loss functions and penalties for classification.", "from sklearn.linear_model import SGDClassifier\nX = [[0., 0.], [1., 1.]] #N_samples, N_features - training samples\ny = [0, 1] # n_samples - target values for training the samples\nclf = SGDClassifier(loss=\"hinge\", penalty=\"l2\")\nclf.fit(X, y)\n\n\nclf.predict([[2.,2.]]) # predict new value\n\nclf.coef_ # fits a linear model to the training data , coef_ holds the model parameters\n\nclf.intercept_ # contains the offset of the linear equation\n\n clf.decision_function([[2., 2.]]) ", "The concrete loss function can be set via the loss parameter. SGDClassifier supports the following loss functions:\n<ul>\n<li>loss=\"hinge\": (soft-margin) linear Support Vector Machine,\n<li>loss=\"modified_huber\": smoothed hinge loss,\n<li>loss=\"log\": logistic regression,\n<li>and all regression losses below.\n</ul>\n\nThe first two loss functions are lazy, they only update the model parameters if an example violates the margin constraint, which makes training very efficient and may result in sparser models, even when L2 penalty is used.\nUsing loss=\"log\" or loss=\"modified_huber\" enables the predict_proba method, which gives a vector of probability estimates P(y|x) per sample x:", "clf = SGDClassifier(loss=\"log\").fit(X, y)\nclf.predict_proba([[1., 1.]]) ", "The default setting is penalty=\"l2\". The L1 penalty leads to sparse solutions, driving most coefficients to zero. The Elastic Net solves some deficiencies of the L1 penalty in the presence of highly correlated attributes. 
The parameter l1_ratio controls the convex combination of L1 and L2 penalty.", "%matplotlib inline\n# SGD: Maximum Margin Separating hyperplane\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.datasets import make_blobs\n\n# we create 50 separable points\nX, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)\n\n# fit the model\nclf = SGDClassifier(loss=\"hinge\", alpha=0.01, n_iter=200, fit_intercept=True)\nclf.fit(X, Y)\n\n# plot the line, the points, and the nearest vectors to the plane\nxx = np.linspace(-1, 5, 10)\nyy = np.linspace(-1, 5, 10)\n\nX1, X2 = np.meshgrid(xx, yy)\nZ = np.empty(X1.shape)\nfor (i, j), val in np.ndenumerate(X1):\n    x1 = val\n    x2 = X2[i, j]\n    p = clf.decision_function([[x1, x2]])  # one sample with two features\n    Z[i, j] = p[0]\nlevels = [-1.0, 0.0, 1.0]\nlinestyles = ['dashed', 'solid', 'dashed']\ncolors = 'k'\nplt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)\nplt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)\n\nplt.axis('tight')\nplt.show()\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
TeamEmily/Emily_server
customKonlpy/tutorials/usage_of_templatetagger.ipynb
mit
[ "KoNLPy의 트위터 한국어 분석기 (현 오픈 한국어 분석기)는 속도도 빠르고 다양한 사전도 확보하고 있는 한국어 분석기입니다. 하지만 컴파일이 되어 있는 형태로 KoNLPy에 들어가 있기 때문에 사용자 사전의 추가가 힘들고*, 내가 원하는 임의의 태그를 지정할 수 없습니다. \ncustomized_KoNLPy는 확실히 알고 있는 단어들에 대해서는 라이브러리를 거치지 않고 주어진 어절을 아는 단어들로 토크나이징 / 품사판별을 하는 기능을 제공합니다. 이를 위해 template 기반 토크나이징을 수행합니다. \n사전: {'아이오아이': 'Noun', '는': 'Josa'}\n탬플릿: Noun + Josa\n\n위와 같은 단어 리스트와 탬플릿이 있다면 '아이오아이는' 이라는 어절은 [('아이오아이', 'Noun'), ('는', 'Josa')]로 분리됩니다. \n* Scala 코드를 이용할 경우에는 사용자 사전의 추가가 매우 쉽습니다 참고\nKoNLPy의 버전은 0.4.4 기준입니다. KoNLPy의 Twitter를 이용하여 '우리아이오아이는 정말 이뻐요'라는 문장을 처리하면 '아이오' + '아이'로 명사가 잘못 인식됩니다. 트와이스의 'tt' 역시 명사보다는 영어로 인식됩니다. 한국어 분석기 이지만, tt는 명사로 미리 분류하고 싶습니다.", "import konlpy\nkonlpy.__version__\n\nfrom konlpy.tag import Twitter as OriginalTwitter\n\ntwitter_original = OriginalTwitter()\nprint(twitter_original.pos('우리아이오아이는 정말 이뻐요'),'\\n')\nprint(twitter_original.pos('트둥이꺼 tt도 좋아요'))", "customized_KoNLPy 에는 현재 트위터 한국어 분석기 만을 이용하는 wrapping class만 제공되고 있습니다. customized_KoNLPy의 Twitter는 본래 KoNLPy의 tag에 추가되는 함수가 있습니다. \nTwitter.add_dictionary(words, tag)는 사용자가 사전을 추가할 수 있는 부분입니다. 단어를 하나씩 추가할 수 있습니다. 추가한 뒤 Twitter의 숨김 변수인 _dictionary._pos2words를 확인해보면 입력한 단어들을 볼 수 있습니다. \ngit clone을 한 상태에서 tutorial code를 이용하신다면 아래의 코드를 실행하여 path를 추가하십시요", "import sys\nsys.path.append('../')\n\nfrom ckonlpy.tag import Twitter\n\ntwitter = Twitter()\n\ntwitter.add_dictionary('이', 'Modifier')\ntwitter.add_dictionary('우리', 'Modifier')\ntwitter.add_dictionary('이번', 'Modifier')\ntwitter.add_dictionary('아이오아이', 'Noun')\ntwitter.add_dictionary('행사', 'Noun')\ntwitter.add_dictionary('아이', 'Noun')\ntwitter.add_dictionary('번것', 'Noun')\ntwitter.add_dictionary('것', 'Noun')\ntwitter.add_dictionary('은', 'Josa')\ntwitter.add_dictionary('는', 'Josa')\ntwitter._dictionary._pos2words", "사전을 추가한 뒤, '아이오아이'가 명사로 제대로 인식됨을 확인할 수 있습니다.", "twitter.pos('우리아이오아이는 정말 이뻐요')\n\ntwitter.pos('아이오아이 이뻐요')", "사전을 추가할 때, 하나의 품사에 대하여 동시에 여러 개의 단어셋을 입력할 수도 있습니다.\nTwitter.add_dictionary(words, tag)는 한번에 list of str 형식의 여러 개의 단어들을 입력할 수도 있습니다.", "twitter.add_dictionary(['트와이스', 'tt', '트둥이', '꺼', '우리'], 'Noun')\ntwitter._dictionary._pos2words\n\ntwitter.pos('트와이스tt는 좋아요')", "트위터 분석기의 조사사전을 이용할 수도 있습니다. Twitter()를 만들 때 argument를 넣을 수 있습니다.", "twitter1 = Twitter(load_default_dictionary=True)\nlen(twitter1._dictionary._pos2words['Josa'])", "하지만 아직 '우리트둥이꺼tt는' 이라는 어절이 제대로 인식되지 않습니다. 그 이유는 templates에 'Noun + Noun + Josa'가 없었기 때문입니다. 이 경우에는 KoNLPy에 해당 어절을 분석하라고 보냅니다. 하지만 '트둥이'라는 단어를 알지 못해서 제대로 인식되지 않습니다.", "twitter.pos('우리트둥이꺼tt는 좋아요')", "현재는 customized_tagger로 탬플릿 기반 토크나이저를 이용하고 있습니다. 어떤 탬플릿이 들어있는지 확인하기 위해서는 아래 부분을 확인하면 됩니다. \ntwitter._customized_tagger.templates\n\n현재는 다음의 탬플릿이 입력되어 있습니다.", "twitter._customized_tagger.templates", "기본 탬플릿은 customized_konlpy/data/templates/twitter_templates0 에 저장되어 있습니다. text 형식의 파일이며, 띄어쓰기로 아래와 같은 기본 템플릿을 지정하면 됩니다.", "cat ../ckonlpy/data/templates/twitter_templates0", "작업 중 탬플릿을 추가하고 싶다면, 탬플릿은 하나 단위로 tuple of str의 형식으로 입력할 수 있습니다. _customized_tagger.add_a_templated()은 중복되는 탬플릿이 아닌지 확인한 다음 탬플릿을 추가하는 함수입니다.", "twitter._customized_tagger.add_a_template(('Modifier', 'Noun', 'Noun', 'Noun', 'Josa'))\ntwitter._customized_tagger.templates", "('Noun', 'Noun', 'Josa')가 입력되었고, '트와이스', 'tt'가 명사인지 알고 있기 때문에 아래 문장은 제대로 인식이 됩니다.", "twitter.pos('우리트둥이꺼tt는 좋아요')", "사전을 추가할 때, 트위터 한국어 분석기에 존재하지 않는 태그가 들어가는 것을 방지하기 위해 tag의 값을 확인하는 부분이 구현되어 있습니다. 
\ntwitter.tagset\n\n&gt;&gt;&gt; {'Adjective': '형용사',\n 'Adverb': '부사',\n 'Alpha': '알파벳',\n 'Conjunction': '접속사',\n 'Determiner': '관형사',\n 'Eomi': '어미',\n 'Exclamation': '감탄사',\n 'Foreign': '외국어, 한자 및 기타기호',\n 'Hashtag': '트위터 해쉬태그',\n 'Josa': '조사',\n 'KoreanParticle': '(ex: ㅋㅋ)',\n 'Modifier': '관형사',\n 'Noun': '명사',\n 'Number': '숫자',\n 'PreEomi': '선어말어미',\n 'Punctuation': '구두점',\n 'ScreenName': '트위터 아이디',\n 'Suffix': '접미사',\n 'Unknown': '미등록어',\n 'Verb': '동사'}\n\ntwitter.tagset에 등록되어 있지 않는 품사에 대해서는 ValueError를 raise 합니다.", "twitter.add_dictionary('lovit', 'Name')", "하지만 Twitter.add_dictionary(words, tag, force=True)로 단어를 사전에 입력하면 알려지지 않은 품사라 하더라도 입력할 수 있습니다.", "twitter.add_dictionary('lovit', 'Name', force=True)\ntwitter._dictionary._pos2words", "'Name'이라는 클래스 (더이상 품사가 아니므로)를 이용하는 탬플릿을 하나 입력한 뒤 pos에 입력하면 어절 'lovit은' customized_tagger에 의하여 처리가 되고, 사용자 사전에 알려지지 않은 어절인 '졸려'는 본래의 트위터 분석기에 의하여 처리가 됩니다.", "twitter._customized_tagger.add_a_template(('Name', 'Josa'))\nprint(twitter._customized_tagger.templates)\ntwitter.pos('lovit은 이름입니다.')", "Templates를 이용하여도 후보가 여러 개 나올 수 있습니다. 여러 개 후보 중에서 best 를 선택하는 함수를 직접 디자인 할 수 도 있습니다. 이처럼 몇 개의 점수 기준을 만들고, 각 기준의 weight를 부여하는 방식은 트위터 분석기에서 이용하는 방식인데, 직관적이고 튜닝 가능해서 매우 좋은 방식이라 생각합니다.", "score_weights = {\n 'num_nouns': -0.1,\n 'num_words': -0.2,\n 'no_noun': -1\n}\n\ndef my_score(candidate):\n num_nouns = len([w for w,t in candidate if t == 'Noun'])\n num_words = len(candidate)\n no_noun = 1 if num_nouns == 0 else 0\n \n score = (num_nouns * score_weights['num_nouns'] \n + num_words * score_weights['num_words']\n + no_noun * score_weights['no_noun'])\n return score\n\ntwitter.set_selector(score_weights, my_score)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Dataweekends/odsc_intro_to_data_science
Titanic Survival Workshop.ipynb
mit
[ "Predicting survival of Titanic Passengers\nThis notebook explores a dataset containing information of passengers of the Titanic.\nThe dataset can be downloaded from Kaggle\nTutorial goals\n\nExplore the dataset\nBuild a simple predictive modeling\nIterate and improve your score\nOptional: upload your prediction to Kaggle using the test dataset\n\nHow to follow along:\ngit clone https://github.com/Dataweekends/odsc_intro_to_data_science\n\ncd odsc_intro_to_data_science\n\nipython notebook\n\nWe start by importing the necessary libraries:", "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline", "1) Explore the dataset\nNumerical exploration\n\nLoad the csv file into memory using Pandas\nDescribe each attribute\nis it discrete?\nis it continuous?\nis it a number?\nis it text?\n\n\nIdentify the target\nCheck if any values are missing\n\nLoad the csv file into memory using Pandas", "df = pd.read_csv('titanic-train.csv')", "What's the content of df ?", "df.head(3)", "Describe each attribute (is it discrete? is it continuous? is it a number? is it text?)", "df.info()", "Is Pclass a continuous or discrete class?", "df['Pclass'].value_counts()", "What about these: ('SibSp', 'Parch')?", "df['SibSp'].value_counts()\n\ndf['Parch'].value_counts()", "and what about these: ('Ticket', 'Fare', 'Cabin', 'Embarked')?", "df[['Ticket', 'Fare', 'Cabin']].head(3)\n\ndf['Embarked'].value_counts()", "Identify the target\nWhat are we trying to predict?\nah, yes... Survival!", "df['Survived'].value_counts()", "Check if any values are missing", "df.info()", "Mental notes so far:\n\nDataset contains 891 entries\n1 Target column (Survived)\n11 Features:\n6 numerical, 5 text\n1 useless (PassengerId)\n3 categorical (Pclass, Sex, Embarked)\n4 numerical, > 0 (Age, SibSp, Parch, Fare)\n3 not sure how to treat (Name, Ticket, Cabin)\n\n\nAge is only available for 714 passengers\nCabin is only available for 204 passengers\nEmbarked is missing for 2 passengers\n\nVisual exploration\n\nplot the distribution of Age\nimpute the missing values for Age using the median Age\ncheck the influence of Age, Sex and Class on Survival\n\nPlot the distribution of Age", "df['Age'].plot(kind='hist', figsize=(10,6))\nplt.title('Distribution of Age', size = '20')\nplt.xlabel('Age', size = '20')\nplt.ylabel('Number of passengers', size = '20')\nmedian_age = df['Age'].median()\nplt.axvline(median_age, color = 'r')\nmedian_age", "impute the missing values for Age using the median Age", "df['Age'].fillna(median_age, inplace = True)\ndf.info()", "check the influence of Age", "df[df['Survived']==1]['Age'].plot(kind='hist', bins = 10, range = (0,100), figsize=(10,6), alpha = 0.3, color = 'g')\ndf[df['Survived']==0]['Age'].plot(kind='hist', bins = 10, range = (0,100), figsize=(10,6), alpha = 0.3, color = 'r')\nplt.title('Distribution of Age', size = '20')\nplt.xlabel('Age', size = '20')\nplt.ylabel('Number of passengers', size = '20')\nplt.legend(['Survived', 'Dead'])\nplt.show()", "Check the influence of Sex on Survival", "survival_by_gender = df[['Sex','Survived']].pivot_table(columns =\n ['Survived'], index = ['Sex'], aggfunc=len)\nsurvival_by_gender\n\nsurvival_by_gender.plot(kind = 'bar', stacked = True)\nplt.show()", "Check the influence of Pclass on Survival", "survival_by_Pclass = df[['Pclass','Survived']].pivot_table(columns =\n ['Survived'], index = ['Pclass'], aggfunc=len)\nsurvival_by_Pclass\n\nsurvival_by_Pclass.plot(kind = 'bar', stacked = True)\nplt.show()", "Ok, so, Age and Pclass seem to 
have some influence on survival rate.\nLet's build a simple model to test that\nDefine a new feature called \"Male\" that is 1 if Sex = 'male' and 0 otherwise", "df['Male'] = df['Sex'].map({'male': 1, 'female': 0})\ndf[['Sex', 'Male']].head()", "Define simplest model as benchmark\nThe simplest model is a model that predicts 0 for everybody, i.e. no survival.\nHow good is it?", "actual_dead = len(df[df['Survived'] == 0])\ntotal_passengers = len(df)\nratio_of_dead = actual_dead / float(total_passengers)\n\nprint \"If I predict everybody dies, I'm correct %0.1f %% of the time\" % (100 * ratio_of_dead)\n\ndf['Survived'].value_counts()", "We need to do better than that\nDefine features (X) and target (y) variables", "X = df[['Male', 'Pclass', 'Age']]\ny = df['Survived']", "Initialize a decision tree model", "from sklearn.tree import DecisionTreeClassifier\n\nmodel = DecisionTreeClassifier(random_state=0)\nmodel", "Split the features and the target into a Train and a Test subsets.\nRatio should be 80/20", "from sklearn.cross_validation import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size = 0.2, random_state=0)", "Train the model", "model.fit(X_train, y_train)", "Calculate the model score", "my_score = model.score(X_test, y_test)\n\nprint \"Classification Score: %0.2f\" % my_score", "Print the confusion matrix for the decision tree model", "from sklearn.metrics import confusion_matrix\n\ny_pred = model.predict(X_test)\n\nprint \"\\n=======confusion matrix==========\"\nprint confusion_matrix(y_test, y_pred)", "3) Iterate and improve\nNow you have a basic pipeline. How can you improve the score? Try:\n- adding new features\n could you add a feature for family?\n could you use the Embark or other as dummies\n check the get_dummies function here:\n http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html\n\n\nchanging the parameters of the model\n check the documentation here:\n http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html\n\n\nchanging the model itself\n check examples here:\n http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html\n\n\nLet's have a small competition....\n4) Optional: upload your prediction to Kaggle using the test dataset\nhttps://www.kaggle.com/c/titanic/submissions/attach" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kdungs/teaching-SMD2-2016
tutorials/Wissenschaftliches Python Tutorial.ipynb
mit
[ "Wissenschaftliches Python Tutorial\nNachdem wir uns im Python Tutorial um die Grundlagen gekümmert haben, wollen wir uns nun mit einigen Bibliotheken beschäftigen, die das wissenschaftliche Arbeiten erleichtern. Diese sind\n\nNumpy für effiziente Berechnungen auf strukturierten Daten\nMatplotlib bietet eine einfache Möglichkeit Daten schön darzustellen\nScipy enthält mathematische Funktionen und Algorithmen für statistische Berechnungen, Fits, etc.\n\nZunächst laden wir die Bibliotheken. Sollte dabei ein Fehler auftreten, stell bitte sicher, dass bei der Installation alles geklappt hat und du kein Paket vergessen hast. Die erste Zeile mit dem %-Zeichen ist sogenannte \"Magie\", die dafür sorgt, dass Plots im Notebook dargestellt werden.", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy", "Numpy: Arrays und effiziente Berechnungen\nDas Herzstück von Numpy ist das Array. Dieser Datentyp repräsentiert eine Matrix und ist unter der Haube in C implementiert. Dabei wird großer Wert auf effiziente Speichernutzung gelegt. Der gängige Satz \"Python ist viel zu langsam\" ist also nicht zwingend wahr. Wir können Arrays auf verschiedene Arten erzeugen.", "xs = np.array([1, 2, 3, 4]) # Konvertiert eine Python-Liste in ein Numpy-Array\nprint(xs)\nys = np.arange(4) # Erzeugt ein Array analog zur `range` Funktion\nprint(ys)", "Numpy Arrays unterstützen arithmetische Operationen, die wiederum effizient implementiert sind. Beispielsweise lassen sich zwei Arrays (elementweise) addieren sofern sie die gleichen Dimensionen haben.", "xs + ys", "Um einen Überblick über alle Features von Numpy zu bekommen, können wir die Hilfe zu Rate ziehen. Zusätzlich zur help Funktion bietet IPython auch die ?-Magie mit einer besseren Integration in Jupyter", "np?", "Für die Übungsaufgaben werden wir häufig Zufallszahlen brauchen. Dafür bietet sich die Verwendung von np.random an.", "np.random?\n\nn_events = 10000\ngauss = np.random.normal(2, 3, size=n_events) # Erzeuge 10000 Gauß-verteilte Zufallszahlen\n # mit µ=2 und σ=3.", "Matplotlib: Schöne Plots\nMatplotlib bietet sehr intuitive Funktionen um Daten darzustellen. Die sehr ausführliche Dokumentation bietet einen guten Überblick. Wir benutzen an dieser Stelle nur das pyplot Submodul, das uns ein einfaches Interface für die Kernfunktionalität bietet. In der Matplotlib Galerie finden sich viele schöne Beispiele mit Codeschnipseln.\nUm unsere Gauß-verteilten Zufallszahlen zu histogrammieren benutzen wir einfach plt.hist. Außerdem setzen wir gleich Achsenbeschriftungen.", "plt.hist(gauss)\nplt.xlabel('Wert')\nplt.ylabel('Absolute Häufigkeit')", "Falls dir dieser Plot zu steril ist, können wir den Stil der bekannten R-Bibliothek ggplot2 verwenden.", "plt.style.use('ggplot')", "Wir wollen nun die Anzahl Bins erhöhen und zusätzlich das Histogramm normieren, damit wir die normierte Verteilungsfunktion (PDF) eintragen können.", "plt.hist(gauss, bins=20, normed=True)\nplt.xlabel('Wert')\nplt.ylabel('Relative Häufigkeit')", "Scipy: Statistische Funktionen und mehr\nDie PDF erhalten wir ebenfalls aus Scipy. Um sie plotten zu können, müssen wir sie auf eine Reihe von Werten anwenden um Datenpunkte zu erhalten. Hier zeigt sich erneut die Stärke von Numpy: wir können einfach die Funktion auf das ganze Array anwenden und erhalten ein Array von Ergebnissen. Scipy ist modular aufgebaut, so dass wir mit unserem obigen Import nicht alle Untermodule enthalten. 
Wir müssen das Statistikmodul explizit importieren.", "import scipy.stats\npdf = scipy.stats.norm(2, 3).pdf\nxs = np.linspace(-15, 15, 5000) # Erzeuge 5000 äquidistante Werte im Interval [-15, 15).\n\nplt.hist(gauss, bins=20, normed=True, label='Werte')\nplt.plot(xs, pdf(xs), label='PDF')\nplt.xlabel('Wert')\nplt.ylabel('Relative Häufigkeit')\nplt.legend()", "Das sieht doch schon mal hübsch aus. Zum Abschluss wollen wir noch Unsicherheiten auf die Bins berechnen und in das Histogramm eintragen. Um es einfach zu halten, verwenden wir nicht die normierte PDF, sondern skalieren unsere PDF auf unsere Daten.", "bins, edges = np.histogram(gauss, bins=20)\nbin_width = edges[1] - edges[0] # Alle Bins haben die gleiche Breite\ncentres = edges[:-1] + bin_width / 2\n\ndef scaled_pdf(x):\n return bin_width * n_events * pdf(x)\n\nplt.errorbar( # Typisches \"Teilchenphysikerhistorgamm\"\n centres, # x\n bins, # y\n xerr=bin_width/2, # Unsicherheit auf x: hier Breite der Bins\n yerr=np.sqrt(bins), # Unsicherheit auf y\n fmt='o', # Benutze Punkte statt Linien zur Darstellung\n label='Data'\n)\nplt.plot(xs, scaled_pdf(xs), label='PDF')\nplt.xlabel('Wert')\nplt.ylabel('Relative Häufigkeit')\nplt.ylim(-100, 2000) # Manuelles Setzen des sichtbaren vertikalen Ausschnittes\nplt.legend()", "Ich denke das ist ein guter Punkt um unsere kleine Einführung zu beenden.\nAn dieser Stelle sei abschließend auf die Scipy Dokumentation verwiesen. Neben dem Statistikmodul gibt es dort Funktionen für, unter anderem, Integration (scipy.integrate), Optimierung/Fits (scipy.optimize), Interpolation (scipy.interpolate), Fouriertransformationen (scipy.fftpack) und viele mehr.\nWeiterführendes Material\n\nPandas ein mächtiges Werkzeug für die Analyse tabellarischer Daten im Stile von Data Frames in R\nUncertainties Bibliothek für das Rechnen mit Unsicherheiten\nSimPy Bibliothek für symbolisches Rechnen" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
haltaro/predicting-comic-end
1_analyze_comic_data_j.ipynb
mit
[ "1. 目次情報の分析\ndataにあるwj-api.json等のデータを使って遊んでみます.\n環境構築\nbash\nconda env create -f env.yml\n準備\n日本語で漫画のタイトルを表示したいので,matplotlibで日本語を描画 on Ubuntuを参考に設定します.Ubuntu以外をお使いの方は,適宜ご対応ください.", "%matplotlib inline\n\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nsns.set(style='ticks')\n\nimport matplotlib\nfrom matplotlib.font_manager import FontProperties\nfont_path = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'\nfont_prop = FontProperties(fname=font_path)\nmatplotlib.rcParams['font.family'] = font_prop.get_name()", "ComicAnalyzer\n雑誌分析用に,ComicAnalyzerクラスを定義します.", "class ComicAnalyzer():\n \"\"\"漫画雑誌の目次情報を読みだして,管理するクラスです.\"\"\"\n \n def __init__(self, data_path='data/wj-api.json', min_week=7, short_week=10):\n \"\"\"\n 初期化時に,data_pathにある.jsonファイルから目次情報を抽出します.\n - self.data: 全目次情報を保持するリスト型\n - self.all_titles: 全作品名情報を保持するリスト型\n - self.serialized_titles: min_week以上連載した全作品名を保持するリスト型\n - self.last_year: 最新の目次情報の年を保持する数値型\n - self.last_no: 最新の目次情報の号数を保持する数値型\n - self.end_titles: self.serialized_titlesのうち,self.last_yearおよび\n self.last_noまでに終了した全作品名を保持するリスト型\n - self.short_end_titles: self.end_titlesのうち,short_week週以内に\n 連載が終了した作品名を保持するリスト型\n - self.long_end_titles: self.end_titlesのうち,short_week+1週以上に\n 連載が継続した作品名を保持するリスト型\n \"\"\"\n \n self.data = self.read_data(data_path)\n self.all_titles = self.collect_all_titles()\n self.serialized_titles = self.drop_short_titles(self.all_titles, min_week)\n self.last_year = self.find_last_year(self.serialized_titles[-100:])\n self.last_no = self.find_last_no(self.serialized_titles[-100:], self.last_year)\n self.end_titles = self.drop_continued_titles(\n self.serialized_titles, self.last_year, self.last_no)\n self.short_end_titles = self.drop_long_titles(\n self.end_titles, short_week)\n self.long_end_titles = self.drop_short_titles(\n self.end_titles, short_week + 1)\n\n def read_data(self, data_path):\n \"\"\" data_pathにあるjsonファイルを読み出して,全ての目次情報をまとめたリストを返します. \"\"\"\n with open(data_path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n return data\n\n def collect_all_titles(self):\n \"\"\" self.dataから全ての作品名を抽出したリストを返します. \"\"\"\n titles = []\n for comic in self.data:\n if comic['title'] not in titles:\n titles.append(comic['title'])\n return titles\n\n def extract_item(self, title='ONE PIECE', item='worst'):\n \"\"\" self.dataからtitleのitemをすべて抽出したリストを返します. \"\"\"\n return [comic[item] for comic in self.data if comic['title'] == title]\n\n def drop_short_titles(self, titles, min_week):\n \"\"\" titlesのうち,min_week週以上連載した作品名のリストを返します. \"\"\"\n return [title for title in titles\n if len(self.extract_item(title)) >= min_week]\n\n def drop_long_titles(self, titles, max_week):\n \"\"\" titlesのうち,max_week週以内で終了した作品名のリストを返します. \"\"\"\n return [title for title in titles\n if len(self.extract_item(title)) <= max_week]\n\n def find_last_year(self, titles):\n \"\"\" titlesが掲載された雑誌のうち,最新の年を返します. \"\"\"\n return max([self.extract_item(title, 'year')[-1]\n for title in titles])\n\n def find_last_no(self, titles, year):\n \"\"\" titlesが掲載されたyear年の雑誌のうち,最新の号数を返します. \"\"\"\n return max([self.extract_item(title, 'no')[-1]\n for title in titles\n if self.extract_item(title, 'year')[-1] == year])\n\n def drop_continued_titles(self, titles, year, no):\n \"\"\" titlesのうち,year年のno号までに連載が終了した作品名のリストを返します. 
\"\"\"\n end_titles = []\n for title in titles:\n last_year = self.extract_item(title, 'year')[-1]\n if last_year < year:\n end_titles.append(title)\n elif last_year == year:\n if self.extract_item(title, 'no')[-1] < no:\n end_titles.append(title)\n return end_titles\n\n def search_title(self, key, titles):\n \"\"\" titlesのうち,keyを含む作品名のリストを返します. \"\"\"\n return [title for title in titles if key in title]", "かなりわかりづらい処理をしているので,初期化時(__init__())の動作を補足します.\n1. self.all_titlesは文字通り全ての作品名を保持します.しかし,self.all_titlesは,明らかに読みきり作品や企画作品を含んでしまっています.\n2. そこで,min_week以上連載した作品self.serialized_titlesとして抽出します.しかし,self.serialized_titlesは,データベースの最新の目次情報の時点で,連載を継続中の作品を含んでおり,連載継続期間が不正確になってしまいます.例えば,「鬼滅の刃」など現在も連載中の人気作が,21週で連載が終了した作品のように見えてしまいます.\n3. そこで,データベースの最新の目次情報の時点で連載が終了した(と思われれる)作品のみをself.end_titlesとして抽出します.self.end_titlesが,本分析における全体集合です.\n4. self.end_titlesのうち,10週以内に終了した作品をself.short_end_titlesとして,11週以内に継続した作品をself.long_end_titlesとして抽出します.\n分析", "wj = ComicAnalyzer()", "10週以内で終わった最新10タイトルの最初の10話分の掲載順(worst)を表示してみます.値が大きいほど,巻頭付近に掲載されていたことになります.", "for title in wj.short_end_titles[-10:]:\n plt.plot(wj.extract_item(title)[:10], label=title[:6])\nplt.xlabel('Week')\nplt.ylabel('Worst')\nplt.ylim(0,22)\nplt.legend()", "あれ?「斉木楠雄」って結構連載していたんじゃ…?こういうときは,search_title()を使います.", "wj.search_title('斉木', wj.all_titles)\n\nlen(wj.extract_item('超能力者 斉木楠雄のΨ難'))\n\nwj.extract_item('超能力者 斉木楠雄のΨ難', 'year'), \\\nwj.extract_item('超能力者 斉木楠雄のΨ難', 'no')\n\nlen(wj.extract_item('斉木楠雄のΨ難'))", "どうやら,「超能力者 斉木楠雄のΨ難」で試験的に7回読み切り掲載したあと,「斉木楠雄のΨ難」の連載を開始したみたいですね(wikipedia).\n次は,近年のヒット作(独断)の最初の10話分の掲載順を表示します.", "target_titles = ['ONE PIECE', 'NARUTO-ナルト-', 'BLEACH', 'HUNTER×HUNTER']\nfor title in target_titles:\n plt.plot(wj.extract_item(title)[:10], label=title[:6])\nplt.ylim(0,22)\nplt.xlabel('Week')\nplt.ylabel('Worst')\nplt.legend()", "個人的に気になったので,50話まで掲載順を見てみます.", "target_titles = ['ONE PIECE', 'NARUTO-ナルト-', 'BLEACH', 'HUNTER×HUNTER']\nfor title in target_titles:\n plt.plot(wj.extract_item(title)[:50], label=title[:6])\nplt.ylim(0,22)\nplt.xlabel('Week')\nplt.ylabel('Worst')\nplt.legend()", "ある程度予想はしてましたが,さすがですね.ちなみにですが,extract_item()を使ってサブタイトルを取得しながら掲載順を見ると,マンガ好きの方は楽しいと思います.", "wj.extract_item('ONE PIECE', 'subtitle')[:10]", "さて,seabornで相関分析をやってみます.ここでは,ひとまず6週目までの掲載順をプロットします.同じ座標に複数の点が重なって非常に見づらいので,便宜上ランダムなノイズを加えて見栄えを整えます.なお,1週目を外したのは,ほとんどの場合巻頭に掲載されるためです.", "end_data = pd.DataFrame(\n [[wj.extract_item(title)[1] + np.random.randn() * .3,\n wj.extract_item(title)[2] + np.random.randn() * .3,\n wj.extract_item(title)[3] + np.random.randn() * .3,\n wj.extract_item(title)[4] + np.random.randn() * .3,\n wj.extract_item(title)[5] + np.random.randn() * .3,\n '短命作品' if title in wj.short_end_titles else '継続作品']\n for title in wj.end_titles])\n\nend_data.columns = [\"Worst (week2)\", \"Worst (week3)\", \"Worst (week4)\", \n \"Worst (week5)\", \"Worst (week6)\", \"Type\"]\nsns.pairplot(end_data, hue=\"Type\", palette=\"husl\")", "若干ですが,短命作品は低い掲載順位に集中しがちな傾向があることがわかります." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
loujine/musicbrainz-dataviz
23-bnf.ipynb
mit
[ "Exploring BNF/WikiData SPARQL data\nhttps://www.w3.org/2009/Talks/0615-qbe/\nhttps://www.ibm.com/developerworks/library/j-sparql/\nhttps://www.wikidata.org/wiki/Wikidata:SPARQL_query_service/queries/examples\nhttps://github.com/bobdc/misc/blob/master/JupyterSPARQL/JupyterSPARQLFun.ipynb", "import requests\nimport pandas as pd\nimport json\nimport SPARQLWrapper\n\n# https://lawlesst.github.io/notebook/sparql-dataframe.html\ndef get_sparql_dataframe(endpoint, query):\n \"\"\"\n Helper function to convert SPARQL results into a Pandas data frame.\n \"\"\"\n sparql = SPARQLWrapper.SPARQLWrapper(endpoint)\n sparql.setQuery(query)\n sparql.setReturnFormat(SPARQLWrapper.JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)\n\nprefixes = \"\"\"\nPREFIX mus: <http://data.doremus.org/ontology#>\nPREFIX ecrm: <http://erlangen-crm.org/current/>\nPREFIX efrbroo: <http://erlangen-crm.org/efrbroo/>\nPREFIX skos: <http://www.w3.org/2004/02/skos/core#>\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX dcterms: <http://purl.org/dc/terms/>\nPREFIX dcmitype: <http://purl.org/dc/dcmitype/>\nPREFIX dwc: <http://rs.tdwg.org/dwc/terms/>\nPREFIX tdwg: <http://rs.tdwg.org/dwc/terms/#>\nPREFIX mrtg: <http://xxx.org/XXX/>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX txn: <http://lod.taxonconcept.org/ontology/txn.owl#>\nPREFIX dsw: <http://purl.org/dsw/>\nPREFIX mbank: <http://www.morphbank.net/schema/morphbank#>\n\"\"\"", "WikiData", "endpoint = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql'\n\nquery = \"\"\"\nPREFIX wikibase: <http://wikiba.se/ontology#>\nPREFIX wd: <http://www.wikidata.org/entity/>\nPREFIX wdt: <http://www.wikidata.org/prop/direct/>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\nSELECT ?president ?cause ?dob ?dod WHERE {\n ?pid wdt:P39 wd:Q11696 .\n ?pid wdt:P509 ?cid .\n ?pid wdt:P569 ?dob .\n ?pid wdt:P570 ?dod .\n\n OPTIONAL {\n ?pid rdfs:label ?president filter (lang(?president) = \"en\") .\n }\n OPTIONAL {\n ?cid rdfs:label ?cause filter (lang(?cause) = \"en\") .\n }\n}\n\"\"\"\n\nrequests.get(endpoint, params={'query': query, 'format': 'json'}).json()\n\nget_sparql_dataframe(endpoint, query).head()", "Data BNF\nhttp://data.bnf.fr/fr/opendata", "endpoint = 'http://data.bnf.fr/sparql'\n\nquery = \"\"\"\nSELECT ?artist ?name ?bdate ?ddate ?wdurl ?mburl\nWHERE {\n ?artist isni:identifierValid \"0000000108935378\" .\n ?artist owl:sameAs ?wdurl .\n FILTER (regex (?wdurl, \"wikidata.org\"))\n ?artist owl:sameAs ?mburl .\n FILTER (regex (?mburl, \"musicbrainz.org\")) .\n OPTIONAL {\n ?artist bio:birth ?bdate .\n ?artist bio:death ?ddate .\n ?artist foaf:name ?name\n }\n}\n\"\"\"\nget_sparql_dataframe(endpoint, query).head()\n\nquery = \"\"\"\nSELECT DISTINCT ?predicate ?val\nWHERE {\n <http://data.bnf.fr/ark:/12148/cb13894801b> ?predicate ?val\n}\n\"\"\"\nget_sparql_dataframe(endpoint, query).head()\n\nquery = \"\"\"\nSELECT ?artist ?name ?isni\nWHERE {\n ?artist foaf:name \"Emilʹ Grigorʹevič Gilelʹs\" ;\n foaf:name ?name .\n #?artist isni:identifierValid ?isni\n}\n\"\"\"\nget_sparql_dataframe(endpoint, 
query).head()\n\nhttp://data.bnf.fr/sparql?default-graph-uri=&query=PREFIX+foaf%3A+%3Chttp%3A%2F%2Fxmlns.com%2Ffoaf%2F0.1%2F%3E%0D%0APREFIX+rdarelationships%3A+%3Chttp%3A%2F%2Frdvocab.info%2FRDARelationshipsWEMI%2F%3E%0D%0APREFIX+dcterms%3A+%3Chttp%3A%2F%2Fpurl.org%2Fdc%2Fterms%2F%3E%0D%0ASELECT+DISTINCT+%3Fedition+%3Ftitre+%3Fdate+%3Fediteur+%3FURLGallica%0D%0AWHERE+{%0D%0A%3Chttp%3A%2F%2Fdata.bnf.fr%2Fark%3A%2F12148%2Fcb12258414j%3E+foaf%3Afocus+%3Foeuvre.%0D%0A%3Fedition+rdarelationships%3AworkManifested+%3Foeuvre.%0D%0AOPTIONAL+{%0D%0A%3Fedition+dcterms%3Adate+%3Fdate.%0D%0A++}%0D%0AOPTIONAL+{%0D%0A%3Fedition+dcterms%3Atitle+%3Ftitre.+%0D%0A++}%0D%0AOPTIONAL+{%0D%0A%3Fedition+dcterms%3Apublisher+%3Fediteur.%0D%0A++}%0D%0AOPTIONAL+{%0D%0A%3Fedition+rdarelationships%3AelectronicReproduction+%3FURLGallica.%0D%0A++}%0D%0A}&format=application%2Fjson&timeout=0&should-sponge=&debug=on\n\nquery = \"\"\"\"\nSELECT DISTINCT ?name ?gender ?nat ?bday ?dday\nWHERE {\n ?mbartist foaf:name ?name ;\n foaf:gender ?gender ;\n rdagroup2elements:dateOfBirth ?bday ;\n rdagroup2elements:dateOfDeath ?dday .\nOPTIONAL\n {\n ?mbartist foaf:nationality ?nat\n }\n}\nLIMIT 10\n\"\"\"\nget_sparql_dataframe(endpoint, query).head()\n\nquery = \"\"\"SELECT ?auteur ?jour ?date1 ?date2 ?nom\nWHERE {\n ?auteur foaf:birthday ?jour.\n ?auteur bio:birth ?date1.\n ?auteur bio:death ?date2.\n OPTIONAL {\n ?auteur foaf:name ?nom.\n }\n} \t\nORDER BY (?jour)\nLIMIT 10\n\"\"\"\nget_sparql_dataframe(endpoint, query).head()\n\nquery = \"\"\"\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX bnf-onto: <http://data.bnf.fr/ontology/bnf-onto/>\nPREFIX owl: <http://www.w3.org/2002/07/owl#>\nSELECT DISTINCT ?name ?year ?endyear ?url ?wikidata ?gallica ?gender\nWHERE {\n <http://data.bnf.fr/ark:/12148/cb13894801b#foaf:Person> foaf:name ?name ;\n bnf-onto:firstYear ?year ;\n bnf-onto:lastYear ?endyear ;\n owl:sameAs ?url ;\n foaf:page ?wikidata ;\n foaf:depiction ?gallica ;\n foaf:gender ?gender .\n}\n\"\"\"\nget_sparql_dataframe(endpoint, query).head()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
digital-humanities-data-curation/hilt2015
4-intro-to-pandas.ipynb
mit
[ "Power Tools: Python Pandas\nTo demonstrate the usefulness of the pandas Python library, we'll walk through a real life use case. An extract-transform-load pipeline we built for working with the NYPL menus data:\nA Repeatable Extract-Tranform-Load Pipeline for NYPL Menus Data\nCreated: 17 October 2014\nUpdated: 28 October 2014, 19 November 2014, 3 December 2014\nAuthors: Trevor Muñoz and Katie Rawson\n&nbsp;\nAcquiring Data (Extract)\nFor the purpose of this exercise, we'll grab a copy from our local data folder", "import os\nimport datetime\nimport time\nimport tarfile\n\nDATA_FILE = '../data/menus-2015_07_16_07_01_00_data.tgz'\n\ntar = tarfile.open(DATA_FILE)\n\nfor tf in tar.getmembers():\n print('Name: {0} \\t Last Modified: {1}'.format(tf.name, time.ctime(tf.mtime)))\n\nDATA_DIR = '../data/nypl_menus'\ntar.extractall(path=DATA_DIR)\n\nfor f in os.listdir(DATA_DIR):\n if f.endswith('csv'):\n if os.path.isfile(os.path.join(DATA_DIR, f)) == True:\n print('{0} … \\u2713'.format(f))\n\ntar.close()", "Working with Data in DataFrames (Tranform)\n&nbsp;", "import re\nimport pandas as pd\n\nLATEST_DISH_DATA_DF = pd.DataFrame.from_csv(os.path.join(DATA_DIR, 'Dish.csv'), \n index_col='id')\nLATEST_ITEM_DATA_DF = pd.DataFrame.from_csv(os.path.join(DATA_DIR, 'MenuItem.csv'), \n index_col='dish_id')\nLATEST_PAGE_DATA_DF = pd.DataFrame.from_csv(os.path.join(DATA_DIR, 'MenuPage.csv'), \n index_col='id')\nLATEST_MENU_DATA_DF = pd.DataFrame.from_csv(os.path.join(DATA_DIR, 'Menu.csv'),\n index_col='id')", "Dish.csv", "NULL_APPEARANCES = LATEST_DISH_DATA_DF[LATEST_DISH_DATA_DF.times_appeared == 0]\n\nprint('Data set contains {0} dishes that appear 0 times …'.format(\n len(NULL_APPEARANCES))\n)\n\nNON_NULL_DISH_DATA_DF = LATEST_DISH_DATA_DF[LATEST_DISH_DATA_DF.times_appeared != 0]\n\ndiscarded_columns = [n for n in NON_NULL_DISH_DATA_DF.columns if n not in \n ['name', 'menus_appeared', 'times_appeared']]\n\nprint('Discarding columns from Dish.csv …')\nfor discard in discarded_columns:\n print('{0} … removed'.format(discard))\n\nTRIMMED_DISH_DATA_DF = NON_NULL_DISH_DATA_DF[['name', 'menus_appeared', 'times_appeared']]\n\nprint('Dish.csv contains {0} potentially-unique dish names before any normalization'.\n format(TRIMMED_DISH_DATA_DF.name.nunique()))\n\ndef normalize_names(obj):\n '''\n Take a name as a string, converts the string\n to lowercase, strips whitespace from beginning\n and end, normalizes multiple internal whitespace\n characters to a single space. 
E.g.:\n \n normalize_names('Chicken gumbo ') = 'chicken gumbo'\n \n '''\n tokens = obj.strip().lower().split()\n result = ' '.join(filter(None, tokens))\n return result\n\nTRIMMED_DISH_DATA_DF['normalized_name'] = TRIMMED_DISH_DATA_DF.name.map(normalize_names)\n\nprint(\n 'Dish.csv contains {0} potentially-unique dish names after normalizing whitespace and punctuation'\n .format(TRIMMED_DISH_DATA_DF.normalized_name.nunique())\n)\n\ndef fingerprint(obj):\n \"\"\"\n A modified version of the fingerprint clustering algorithm implemented by Open Refine.\n See https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth\n This does not normalize to ASCII characters since diacritics may be significant in this dataset\n \"\"\"\n alphanumeric_tokens = filter(None, re.split('\\W', obj))\n seen = set()\n seen_add = seen.add\n deduped = sorted([i for i in alphanumeric_tokens if i not in seen and not seen_add(i)])\n fingerprint = ' '.join(deduped)\n \n return fingerprint\n\nTRIMMED_DISH_DATA_DF['fingerprint'] = TRIMMED_DISH_DATA_DF.normalized_name.map(fingerprint)\n\nprint(\n 'Dish.csv contains {0} unique fingerprint values'\n .format(TRIMMED_DISH_DATA_DF.fingerprint.nunique())\n)\n\nTRIMMED_DISH_DATA_DF.head()", "MenuItem.csv", "discarded_columns2 = [n for n in LATEST_ITEM_DATA_DF.columns if n not in \n ['id', 'menu_page_id', 'xpos', 'ypos']]\n\nprint('Discarding columns from MenuItem.csv …')\nfor discard2 in discarded_columns2:\n print('{0} … removed'.format(discard2))\n\nTRIMMED_ITEM_DATA_DF = LATEST_ITEM_DATA_DF[['id', 'menu_page_id', 'xpos', 'ypos']]\n\nTRIMMED_ITEM_DATA_DF.head()", "MenuPage.csv", "LATEST_PAGE_DATA_DF.head()\n\nLATEST_PAGE_DATA_DF[['full_height', 'full_width']].astype(int, raise_on_error=False)", "Menu.csv", "LATEST_MENU_DATA_DF.columns\n\ndiscarded_columns3 = [n for n in LATEST_MENU_DATA_DF.columns if n not in \n ['sponsor', 'location', 'date', 'page_count', 'dish_count']]\n\npipeline_logger.info('Discarding columns from Menu.csv …')\nfor discard3 in discarded_columns3:\n pipeline_logger.info('{0} … removed'.format(discard3))\n\nTRIMMED_MENU_DATA_DF = LATEST_MENU_DATA_DF[['sponsor', 'location', 'date',\n 'page_count', 'dish_count']]\n\nTRIMMED_MENU_DATA_DF.head()", "Merging DataFrames", "MERGED_ITEM_PAGES_DF = pd.merge(TRIMMED_ITEM_DATA_DF, LATEST_PAGE_DATA_DF, \n left_on='menu_page_id', right_index=True, )\n\nMERGED_ITEM_PAGES_DF.columns = ['item_id', 'menu_page_id', 'xpos', 'ypos', \n 'menu_id', 'page_number', \n 'image_id', 'full_height', 'full_width', 'uuid']\n\n#MERGED_ITEM_PAGES_DF.head()\n\nMERGED_ITEM_PAGES_MENUS_DF = pd.merge(TRIMMED_MENU_DATA_DF, MERGED_ITEM_PAGES_DF, \n left_index=True, right_on='menu_id')\n\nFULL_MERGE = pd.merge(MERGED_ITEM_PAGES_MENUS_DF, TRIMMED_DISH_DATA_DF, \n left_index=True, right_index=True)\n\nFULL_MERGE.head()\n\nFOR_JSON_OUTPUT = FULL_MERGE.reset_index()\n\nFOR_JSON_OUTPUT.columns\n\nrenamed_columns = ['dish_id', 'menu_sponsor', 'menu_location', 'menu_date', 'menu_page_count', \n 'menu_dish_count', 'item_id', 'menu_page_id', 'item_xpos', 'item_ypos', \n 'menu_id', 'menu_page_number', 'image_id', \n 'page_image_full_height', 'page_image_full_width', 'page_image_uuid', 'dish_name', \n 'dish_menus_appeared', 'dish_times_appeared', 'dish_normalized_name', 'dish_name_fingerprint']\n\nFOR_JSON_OUTPUT.columns = renamed_columns\n\nFOR_JSON_OUTPUT[['menu_page_number', 'dish_id', 'item_id', 'menu_page_id', 'menu_id']].astype(int, raise_on_error=False)\n\nFOR_JSON_OUTPUT['dish_uri']= FOR_JSON_OUTPUT.dish_id.map(lambda x: 
'http://menus.nypl.org/dishes/{0}'.format(int(x)))\n\nFOR_JSON_OUTPUT['item_uri'] = FOR_JSON_OUTPUT.item_id.map(lambda x: 'http://menus.nypl.org/menu_items/{0}/edit'\n .format(int(x)))\n\nFOR_JSON_OUTPUT['menu_page_uri'] = FOR_JSON_OUTPUT.menu_page_id.map(lambda x: 'http://menus.nypl.org/menu_pages/{0}'\n .format(int(x)))\n\nFOR_JSON_OUTPUT['menu_uri'] = FOR_JSON_OUTPUT.menu_id.map(lambda x: 'http://menus.nypl.org/menus/{0}'\n .format(int(x)))\n\nFOR_JSON_OUTPUT.head()\n\nprint('Generating JSON …')\nFOR_JSON_OUTPUT.to_json(path_or_buf='../data/nypl_menus/menus_all.json', orient='index', force_ascii=False)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/ml-design-patterns
02_data_representation/feature_cross.ipynb
apache-2.0
[ "Feature Crosses\nA feature cross is a synthetic feature formed by multiplying two or more features.", "from google.cloud import bigquery", "Feature Crosses in BigQuery\nWe'll first explore how to create a feature cross in BigQuery. The cell below will create a dataset called babyweight in your GCP project, if it does not already exist. This dataset will will house our tables and models.", "bq = bigquery.Client()\ndataset = bigquery.Dataset(bq.dataset(\"babyweight\"))\n\ntry:\n bq.create_dataset(dataset)\n print(\"Dataset created.\")\nexcept:\n print(\"Dataset already exists.\")", "Create datasets for training and evaluation", "%%bigquery\nCREATE OR REPLACE TABLE\n babyweight.babyweight_data AS\nSELECT\n weight_pounds,\n CAST(is_male AS STRING) AS is_male,\n mother_age,\n CASE\n WHEN plurality = 1 THEN \"Single(1)\"\n WHEN plurality = 2 THEN \"Twins(2)\"\n WHEN plurality = 3 THEN \"Triplets(3)\"\n WHEN plurality = 4 THEN \"Quadruplets(4)\"\n WHEN plurality = 5 THEN \"Quintuplets(5)\"\n END AS plurality,\n gestation_weeks,\n CAST(mother_race AS STRING) AS mother_race,\n FARM_FINGERPRINT(\n CONCAT(\n CAST(year AS STRING),\n CAST(month AS STRING)\n )\n ) AS hashmonth\nFROM\n publicdata.samples.natality\nWHERE\n year > 2000\n AND weight_pounds > 0\n AND mother_age > 0\n AND plurality > 0\n AND gestation_weeks > 0", "Next, we'll create tables in BigQuery that we'll use for training and evaluation.", "%%bigquery\nCREATE OR REPLACE TABLE\n babyweight.babyweight_data_train AS\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n mother_race\nFROM\n babyweight.babyweight_data\nWHERE\n ABS(MOD(hashmonth, 4)) < 3\n\n%%bigquery\nCREATE OR REPLACE TABLE\n babyweight.babyweight_data_eval AS\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n mother_race\nFROM\n babyweight.babyweight_data\nWHERE\n ABS(MOD(hashmonth, 4)) = 3", "Create model in BigQuery", "%%bigquery\nCREATE OR REPLACE MODEL `babyweight.natality_model`\nOPTIONS\n (MODEL_TYPE=\"DNN_REGRESSOR\",\n HIDDEN_UNITS=[64, 32],\n BATCH_SIZE=32,\n INPUT_LABEL_COLS=[\"weight_pounds\"],\n DATA_SPLIT_METHOD=\"NO_SPLIT\") AS\nSELECT\n weight_pounds,\n is_male,\n plurality,\n gestation_weeks,\n mother_age,\n CAST(mother_race AS string) AS mother_race\nFROM\n babyweight.babyweight_data_train", "We can use ML.EVALUATE to determine the root mean square error of our model on the evaluation set.", "query = \"\"\"\nSELECT\n *, SQRT(mean_squared_error) AS rmse\nFROM\n ML.EVALUATE(MODEL `babyweight.natality_model`,\n (\n SELECT\n weight_pounds,\n is_male,\n plurality,\n gestation_weeks,\n mother_age,\n CAST(mother_race AS STRING) AS mother_race\n FROM\n babyweight.babyweight_data_eval ))\n\"\"\"\n\ndf = bq.query(query).to_dataframe()\ndf.head()", "Creating a Feature Cross with BQML\nNext, we'll create a feature cross of the features is_male and mother_race. To create a feature cross we apply ML.FEATURE_CROSS to a STRUCT of the features is_male and mother_race cast as a string. \nThe STRUCT clause creates an ordered pair of the two features. The TRANSFORM clause is used for engineering features of our model. This allows us to specify all preprocessing during model creation and apply those preprocessing steps during prediction and evaluation. 
The rest of the features within the TRANSFORM clause remain unchanged.", "%%bigquery\nCREATE OR REPLACE MODEL `babyweight.natality_model_feat_eng`\nTRANSFORM(weight_pounds,\n is_male,\n plurality,\n gestation_weeks, \n mother_age,\n CAST(mother_race AS string) AS mother_race,\n ML.FEATURE_CROSS(\n STRUCT(\n is_male,\n plurality)\n ) AS gender_X_plurality)\nOPTIONS\n (MODEL_TYPE='linear_reg',\n INPUT_LABEL_COLS=['weight_pounds'],\n DATA_SPLIT_METHOD=\"NO_SPLIT\") AS \nSELECT\n *\nFROM\n babyweight.babyweight_data_train", "As before, we compute the root mean square error.", "query = \"\"\"\nSELECT\n *, SQRT(mean_squared_error) AS rmse\nFROM\n ML.EVALUATE(MODEL `babyweight.natality_model_feat_eng`,\n (\n SELECT\n weight_pounds,\n is_male,\n plurality,\n gestation_weeks,\n mother_age,\n CAST(mother_race AS STRING) AS mother_race\n FROM\n babyweight.babyweight_data_eval ))\n\"\"\"\n\ndf = bq.query(query).to_dataframe()\ndf.head()", "Feature Crosses in Keras\nNext, we'll see how to implement a feature cross in Tensorflow using feature columns.", "import os\n\nimport tensorflow as tf\nimport datetime\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow import feature_column as fc\n\n# Determine CSV, label, and key columns\n# Create list of string column headers, make sure order matches.\nCSV_COLUMNS = [\"weight_pounds\",\n \"is_male\",\n \"mother_age\",\n \"plurality\",\n \"gestation_weeks\",\n \"mother_race\"]\n\n# Add string name for label column\nLABEL_COLUMN = \"weight_pounds\"\n\n# Set default values for each CSV column as a list of lists.\n# Treat is_male and plurality as strings.\nDEFAULTS = [[0.0], [\"null\"], [0.0], [\"null\"], [0.0], [\"null\"]]", "Make a dataset of features and label.", "def features_and_labels(row_data):\n \"\"\"Splits features and labels from feature dictionary.\n Args:\n row_data: Dictionary of CSV column names and tensor values.\n Returns:\n Dictionary of feature tensors and label tensor.\n \"\"\"\n label = row_data.pop(LABEL_COLUMN)\n\n return row_data, label\n\n\ndef load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):\n \"\"\"Loads dataset using the tf.data API from CSV files.\n Args:\n pattern: str, file pattern to glob into list of files.\n batch_size: int, the number of examples per batch.\n mode: tf.estimator.ModeKeys to determine if training or evaluating.\n Returns:\n `Dataset` object.\n \"\"\"\n # Make a CSV dataset\n dataset = tf.data.experimental.make_csv_dataset(\n file_pattern=pattern,\n batch_size=batch_size,\n column_names=CSV_COLUMNS,\n column_defaults=DEFAULTS)\n\n # Map dataset to features and label\n dataset = dataset.map(map_func=features_and_labels) # features, label\n\n # Shuffle and repeat for training\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=1000).repeat()\n\n # Take advantage of multi-threading; 1=AUTOTUNE\n dataset = dataset.prefetch(buffer_size=1)\n\n return dataset", "We'll need to get the data read in by our input function to our model function, but just how do we go about connecting the dots? 
We can use Keras input layers (tf.Keras.layers.Input).", "def create_input_layers():\n \"\"\"Creates dictionary of input layers for each feature.\n\n Returns:\n Dictionary of `tf.Keras.layers.Input` layers for each feature.\n \"\"\"\n inputs = {\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"float32\")\n for colname in [\"mother_age\", \"gestation_weeks\"]}\n\n inputs.update({\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"string\")\n for colname in [\"is_male\", \"plurality\", \"mother_race\"]})\n\n return inputs", "Create feature columns for inputs\nNext, define the feature columns. mother_age and gestation_weeks should be numeric. The others, is_male, plurality and mother_race, should be categorical. Remember, only dense feature columns can be inputs to a DNN.\nThe last feature column created in the create_feature_columns function is a feature cross with is_male and plurality. To implement a feature cross in Tensorflow we use tf.feature_column.crossed_column which takes two arguments: a list of the feature keys to be crossed and the hash bucket size. Crossed features will be hashed according to hash_bucket_size so it should be large enough to accommodate all possible crossed categories. Since the feature is_male can take 3 values (True, False or Unknown) and the feature plurality can take 6 values (Single(1), Twins(2), Triplets(3), Quadruplets(4), Quintuplets(5), Multiple(2+)), we'll set hash_bucket_size=18.\nFinally, to use crossed column in DNN model, you need to wrap it either in an indicator_column or an embedding_column. In the code below, we use an embedding column and take the embedding dimension to be 2. \nTo create a crossed column with features of numeric type, you can use categorical_column, or bucketized_column before passing to a crossed_column.", "def categorical_fc(name, values):\n cat_column = fc.categorical_column_with_vocabulary_list(\n key=name, vocabulary_list=values)\n\n return fc.indicator_column(categorical_column=cat_column)\n\n\ndef create_feature_columns():\n feature_columns = {\n colname : fc.numeric_column(key=colname)\n for colname in [\"mother_age\", \"gestation_weeks\"]\n }\n\n feature_columns[\"is_male\"] = categorical_fc(\n \"is_male\", [\"True\", \"False\", \"Unknown\"])\n feature_columns[\"plurality\"] = categorical_fc(\n \"plurality\", [\"Single(1)\", \"Twins(2)\", \"Triplets(3)\",\n \"Quadruplets(4)\", \"Quintuplets(5)\", \"Multiple(2+)\"])\n feature_columns[\"mother_race\"] = fc.indicator_column(\n fc.categorical_column_with_hash_bucket(\n \"mother_race\", hash_bucket_size=17, dtype=tf.dtypes.string))\n \n feature_columns[\"gender_x_plurality\"] = fc.embedding_column(\n fc.crossed_column([\"is_male\", \"plurality\"], hash_bucket_size=18),\n dimension=2)\n\n return feature_columns", "We can double-check the output of create_feature_columns.", "feature_columns = create_feature_columns()\nprint(\"Feature column keys: \\n{}\\n\".format(list(feature_columns.keys())))\nprint(\"Feature column values: \\n{}\\n\".format(list(feature_columns.values())))", "Define a DNN model\nNext we define our model. This is regression so make sure the output layer activation is correct and that the shape is right. 
We'll create deep neural network model, similar to what we use in BigQuery.", "def get_model_outputs(inputs):\n # Create two hidden layers of [64, 32] just in like the BQML DNN\n h1 = layers.Dense(64, activation=\"relu\", name=\"h1\")(inputs)\n h2 = layers.Dense(32, activation=\"relu\", name=\"h2\")(h1)\n\n # Final output is a linear activation because this is regression\n output = layers.Dense(units=1, activation=\"linear\", name=\"weight\")(h2)\n\n return output\n\ndef rmse(y_true, y_pred):\n return tf.sqrt(tf.reduce_mean((y_pred - y_true) ** 2))", "Finally, we will build the model using tf.keras.models.Model giving our inputs and outputs and then compile our model with an optimizer, a loss function, and evaluation metrics.", "def build_dnn_model():\n \"\"\"Builds simple DNN using Keras Functional API.\n\n Returns:\n `tf.keras.models.Model` object.\n \"\"\"\n # Create input layer\n inputs = create_input_layers()\n\n # Create feature columns\n feature_columns = create_feature_columns()\n\n # The constructor for DenseFeatures takes a list of numeric columns\n # The Functional API in Keras requires: LayerConstructor()(inputs)\n dnn_inputs = layers.DenseFeatures(\n feature_columns=feature_columns.values())(inputs)\n\n # Get output of model given inputs\n output = get_model_outputs(dnn_inputs)\n\n # Build model and compile it all together\n model = tf.keras.models.Model(inputs=inputs, outputs=output)\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=[rmse, \"mse\"])\n\n return model\n\nprint(\"Here is our DNN architecture so far:\\n\")\nmodel = build_dnn_model()\nprint(model.summary())\n\ntf.keras.utils.plot_model(\n model=model, to_file=\"dnn_model.png\", show_shapes=False, rankdir=\"LR\")", "Train and evaluate our model\nWe've built our Keras model using our inputs from our CSV files and the architecture we designed. Let's now run our model by training our model parameters and periodically running an evaluation to track how well we are doing on outside data as training goes on. We'll need to load both our train and eval datasets and send those to our model through the fit method. Make sure you have the right pattern, batch size, and mode when loading the data.", "%%time\n\ntf.random.set_seed(33)\n\nTRAIN_BATCH_SIZE = 32\nNUM_TRAIN_EXAMPLES = 1000 * 5 # training dataset repeats, it'll wrap around\nNUM_EVALS = 5 # how many times to evaluate\n# Enough to get a reasonable sample, but not so much that it slows down\nNUM_EVAL_EXAMPLES = 1000\n\ntrainds = load_dataset(\n pattern=\"./data/babyweight_train*\",\n batch_size=TRAIN_BATCH_SIZE,\n mode=tf.estimator.ModeKeys.TRAIN)\n\nevalds = load_dataset(\n pattern=\"./data/babyweight_eval*\",\n batch_size=1000,\n mode=tf.estimator.ModeKeys.EVAL).take(count=NUM_EVAL_EXAMPLES // 1000)\n\nsteps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)\n\nlogdir = os.path.join(\n \"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=logdir, histogram_freq=1)\n\nhistory = model.fit(\n trainds,\n validation_data=evalds,\n epochs=NUM_EVALS,\n steps_per_epoch=steps_per_epoch,\n callbacks=[tensorboard_callback])", "Need for regularization\nLet's use a high-cardinality feature cross to illustrate the point. 
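As a rough back-of-the-envelope check (based on the bucket widths used in the query below), each pickup and dropoff coordinate is bucketized into 0.01-degree bins, roughly 800 bins per coordinate, so the concatenated pickup_and_dropoff feature can take on the order of 800^4 (about 4 x 10^11) distinct values, while the sampled table holds only about a million trips; almost every crossed category is therefore seen just a handful of times, which is exactly the situation where regularization is needed. 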
In this model, we are predicting taxifare in New York city using a feature cross of lat and lon", "!bq show mlpatterns || bq mk mlpatterns\n\n%%bigquery\nCREATE OR REPLACE TABLE mlpatterns.taxi_data AS\n\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_datetime,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers\nFROM `nyc-tlc.yellow.trips`\n# The full dataset has 1+ Billion rows, let's take only 1 out of 1,000 (or 1 Million total)\nWHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 1000)) = 1\nAND\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n\n%%bigquery\nCREATE OR REPLACE MODEL mlpatterns.taxi_noreg\nTRANSFORM(\n fare_amount\n , ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,\n CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday), 2) AS day_hr\n , CONCAT(\n ML.BUCKETIZE(pickuplon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(pickuplat, GENERATE_ARRAY(37, 45, 0.01)),\n ML.BUCKETIZE(dropofflon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(dropofflat, GENERATE_ARRAY(37, 45, 0.01))\n ) AS pickup_and_dropoff\n)\nOPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg') \nAS\n\nSELECT * FROM mlpatterns.taxi_data\n\n%%bigquery\nSELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL mlpatterns.taxi_noreg)\n\n%%bigquery\nCREATE OR REPLACE MODEL mlpatterns.taxi_l2reg\nTRANSFORM(\n fare_amount\n , ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,\n CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday), 2) AS day_hr\n , CONCAT(\n ML.BUCKETIZE(pickuplon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(pickuplat, GENERATE_ARRAY(37, 45, 0.01)),\n ML.BUCKETIZE(dropofflon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(dropofflat, GENERATE_ARRAY(37, 45, 0.01))\n ) AS pickup_and_dropoff\n)\nOPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg', l2_reg=0.5) \nAS\n\nSELECT * FROM mlpatterns.taxi_data\n\n%%bigquery\nSELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL mlpatterns.taxi_l2reg)\n\n100 * (4.814606 - 4.828183)/4.828183", "Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
BrownDwarf/ApJdataFrames
notebooks/Lada2006.ipynb
mit
[ "ApJdataFrames 013: Lada2006\nTitle: Spitzer Observations of IC 348: The Disk Population at 2-3 Million Years\nAuthors: Charles J Lada, August A Muench, Kevin L Luhman, Lori E Allen, Lee Hartmann, Tom Megeath, Philip Myers, Giovanni Fazio, Kenneth Wood, James Muzerolle, George Rieke, Nick Siegler, and Erick Young \nData is from this paper:\nhttp://iopscience.iop.org/1538-3881/131/3/1574/fulltext/204953.html", "%pylab inline\nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport pandas as pd\n\nfrom astropy.io import ascii\nfrom astropy.table import Table, join", "Table 1 - Spitzer IRAC/MIPS IC348 catalog", "tbl1 = ascii.read(\"http://iopscience.iop.org/1538-3881/131/3/1574/fulltext/datafile1.txt\")\ntbl1[0:4]", "Table 2 - SED Derived $\\alpha_{IRAC}$ and $A_V$\nBut really... spectral types", "tbl2 = ascii.read(\"http://iopscience.iop.org/1538-3881/131/3/1574/fulltext/datafile2.txt\")\ntbl2[0:4]\n\njoin_tbls = join(tbl1, tbl2, keys=\"Seq\")\nprint \"There are {} rows in tbl1, {} in tbl2, and {} in the joined table.\".format(len(tbl1), len(tbl2), len(join_tbls))\n\njoin_tbls[0:4]", "Table 3 - Convenient passbands table", "names = [\"PASSBAND\",\"DATA SYSTEM\",\"REFERENCES\",\"center_wavelength\",\"F_{nu} (Jy)\",\"References\"]\ntbl3 = pd.read_csv(\"http://iopscience.iop.org/1538-3881/131/3/1574/fulltext/204953.tb3.txt\", \n na_values=\"\\ldots\", names = names, sep='\\t')\ntbl3.head()", "The end." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PMBio/cyclone
py/demo/demo_cyclone.ipynb
apache-2.0
[ "Classify cell cycle\n1. Load data\nThis notebook allows classification based on known cell cycle markers using a variety of (semi-)supervised machine learning algorithms. First, load a few modules and specify the test file. Training and util files should remain as below. The hdf5 file containing the test data can be generated in R from a data frame using the write_hf.R function.", "%pylab inline\n\nfrom __future__ import print_function\nimport h5py\nimport sys\nimport os\nfrom os.path import exists\nsys.path.append('../')\nsys.path.append('../core')\nimport scipy as SP\nfrom cyclone import cyclone\nfrom load_data import *\n\nCFG = {}\nCFG['test_file'] = '../../data/data_mESCbulk.h5f'\n#CFG['test_file'] = './data/normCounts_mESCquartz.h5f'\nCFG['train_file'] = '../../data/normCountsBuettnerEtAl.h5f'\nCFG['util_file'] = '../../data/normCounts_mESCquartz.h5f'\n\nout_dir = './out_mESCbulk/'", "Load data and show loaded variables. The data dictionary contains the full normalised read count marices for training and test file as well as a list of the respective gene names (either gene symbols or ENSEMBL - specify in the is_Ens option) and a list of cell cycle genes. In addition labels for traning and testing should be provided.", "data = load_data(CFG, is_Ens=True, het_only=True, het_onlyCB=False, gene_set='GOCB')#gene_set can be either a list of genes,\nclass_labels = data['class_labels']#['G1','G2M','S']#['T-cells']#d#['Liver']#['early', 'late', 'mid']#data['class_labels']#['G1', 'S','G2M']#['Liver']#[data['class_labels']#['T-cells']##['G1', 'S','G2M']#['T-cells']#['G1', 'S','G2M']# # #or 'all' (all genes), 'GOCB' GO and cyclebase or 'CB' or 'GO'\ndata.keys()\n\nprint(data['cc_ens'].shape[0], 'Cell cycle genes used for training and prediction')\nprint(data['class_labels'])", "The data required to build the model are loaded. Next, we initialise the model.", "cyclone = cyclone(data['Y'],row_namesY= data['genes'],cc_geneNames= data['cc_ens'],labels = data['labels'], \n Y_tst = data['Y_test'], row_namesY_tst = data['genes_tst'], labels_tst = data['labels_tst'])", "2. Train model\nBy default, a 10-fold corss-validation is performed on the training data to estimate the gernealizability of the gene set used for a number of classifers (PCA based, random forest, logistic regression, lasso and SVM (with rbf kernel)); then the model is trained on the entire data-set and applied to the test data-set. Once training and testing are completed, a plot with variable importances form the Random Forest method is shown together with a classification report in terms of precision and recall.", "cyclone.trainModel(rftop = 40, cv=10, out_dir = out_dir, do_pca=1, npc=1, is_SVM=0)", "3. Plot results\nResults can be visualised in terms of barplots indicating the distributions of predicted cell cycle phases for the individual classes/labels in the test data (both int erms of absolute cells and as relative plot). In addition a barplot for the cross-validation results as well as cell-cycle phase specific ROC cruves are shown to make sure the model performs well in the cross-validation.", "cyclone.plotHistograms(class_labels = class_labels, out_dir = out_dir, method='GNB', do_h=True)\ncyclone.plotPerformance(plot_test=False, out_dir =out_dir, method='GNB')", "In addition to the barplots the confidence of the classifier can be visualised in form of a scatter plot. By default, a scatter plot for the test data is shown; a scatter plot for the training data can be shown by setting the plot_test argument to False. 
The scores to be shown on the x- and y-axis can be chosen using the xaxis and yaxis argument.", "cyclone.plotScatter(plot_test = True, xaxis = 0, yaxis = 2, xlab = 'G1 score', ylab = 'G2M score', class_labels = class_labels, out_dir = out_dir, method='GNB')\ncyclone.plotScatter(plot_test = False, xaxis = 0, yaxis = 2, xlab = 'G1 score', ylab = 'G2M score', class_labels = ['G1', 'S', 'G2M'], out_dir = out_dir, method='GNB')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
piskvorky/gensim
docs/notebooks/Word2Vec_FastText_Comparison.ipynb
lgpl-2.1
[ "Comparison of FastText and Word2Vec\nFacebook Research open sourced a great project recently - fastText, a fast (no surprise) and effective method to learn word representations and perform text classification. I was curious about comparing these embeddings to other commonly used embeddings, so word2vec seemed like the obvious choice, especially considering fastText embeddings are an extension of word2vec. \nI've used gensim to train the word2vec models, and the analogical reasoning task (described in Section 4.1 of [2]) for comparing the word2vec and fastText models. I've compared embeddings trained using the skipgram architecture.\nDownload data", "import nltk\nfrom smart_open import smart_open\nnltk.download('brown') \n# Only the brown corpus is needed in case you don't have it.\n\n# Generate brown corpus text file\nwith smart_open('brown_corp.txt', 'w+') as f:\n for word in nltk.corpus.brown.words():\n f.write('{word} '.format(word=word))\n\n# Make sure you set FT_HOME to your fastText directory root\nFT_HOME = 'fastText/'\n# download the text8 corpus (a 100 MB sample of cleaned wikipedia text)\nimport os.path\nif not os.path.isfile('text8'):\n !wget -c http://mattmahoney.net/dc/text8.zip\n !unzip text8.zip\n# download and preprocess the text9 corpus\nif not os.path.isfile('text9'):\n !wget -c http://mattmahoney.net/dc/enwik9.zip\n !unzip enwik9.zip\n !perl {FT_HOME}wikifil.pl enwik9 > text9", "Train models\nFor training the models yourself, you'll need to have both Gensim and FastText set up on your machine.", "MODELS_DIR = 'models/'\n!mkdir -p {MODELS_DIR}\n\nlr = 0.05\ndim = 100\nws = 5\nepoch = 5\nminCount = 5\nneg = 5\nloss = 'ns'\nt = 1e-4\n\nfrom gensim.models import Word2Vec, KeyedVectors\nfrom gensim.models.word2vec import Text8Corpus\n\n# Same values as used for fastText training above\nparams = {\n 'alpha': lr,\n 'size': dim,\n 'window': ws,\n 'iter': epoch,\n 'min_count': minCount,\n 'sample': t,\n 'sg': 1,\n 'hs': 0,\n 'negative': neg\n}\n\ndef train_models(corpus_file, output_name):\n output_file = '{:s}_ft'.format(output_name)\n if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))):\n print('Training fasttext on {:s} corpus..'.format(corpus_file))\n %time !{FT_HOME}fasttext skipgram -input {corpus_file} -output {MODELS_DIR+output_file} -lr {lr} -dim {dim} -ws {ws} -epoch {epoch} -minCount {minCount} -neg {neg} -loss {loss} -t {t}\n else:\n print('\\nUsing existing model file {:s}.vec'.format(output_file))\n \n output_file = '{:s}_ft_no_ng'.format(output_name)\n if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))):\n print('\\nTraining fasttext on {:s} corpus (without char n-grams)..'.format(corpus_file))\n %time !{FT_HOME}fasttext skipgram -input {corpus_file} -output {MODELS_DIR+output_file} -lr {lr} -dim {dim} -ws {ws} -epoch {epoch} -minCount {minCount} -neg {neg} -loss {loss} -t {t} -maxn 0\n else:\n print('\\nUsing existing model file {:s}.vec'.format(output_file))\n \n output_file = '{:s}_gs'.format(output_name)\n if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))):\n print('\\nTraining word2vec on {:s} corpus..'.format(corpus_file))\n \n # Text8Corpus class for reading space-separated words file\n %time gs_model = Word2Vec(Text8Corpus(corpus_file), **params); gs_model\n # Direct local variable lookup doesn't work properly with magic statements (%time)\n locals()['gs_model'].wv.save_word2vec_format(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file)))\n print('\\nSaved gensim 
model as {:s}.vec'.format(output_file))\n else:\n print('\\nUsing existing model file {:s}.vec'.format(output_file))\n\nevaluation_data = {}\ntrain_models('brown_corp.txt', 'brown')\n\ntrain_models(corpus_file='text8', output_name='text8')\n\ntrain_models(corpus_file='text9', output_name='text9')", "Comparisons", "# download the file questions-words.txt to be used for comparing word embeddings\n!wget https://raw.githubusercontent.com/tmikolov/word2vec/master/questions-words.txt", "Once you have downloaded or trained the models and downloaded questions-words.txt, you're ready to run the comparison.", "import logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# Training times in seconds\nevaluation_data['brown'] = [(18, 54.3, 32.5)]\nevaluation_data['text8'] = [(402, 942, 496)]\nevaluation_data['text9'] = [(3218, 6589, 3550)]\n\ndef print_accuracy(model, questions_file):\n print('Evaluating...\\n')\n acc = model.accuracy(questions_file)\n\n sem_correct = sum((len(acc[i]['correct']) for i in range(5)))\n sem_total = sum((len(acc[i]['correct']) + len(acc[i]['incorrect'])) for i in range(5))\n sem_acc = 100*float(sem_correct)/sem_total\n print('\\nSemantic: {:d}/{:d}, Accuracy: {:.2f}%'.format(sem_correct, sem_total, sem_acc))\n \n syn_correct = sum((len(acc[i]['correct']) for i in range(5, len(acc)-1)))\n syn_total = sum((len(acc[i]['correct']) + len(acc[i]['incorrect'])) for i in range(5,len(acc)-1))\n syn_acc = 100*float(syn_correct)/syn_total\n print('Syntactic: {:d}/{:d}, Accuracy: {:.2f}%\\n'.format(syn_correct, syn_total, syn_acc))\n return (sem_acc, syn_acc)\n\nword_analogies_file = 'questions-words.txt'\naccuracies = []\nprint('\\nLoading Gensim embeddings')\nbrown_gs = KeyedVectors.load_word2vec_format(MODELS_DIR + 'brown_gs.vec')\nprint('Accuracy for Word2Vec:')\naccuracies.append(print_accuracy(brown_gs, word_analogies_file))\n\nprint('\\nLoading FastText embeddings')\nbrown_ft = KeyedVectors.load_word2vec_format(MODELS_DIR + 'brown_ft.vec')\nprint('Accuracy for FastText (with n-grams):')\naccuracies.append(print_accuracy(brown_ft, word_analogies_file))", "The accuracy takes an optional parameter restrict_vocab, which limits the vocabulary of model considered for fast approximate evaluation (default is 30000).\nWord2Vec embeddings seem to be slightly better than fastText embeddings at the semantic tasks, while the fastText embeddings do significantly better on the syntactic analogies. Makes sense, since fastText embeddings are trained for understanding morphological nuances, and most of the syntactic analogies are morphology based. \nLet me explain that better.\nAccording to the paper [1], embeddings for words are represented by the sum of their n-gram embeddings. This is meant to be useful for morphologically rich languages - so theoretically, the embedding for apparently would include information from both character n-grams apparent and ly (as well as other n-grams), and the n-grams would combine in a simple, linear manner. 
This is very similar to what most of our syntactic tasks look like.\nExample analogy:\namazing amazingly calm calmly\nThis analogy is marked correct if: \nembedding(amazing) - embedding(amazingly) = embedding(calm) - embedding(calmly)\nBoth these subtractions would result in a very similar set of remaining ngrams.\nNo surprise the fastText embeddings do extremely well on this.\nLet's do a small test to validate this hypothesis - fastText differs from word2vec only in that it uses char n-gram embeddings as well as the actual word embedding in the scoring function to calculate scores and then likelihoods for each word, given a context word. In case char n-gram embeddings are not present, this reduces (at least theoretically) to the original word2vec model. This can be implemented by setting 0 for the max length of char n-grams for fastText.", "print('Loading FastText embeddings')\nbrown_ft_no_ng = KeyedVectors.load_word2vec_format(MODELS_DIR + 'brown_ft_no_ng.vec')\nprint('Accuracy for FastText (without n-grams):')\naccuracies.append(print_accuracy(brown_ft_no_ng, word_analogies_file))\nevaluation_data['brown'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]]", "A-ha! The results for FastText with no n-grams and Word2Vec look a lot more similar (as they should) - the differences could easily result from differences in implementation between fastText and Gensim, and randomization. Especially telling is that the semantic accuracy for FastText has improved slightly after removing n-grams, while the syntactic accuracy has taken a giant dive. Our hypothesis that the char n-grams result in better performance on syntactic analogies seems fair. It also seems possible that char n-grams hurt semantic accuracy a little. However, the brown corpus is too small to be able to draw any definite conclusions - the accuracies seem to vary significantly over different runs.\nLet's try with a larger corpus now - text8 (collection of wiki articles). I'm also curious about the impact on semantic accuracy - for models trained on the brown corpus, the difference in the semantic accuracy and the accuracy values themselves are too small to be conclusive. Hopefully a larger corpus helps, and the text8 corpus likely has a lot more information about capitals, currencies, cities etc, which should be relevant to the semantic tasks.", "accuracies = []\nprint('Loading Gensim embeddings')\ntext8_gs = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text8_gs.vec')\nprint('Accuracy for word2vec:')\naccuracies.append(print_accuracy(text8_gs, word_analogies_file))\n\nprint('Loading FastText embeddings (with n-grams)')\ntext8_ft = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text8_ft.vec')\nprint('Accuracy for FastText (with n-grams):')\naccuracies.append(print_accuracy(text8_ft, word_analogies_file))\n\nprint('Loading FastText embeddings')\ntext8_ft_no_ng = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text8_ft_no_ng.vec')\nprint('Accuracy for FastText (without n-grams):')\naccuracies.append(print_accuracy(text8_ft_no_ng, word_analogies_file))\n\nevaluation_data['text8'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]]", "With the text8 corpus, we observe a similar pattern. Semantic accuracy falls by a small but significant amount when n-grams are included in FastText, while FastText with n-grams performs far better on the syntactic analogies. 
FastText without n-grams are largely similar to Word2Vec.\nMy hypothesis for semantic accuracy being lower for the FastText-with-ngrams model is that most of the words in the semantic analogies are standalone words and are unrelated to their morphemes (eg: father, mother, France, Paris), hence inclusion of the char n-grams into the scoring function actually makes the embeddings worse.\nThis trend is observed in the original paper too where the performance of embeddings with n-grams is worse on semantic tasks than both word2vec cbow and skipgram models.\nLet's do a quick comparison on an even larger corpus - text9", "accuracies = []\nprint('Loading Gensim embeddings')\ntext9_gs = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text9_gs.vec')\nprint('Accuracy for word2vec:')\naccuracies.append(print_accuracy(text9_gs, word_analogies_file))\n\nprint('Loading FastText embeddings (with n-grams)')\ntext9_ft = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text9_ft.vec')\nprint('Accuracy for FastText (with n-grams):')\naccuracies.append(print_accuracy(text9_ft, word_analogies_file))\n\nprint('Loading FastText embeddings')\ntext9_ft_no_ng = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text9_ft_no_ng.vec')\nprint('Accuracy for FastText (without n-grams):')\naccuracies.append(print_accuracy(text9_ft_no_ng, word_analogies_file))\n\nevaluation_data['text9'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]]\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef plot(ax, data, corpus_name='brown'):\n width = 0.25\n pos = [(i, i + width, i + 2*width) for i in range(len(data))]\n colors = ['#EE3224', '#F78F1E', '#FFC222']\n acc_ax = ax.twinx()\n # Training time\n ax.bar(pos[0],\n data[0],\n width,\n alpha=0.5,\n color=colors\n )\n # Semantic accuracy\n acc_ax.bar(pos[1],\n data[1],\n width,\n alpha=0.5,\n color=colors\n )\n\n # Syntactic accuracy\n acc_ax.bar(pos[2],\n data[2],\n width,\n alpha=0.5,\n color=colors\n )\n\n ax.set_ylabel('Training time (s)')\n acc_ax.set_ylabel('Accuracy (%)')\n ax.set_title(corpus_name)\n\n acc_ax.set_xticks([p[0] + 1.5 * width for p in pos])\n acc_ax.set_xticklabels(['Training Time', 'Semantic Accuracy', 'Syntactic Accuracy'])\n\n # Proxy plots for adding legend correctly\n proxies = [ax.bar([0], [0], width=0, color=c, alpha=0.5)[0] for c in colors]\n models = ('Gensim', 'FastText', 'FastText (no-ngrams)')\n ax.legend((proxies), models, loc='upper left')\n \n ax.set_xlim(pos[0][0]-width, pos[-1][0]+width*4)\n ax.set_ylim([0, max(data[0])*1.1] )\n acc_ax.set_ylim([0, max(data[1] + data[2])*1.1] )\n\n plt.grid()\n\n# Plotting the bars\nfig = plt.figure(figsize=(10,15))\nfor corpus, subplot in zip(sorted(evaluation_data.keys()), [311, 312, 313]):\n ax = fig.add_subplot(subplot)\n plot(ax, evaluation_data[corpus], corpus)\n\nplt.show()", "The results from text9 seem to confirm our hypotheses so far. Briefly summarising the main points -\n\nFastText models with n-grams do significantly better on syntactic tasks, because of the syntactic questions being related to morphology of the words\nBoth Gensim word2vec and the fastText model with no n-grams do slightly better on the semantic tasks, presumably because words from the semantic questions are standalone words and unrelated to their char n-grams\nIn general, the performance of the models seems to get closer with the increasing corpus size. 
However, this might possibly be due to the size of the model staying constant at 100, and a larger model size for large corpora might result in higher performance gains.\nThe semantic accuracy for all models increases significantly with the increase in corpus size.\nHowever, the increase in syntactic accuracy from the increase in corpus size for the n-gram FastText model is lower (in both relative and absolute terms). This could possibly indicate that advantages gained by incorporating morphological information could be less significant in case of larger corpus sizes (the corpuses used in the original paper seem to indicate this too)\nTraining times for gensim are slightly lower than the fastText no-ngram model, and significantly lower than the n-gram variant. This is quite impressive considering fastText is implemented in C++ and Gensim in Python (with calls to low-level BLAS routines for much of the heavy lifting). You could read this post for more details regarding word2vec optimisation in Gensim. Note that these times include importing any dependencies and serializing the models to disk, and not just the training times.\n\nConclusions\nThese preliminary results seem to indicate fastText embeddings are significantly better than word2vec at encoding syntactic information. This is expected, since most syntactic analogies are morphology based, and the char n-gram approach of fastText takes such information into account. The original word2vec model seems to perform better on semantic tasks, since words in semantic analogies are unrelated to their char n-grams, and the added information from irrelevant char n-grams worsens the embeddings. It'd be interesting to see how transferable these embeddings are for different kinds of tasks by comparing their performance in a downstream supervised task.\nReferences\n[1] Enriching Word Vectors with Subword Information\n[2] Efficient Estimation of Word Representations in Vector Space" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
liganega/Gongsu-DataSci
previous/y2017/W12-pandas-intro2/GongSu26_Pandas_Introduction_2.ipynb
gpl-3.0
[ "Pandas 소개 2\nGonsSu24 내용에 이어서 Pandas 라이브러리를 소개한다.\n먼저 GongSu24를 임포트 한다.", "from GongSu24_Pandas_Introduction_1 import *", "색인(Index) 클래스\nPandas에 정의된 색인(Index) 클래스는 Series와 DataFrame 자료형의 행과 열을 구분하는 이름들의 목록을 저장하는 데에 사용된다. \nSeries 객체에서 사용되는 Index 객체\n\nindex 속성\n\n아래와 같이 Series 객체를 생성한 후에 index를 확인해보자.", "s6 = Series(range(3), index=['a', 'b', 'c'])\ns6", "index의 자료형이 Index 클래스의 객체임을 확인할 수 있다.", "s6_index = s6.index\ns6_index", "Index 객체에 대해 인덱싱과 슬라이싱을 리스트의 경우처럼 활용할 수 있다.", "s6_index[2]\n\ns6_index[1:]", "Index 객체는 불변(immutable) 자료형이다.", "s6_index[1] = 'd'", "색인 객체는 변경될 수 없기에 자료 구조 사이에서 안전하게 공유될 수 있다.", "an_index = pd.Index(np.arange(3))\nan_index", "앞서 선언된 an_index를 새로운 Series 나 DataFrame 을 생성하는 데에 사용할 수 있으며, 사용된 index가 무엇인지를 확인할 수도 있다.", "s7= Series([1.5, -2.5, 0], index=an_index)\ns7.index is an_index", "DataFrame 객체에서 사용되는 Index 객체\n\nindex 속성\ncolumns 속성", "df3", "columns와 index 속성 모두 Index 객체이다.", "df3.columns\n\ndf3.index\n\ndf3.columns[:2]", "in 연산자 활용하기\nin 연산자를 활용하여 index 와 columns에 사용된 행과 열의 이름의 존재여부를 확인할 수 있다.", "'debt' in df3.columns\n\n'four' in df3.index", "각각의 색인은 담고 있는 데이터에 대한 정보를 취급하는 여러 가지 메서드와 속성을 가지고 있다. [표 5-3]을 참고하자.\nSeries와 DataFrame 관련 연산 및 주요 메소드\nSeries나 DataFrame 형식으로 저장된 데이터를 다루는 주요 연산 및 기능을 설명한다.\n재색인(reindex) 메소드\nreindex() 메소드는 지정된 색인을 사용해서 새로운 Series나 DataFrame 객체를 생성한다.\nSeries의 경우 재색인", "s8 = Series([4.3, 9.2, 8.1, 3.9], index= ['b', 'c', 'a', 'd'])\ns8", "reindex() 메소드를 이용하여 인덱스를 새로 지정할 수 있다. \n주의: 새로 사용되는 항목이 index에 추가되면 NaN이 값으로 사용된다.", "s9 = s8.reindex(['a', 'b', 'c', 'd', 'e', 'f'])\ns9", "누락된 값을 지정된 값으로 채울 수도 있다.", "s8.reindex(['a','b','c','d','e', 'f'], fill_value=0.0)", "method 옵션\n시계열(time series) 등과 데이터 처럼 어떠 순서에 따라 정렬된 데이터를 재색인할 때 \n보간법을 이용하여 누락된 값들을 채워 넣어야 하는 경우가 있다. \n이런 경우 method 옵션을 이용하며, ffill, bfill, nearest 등을 옵션값으로 활용한다.", "s9 = Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])\ns9\n\ns9.reindex(range(6))\n\ns9.reindex(range(6), method='ffill')\n\ns9.reindex(range(6), method='bfill')\n\ns9.reindex(range(6), method='nearest')", "DataFrame의 경우 재색인\n행과 열에 대해 모두 사용이 가능하다.", "data = np.arange(9).reshape(3, 3)\ndata\n\ndf6 = DataFrame(data, index=['a', 'b', 'd'], columns= ['Ohio', 'Texas', 'California'])\ndf6", "index 속성의 재색인은 Series의 경우와 동일하다.", "df7 = df6.reindex(['a', 'b', 'c', 'd'])\ndf7", "columns 속성의 재색인은 키워드(예약어)를 사용한다.", "states = ['Texas', 'Utah', 'California']\ndf6.reindex(columns=states)", "method 옵션을 이용한 보간은 행 대해서만 이루어진다.", "df6.reindex(index=['a', 'b', 'c', 'd'], method='ffill')\n\ndf6.reindex(index=['a', 'b', 'c', 'd'], method='bfill')\n\ndf6.reindex(index=['a', 2, 3, 4])", "method='nearest'는 인덱스가 모두 숫자인 경우에만 적용할 수 있다.", "df6.reindex(index=['a', 'b', 'c', 'd'], method='nearest')", "주의\nreindex는 기존 자료를 변경하지 않는다.", "df6", "loc 메소드를 이용한 재색인\nloc 메소드를 이용하여 재색인이 가능하다.", "states\n\ndf6.loc[['a', 'b', 'c', 'd'], states]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
timzhangau/ml_nano
practice_projects/cnn/mnist-mlp/mnist_mlp.ipynb
mit
[ "Convolutional Neural Networks\n\nIn this notebook, we train an MLP to classify images from the MNIST database.\n1. Load MNIST Database", "from keras.datasets import mnist\n\n# use Keras to import pre-shuffled MNIST database\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nprint(\"The MNIST database has a training set of %d examples.\" % len(X_train))\nprint(\"The MNIST database has a test set of %d examples.\" % len(X_test))", "2. Visualize the First Six Training Images", "import matplotlib.pyplot as plt\n%matplotlib inline\nimport matplotlib.cm as cm\nimport numpy as np\n\n# plot first six training images\nfig = plt.figure(figsize=(20,20))\nfor i in range(6):\n ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[])\n ax.imshow(X_train[i], cmap='gray')\n ax.set_title(str(y_train[i]))", "3. View an Image in More Detail", "def visualize_input(img, ax):\n ax.imshow(img, cmap='gray')\n width, height = img.shape\n thresh = img.max()/2.5\n for x in range(width):\n for y in range(height):\n ax.annotate(str(round(img[x][y],2)), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]<thresh else 'black')\n\nfig = plt.figure(figsize = (12,12)) \nax = fig.add_subplot(111)\nvisualize_input(X_train[0], ax)", "4. Rescale the Images by Dividing Every Pixel in Every Image by 255", "# rescale [0,255] --> [0,1]\nX_train = X_train.astype('float32')/255\nX_test = X_test.astype('float32')/255 ", "5. Encode Categorical Integer Labels Using a One-Hot Scheme", "from keras.utils import np_utils\n\n# print first ten (integer-valued) training labels\nprint('Integer-valued labels:')\nprint(y_train[:10])\n\n# one-hot encode the labels\ny_train = np_utils.to_categorical(y_train, 10)\ny_test = np_utils.to_categorical(y_test, 10)\n\n# print first ten (one-hot) training labels\nprint('One-hot labels:')\nprint(y_train[:10])", "6. Define the Model Architecture", "from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\n\n# define the model\nmodel = Sequential()\nmodel.add(Flatten(input_shape=X_train.shape[1:]))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10, activation='softmax'))\n\n# summarize the model\nmodel.summary()", "7. Compile the Model", "# compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', \n metrics=['accuracy'])", "8. Calculate the Classification Accuracy on the Test Set (Before Training)", "# evaluate test accuracy\nscore = model.evaluate(X_test, y_test, verbose=0)\naccuracy = 100*score[1]\n\n# print test accuracy\nprint('Test accuracy: %.4f%%' % accuracy)", "9. Train the Model", "from keras.callbacks import ModelCheckpoint \n\n# train the model\ncheckpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', \n verbose=1, save_best_only=True)\nhist = model.fit(X_train, y_train, batch_size=128, epochs=10,\n validation_split=0.2, callbacks=[checkpointer],\n verbose=1, shuffle=True)", "10. Load the Model with the Best Classification Accuracy on the Validation Set", "# load the weights that yielded the best validation accuracy\nmodel.load_weights('mnist.model.best.hdf5')", "11. Calculate the Classification Accuracy on the Test Set", "# evaluate test accuracy\nscore = model.evaluate(X_test, y_test, verbose=0)\naccuracy = 100*score[1]\n\n# print test accuracy\nprint('Test accuracy: %.4f%%' % accuracy)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
fastai/course-v3
zh-nbs/Lesson3_head_pose.ipynb
apache-2.0
[ "Practical Deep Learning for Coders, v3\nLesson3_head_pose\nRegression with BIWI head pose dataset<br>\n用BIWI头部姿势数据集进行回归建模\nThis is a more advanced example to show how to create custom datasets and do regression with images. Our task is to find the center of the head in each image. The data comes from the BIWI head pose dataset, thanks to Gabriele Fanelli et al. We have converted the images to jpeg format, so you should download the converted dataset from this link.<br>\n这个案例是一个更高级的示例,它展示了如何创建自定义数据集,并且对图像进行回归建模。 我们的任务是在每个图片中确定头部的中心位置。数据来自BIWI头部姿势数据集。感谢Gabriele Fanelli等人的努力。我们已经把图片转化为jpeg格式,因此你应该从这里下载转化好的数据。", "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nfrom fastai.vision import *", "Getting and converting the data\n数据获取与格式转换", "path = untar_data(URLs.BIWI_HEAD_POSE)\n\ncal = np.genfromtxt(path/'01'/'rgb.cal', skip_footer=6); cal\n\nfname = '09/frame_00667_rgb.jpg'\n\ndef img2txt_name(f): return path/f'{str(f)[:-7]}pose.txt'\n\nimg = open_image(path/fname)\nimg.show()\n\nctr = np.genfromtxt(img2txt_name(fname), skip_header=3); ctr\n\ndef convert_biwi(coords):\n c1 = coords[0] * cal[0][0]/coords[2] + cal[0][2]\n c2 = coords[1] * cal[1][1]/coords[2] + cal[1][2]\n return tensor([c2,c1])\n\ndef get_ctr(f):\n ctr = np.genfromtxt(img2txt_name(f), skip_header=3)\n return convert_biwi(ctr)\n\ndef get_ip(img,pts): return ImagePoints(FlowField(img.size, pts), scale=True)\n\nget_ctr(fname)\n\nctr = get_ctr(fname)\nimg.show(y=get_ip(img, ctr), figsize=(6, 6))", "Creating a dataset\n创建一个数据集", "data = (PointsItemList.from_folder(path)\n .split_by_valid_func(lambda o: o.parent.name=='13')\n .label_from_func(get_ctr)\n .transform(get_transforms(), tfm_y=True, size=(120,160))\n .databunch().normalize(imagenet_stats)\n )\n\ndata.show_batch(3, figsize=(9,6))", "Train model\n训练模型", "learn = cnn_learner(data, models.resnet34)\n\nlearn.lr_find()\nlearn.recorder.plot()\n\nlr = 2e-2\n\nlearn.fit_one_cycle(5, slice(lr))\n\nlearn.save('stage-1')\n\nlearn.load('stage-1');\n\nlearn.show_results()", "Data augmentation\n数据增强", "tfms = get_transforms(max_rotate=20, max_zoom=1.5, max_lighting=0.5, max_warp=0.4, p_affine=1., p_lighting=1.)\n\ndata = (PointsItemList.from_folder(path)\n .split_by_valid_func(lambda o: o.parent.name=='13')\n .label_from_func(get_ctr)\n .transform(tfms, tfm_y=True, size=(120,160))\n .databunch().normalize(imagenet_stats)\n )\n\ndef _plot(i,j,ax):\n x,y = data.train_ds[0]\n x.show(ax, y=y)\n\nplot_multi(_plot, 3, 3, figsize=(8,6))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tuanavu/coursera-university-of-washington
machine_learning/2_regression/assignment/week5/week-5-lasso-assignment-1-exercise.ipynb
mit
[ "Regression Week 5: Feature Selection and LASSO (Interpretation)\nIn this notebook, you will use LASSO to select features, building on a pre-implemented solver for LASSO (using GraphLab Create, though you can use other solvers). You will:\n* Run LASSO with different L1 penalties.\n* Choose best L1 penalty using a validation set.\n* Choose best L1 penalty using a validation set, with additional constraint on the size of subset.\nIn the second notebook, you will implement your own LASSO solver, using coordinate descent. \nFire up graphlab create", "import sys\nsys.path.append('C:\\Anaconda2\\envs\\dato-env\\Lib\\site-packages')\nimport graphlab", "Load in house sales data\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.", "sales = graphlab.SFrame('kc_house_data.gl/')", "Create new features\nAs in Week 2, we consider features that are some transformations of inputs.", "from math import log, sqrt\nsales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)\nsales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)\nsales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']\n\n# In the dataset, 'floors' was defined with type string, \n# so we'll convert them to float, before creating a new feature.\nsales['floors'] = sales['floors'].astype(float) \nsales['floors_square'] = sales['floors']*sales['floors']", "Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms.\nOn the other hand, taking square root of sqft_living will decrease the separation between big house and small house. The owner may not be exactly twice as happy for getting a house that is twice as big.\n\nLearn regression weights with L1 penalty\nLet us fit a model with all the features available, plus the features we just created above.", "all_features = ['bedrooms', 'bedrooms_square',\n 'bathrooms',\n 'sqft_living', 'sqft_living_sqrt',\n 'sqft_lot', 'sqft_lot_sqrt',\n 'floors', 'floors_square',\n 'waterfront', 'view', 'condition', 'grade',\n 'sqft_above',\n 'sqft_basement',\n 'yr_built', 'yr_renovated']", "Applying L1 penalty requires adding an extra parameter (l1_penalty) to the linear regression call in GraphLab Create. (Other tools may have separate implementations of LASSO.) Note that it's important to set l2_penalty=0 to ensure we don't introduce an additional L2 penalty.", "model_all = graphlab.linear_regression.create(sales, target='price', features=all_features,\n validation_set=None, \n l2_penalty=0., l1_penalty=1e10)", "Find what features had non-zero weight.", "# non_zero_weight = model_all.get(\"coefficients\")[\"value\"]\nnon_zero_weight = model_all[\"coefficients\"][model_all[\"coefficients\"][\"value\"] > 0]\nnon_zero_weight.print_rows(num_rows=20)", "Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection. \nQUIZ QUESTION:\nAccording to this list of weights, which of the features have been chosen? \nSelecting an L1 penalty\nTo find a good L1 penalty, we will explore multiple values using a validation set. 
Let us do three way split into train, validation, and test sets:\n* Split our sales data into 2 sets: training and test\n* Further split our training data into two sets: train, validation\nBe very careful that you use seed = 1 to ensure you get the same answer!", "(training_and_validation, testing) = sales.random_split(.9,seed=1) # initial train/test split\n(training, validation) = training_and_validation.random_split(0.5, seed=1) # split training into train and validate", "Next, we write a loop that does the following:\n* For l1_penalty in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, type np.logspace(1, 7, num=13).)\n * Fit a regression model with a given l1_penalty on TRAIN data. Specify l1_penalty=l1_penalty and l2_penalty=0. in the parameter list.\n * Compute the RSS on VALIDATION data (here you will want to use .predict()) for that l1_penalty\n* Report which l1_penalty produced the lowest RSS on validation data.\nWhen you call linear_regression.create() make sure you set validation_set = None.\nNote: you can turn off the print out of linear_regression.create() with verbose = False", "import numpy as np\nimport pprint \n\nvalidation_rss = {}\nfor l1_penalty in np.logspace(1, 7, num=13):\n model = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, verbose = False,\n l2_penalty=0., l1_penalty=l1_penalty)\n predictions = model.predict(validation)\n residuals = validation['price'] - predictions\n rss = sum(residuals**2)\n validation_rss[l1_penalty] = rss\n\n# pprint.pprint(result_dict)\nprint min(validation_rss.items(), key=lambda x: x[1]) ", "QUIZ QUESTIONS \n1. What was the best value for the l1_penalty?\n2. What is the RSS on TEST data of the model with the best l1_penalty?", "model_test = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, verbose = False,\n l2_penalty=0., l1_penalty=10.0)\npredictions_test = model.predict(testing)\nresiduals_test = testing['price'] - predictions_test\nrss_test = sum(residuals_test**2)\nprint rss_test", "QUIZ QUESTION\nAlso, using this value of L1 penalty, how many nonzero weights do you have?", "non_zero_weight_test = model_test[\"coefficients\"][model_test[\"coefficients\"][\"value\"] > 0]\nprint model_test[\"coefficients\"][\"value\"].nnz()\nnon_zero_weight_test.print_rows(num_rows=20)", "Limit the number of nonzero weights\nWhat if we absolutely wanted to limit ourselves to, say, 7 features? This may be important if we want to derive \"a rule of thumb\" --- an interpretable model that has only a few features in them.\nIn this section, you are going to implement a simple, two phase procedure to achive this goal:\n1. Explore a large range of l1_penalty values to find a narrow region of l1_penalty values where models are likely to have the desired number of non-zero weights.\n2. Further explore the narrow region you found to find a good value for l1_penalty that achieves the desired sparsity. Here, we will again use a validation set to choose the best value for l1_penalty.", "max_nonzeros = 7", "Exploring the larger range of values to find a narrow range with the desired sparsity\nLet's define a wide range of possible l1_penalty_values:", "l1_penalty_values = np.logspace(8, 10, num=20)\nprint l1_penalty_values", "Now, implement a loop that search through this space of possible l1_penalty values:\n\nFor l1_penalty in np.logspace(8, 10, num=20):\nFit a regression model with a given l1_penalty on TRAIN data. 
Specify l1_penalty=l1_penalty and l2_penalty=0. in the parameter list. When you call linear_regression.create() make sure you set validation_set = None\nExtract the weights of the model and count the number of nonzeros. Save the number of nonzeros to a list.\nHint: model['coefficients']['value'] gives you an SArray with the parameters you learned. If you call the method .nnz() on it, you will find the number of non-zero parameters!", "coef_dict = {}\nfor l1_penalty in l1_penalty_values:\n model = graphlab.linear_regression.create(training, target ='price', features=all_features,\n validation_set=None, verbose=None,\n l2_penalty=0., l1_penalty=l1_penalty)\n coef_dict[l1_penalty] = model['coefficients']['value'].nnz()\n\npprint.pprint(coef_dict)", "Out of this large range, we want to find the two ends of our desired narrow range of l1_penalty. At one end, we will have l1_penalty values that have too few non-zeros, and at the other end, we will have an l1_penalty that has too many non-zeros. \nMore formally, find:\n* The largest l1_penalty that has more non-zeros than max_nonzeros (if we pick a penalty smaller than this value, we will definitely have too many non-zero weights)\n * Store this value in the variable l1_penalty_min (we will use it later)\n* The smallest l1_penalty that has fewer non-zeros than max_nonzeros (if we pick a penalty larger than this value, we will definitely have too few non-zero weights)\n * Store this value in the variable l1_penalty_max (we will use it later)\nHint: there are many ways to do this, e.g.:\n* Programmatically within the loop above\n* Creating a list with the number of non-zeros for each value of l1_penalty and inspecting it to find the appropriate boundaries.", "l1_penalty_min = 2976351441.6313128\nl1_penalty_max = 3792690190.7322536", "QUIZ QUESTIONS\nWhat values did you find for l1_penalty_min and l1_penalty_max? \nExploring the narrow range of values to find the solution with the right number of non-zeros that has the lowest RSS on the validation set\nWe will now explore the narrow region of l1_penalty values we found:", "l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)", "For l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20):\nFit a regression model with a given l1_penalty on TRAIN data. Specify l1_penalty=l1_penalty and l2_penalty=0. in the parameter list. When you call linear_regression.create() make sure you set validation_set = None\nMeasure the RSS of the learned model on the VALIDATION set\n\n\n\nFind the model that has the lowest RSS on the VALIDATION set and has sparsity equal to max_nonzeros.", "validation_rss = {}\nfor l1_penalty in l1_penalty_values:\n model = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, verbose = False,\n l2_penalty=0., l1_penalty=l1_penalty)\n predictions = model.predict(validation)\n residuals = validation['price'] - predictions\n rss = sum(residuals**2)\n validation_rss[l1_penalty] = rss, model['coefficients']['value'].nnz()\n \n\n# initialise the search for the lowest RSS among models with exactly max_nonzeros nonzero weights\nbestRSS = float('inf')\nbestl1 = None\nfor k,v in validation_rss.iteritems(): \n if (v[1] == max_nonzeros) and (v[0] < bestRSS):\n bestRSS = v[0]\n bestl1 = k\n \nprint bestRSS, bestl1\n\n# rescan the dictionary to print the successive improvements in RSS\nbestRSS = float('inf')\nfor k,v in validation_rss.iteritems():\n if (v[1] == max_nonzeros) and (v[0] < bestRSS):\n bestRSS = v[0]\n print k, bestRSS", "QUIZ QUESTIONS\n1. What value of l1_penalty in our narrow range has the lowest RSS on the VALIDATION set and has sparsity equal to max_nonzeros?\n2. 
What features in this model have non-zero coefficients?", "model = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, verbose = False,\n l2_penalty=0., l1_penalty=3448968612.16)\n\nnon_zero_weight_test = model[\"coefficients\"][model[\"coefficients\"][\"value\"] > 0]\nnon_zero_weight_test.print_rows(num_rows=8)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dsacademybr/PythonFundamentos
Cap03/Notebooks/DSA-Python-Cap03-Exercicios-Loops-Condiconais-Solucao.ipynb
gpl-3.0
[ "<font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 3</font>\nDownload: http://github.com/dsacademybr", "# Versão da Linguagem Python\nfrom platform import python_version\nprint('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())", "Exercícios - Loops e Condiconais - Solução", "# Exercício 1 - Crie uma estrutura que pergunte ao usuário qual o dia da semana. Se o dia for igual a Domingo ou \n# igual a sábado, imprima na tela \"Hoje é dia de descanso\", caso contrário imprima na tela \"Você precisa trabalhar!\"\ndia = input('Digite o dia da semana: ')\nif dia == 'Domingo' or dia == 'Sábado':\n print(\"Hoje é dia de descanso\")\nelse:\n print(\"Você precisa trabalhar!\")\n\n# Exercício 2 - Crie uma lista de 5 frutas e verifique se a fruta 'Morango' faz parte da lista\nlista = ['Laranja', 'Maça', 'Abacaxi', 'Uva', 'Morango']\nfor fruta in lista:\n if fruta == 'Morango':\n print(\"Morango faz parte da lista de frutas\")\n\n# Exercício 3 - Crie uma tupla de 4 elementos, multiplique cada elemento da tupla por 2 e guarde os resultados em uma \n# lista\ntup1 = (1, 2, 3, 4)\nlst1 = []\nfor i in tup1:\n novo_valor = i * 2\n lst1.append(novo_valor)\nprint(lst1)\n\n# Exercício 4 - Crie uma sequência de números pares entre 100 e 150 e imprima na tela\nfor i in range(100, 151, 2):\n print(i)\n\n# Exercício 5 - Crie uma variável chamada temperatura e atribua o valor 40. Enquanto temperatura for maior que 35, \n# imprima as temperaturas na tela\ntemperatura = 40 \nwhile temperatura > 35: \n print(temperatura)\n temperatura = temperatura - 1\n\n# Exercício 6 - Crie uma variável chamada contador = 0. Enquanto counter for menor que 100, imprima os valores na tela,\n# mas quando for encontrado o valor 23, interrompa a execução do programa\ncontador = 0\nwhile contador < 100:\n if contador == 23:\n break\n print(contador)\n contador += 1\n\n# Exercício 7 - Crie uma lista vazia e uma variável com valor 4. Enquanto o valor da variável for menor ou igual a 20, \n# adicione à lista, apenas os valores pares e imprima a lista\nnumeros = list()\ni = 4\nwhile (i <= 20):\n numeros.append(i)\n i = i+2\nprint(numeros)\n\n# Exercício 8 - Transforme o resultado desta função range em uma lista: range(5, 45, 2)\nnums = range(5, 45, 2)\nprint(list(nums))\n\n# Exercício 9 - Faça a correção dos erros no código abaixo e execute o programa. Dica: são 3 erros.\ntemperatura = float(input('Qual a temperatura? '))\nif temperatura > 30:\n print('Vista roupas leves.')\nelse:\n print('Busque seus casacos.')\n\n# Exercício 10 - Faça um programa que conte quantas vezes a letra \"r\" aparece na frase abaixo. Use um placeholder na \n# sua instrução de impressão\n\n# “É melhor, muito melhor, contentar-se com a realidade; se ela não é tão brilhante como os sonhos, tem pelo menos a \n# vantagem de existir.” (Machado de Assis)\n\nfrase = \"É melhor, muito melhor, contentar-se com a realidade; se ela não é tão brilhante como os sonhos, tem pelo menos a vantagem de existir.\" \ncount = 0\nfor caracter in frase:\n if caracter == 'r':\n count += 1\nprint(\"O caracter r aparece %s vezes na frase.\" %(count))", "Fim\nObrigado\nVisite o Blog da Data Science Academy - <a href=\"http://blog.dsacademy.com.br\">Blog DSA</a>" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
psumank/DATA643
Final/DATA643_pySpark_Final_Project.ipynb
mit
[ "DATA 643 - Final Project\nSreejaya Nair and Suman K Polavarapu\nDescription:\nExplore the Apache Spark Cluster Computing Framework by analysing the movielens dataset. Provide recommendations using MLLib", "import os\nimport sys\nimport urllib2\nimport collections\nimport matplotlib.pyplot as plt\nimport math\nfrom time import time, sleep\n%pylab inline", "Prepare the pySpark Environment", "spark_home = os.environ.get('SPARK_HOME', None)\n\nif not spark_home: \n raise ValueError(\"Please set SPARK_HOME environment variable!\")\n\n# Add the py4j to the path.\nsys.path.insert(0, os.path.join(spark_home, 'python'))\nsys.path.insert(0, os.path.join(spark_home, 'C:/spark/python/lib/py4j-0.9-src.zip'))\n", "Initialize Spark Context", "from pyspark.mllib.recommendation import ALS, Rating\nfrom pyspark import SparkConf, SparkContext\n\nconf = SparkConf().setMaster(\"local[*]\").setAppName(\"MovieRecommendationsALS\").set(\"spark.executor.memory\", \"2g\")\nsc = SparkContext(conf = conf)", "Load and Analyse Data", "def loadMovieNames():\n movieNames = {}\n for line in urllib2.urlopen(\"https://raw.githubusercontent.com/psumank/DATA643/master/WK5/ml-100k/u.item\"):\n fields = line.split('|')\n movieNames[int(fields[0])] = fields[1].decode('ascii', 'ignore')\n return movieNames\n\nprint \"\\nLoading movie names...\"\nnameDict = loadMovieNames()\n\nprint \"\\nLoading ratings data...\"\ndata = sc.textFile(\"file:///C:/Users/p_sum/.ipynb_checkpoints/ml-100k/u.data\")\n\nratings = data.map(lambda x: x.split()[2])\n\n#action -- just to trigger the driver [ lazy evaluation ]\nrating_results = ratings.countByValue()\n\nsortedResults = collections.OrderedDict(sorted(rating_results.items()))\nfor key, value in sortedResults.iteritems():\n print \"%s %i\" % (key, value)", "Ratings Histogram", "ratPlot = plt.bar(range(len(sortedResults)), sortedResults.values(), align='center')\nplt.xticks(range(len(sortedResults)), list(sortedResults.keys()))\nratPlot[3].set_color('g')\nprint \"Ratings Histogram\"\n", "Most popular movies", "movies = data.map(lambda x: (int(x.split()[1]), 1))\nmovieCounts = movies.reduceByKey(lambda x, y: x + y)\nflipped = movieCounts.map( lambda (x, y) : (y, x))\nsortedMovies = flipped.sortByKey(False)\nsortedMoviesWithNames = sortedMovies.map(lambda (count, movie) : (nameDict[movie], count))\n\nresults = sortedMoviesWithNames.collect()\nsubset = results[0:10]\npopular_movieNm = [str(i[0]) for i in subset]\npopularity_strength = [int(i[1]) for i in subset]\n\npopMovplot = plt.barh(range(len(subset)), popularity_strength, align='center')\nplt.yticks(range(len(subset)), popular_movieNm)\npopMovplot[0].set_color('g')\nprint \"Most Popular Movies from the Dataset\"\n", "Similar Movies\nFind similar movies for a given movie using cosine similarity", "ratingsRDD = data.map(lambda l: l.split()).map(lambda l: (int(l[0]), (int(l[1]), float(l[2]))))\n\n\nratingsRDD.takeOrdered(10, key = lambda x: x[0])\n\nratingsRDD.take(4)\n\n# Movies rated by same user. 
==> [ user ID ==> ( (movieID, rating), (movieID, rating)) ]\nuserJoinedRatings = ratingsRDD.join(ratingsRDD)\n\nuserJoinedRatings.takeOrdered(10, key = lambda x: x[0])\n\n# Remove dups\ndef filterDups( (userID, ratings) ):\n (movie1, rating1) = ratings[0]\n (movie2, rating2) = ratings[1]\n return movie1 < movie2\n\nuniqueUserJoinedRatings = userJoinedRatings.filter(filterDups)\n\nuniqueUserJoinedRatings.takeOrdered(10, key = lambda x: x[0])\n\n# Now key by (movie1, movie2) pairs ==> (movie1, movie2) => (rating1, rating2)\ndef makeMovieRatingPairs((user, ratings)):\n (movie1, rating1) = ratings[0]\n (movie2, rating2) = ratings[1]\n return ((movie1, movie2), (rating1, rating2))\n\nmoviePairs = uniqueUserJoinedRatings.map(makeMovieRatingPairs)\n\nmoviePairs.takeOrdered(10, key = lambda x: x[0])\n\n#collect all ratings for each movie pair and compute similarity. (movie1, movie2) = > (rating1, rating2), (rating1, rating2) ...\nmoviePairRatings = moviePairs.groupByKey()\nmoviePairRatings.takeOrdered(10, key = lambda x: x[0])\n\n#Compute Similarity\ndef cosineSimilarity(ratingPairs):\n numPairs = 0\n sum_xx = sum_yy = sum_xy = 0\n for ratingX, ratingY in ratingPairs:\n sum_xx += ratingX * ratingX\n sum_yy += ratingY * ratingY\n sum_xy += ratingX * ratingY\n numPairs += 1\n\n numerator = sum_xy\n denominator = sqrt(sum_xx) * sqrt(sum_yy)\n\n score = 0\n if (denominator):\n score = (numerator / (float(denominator)))\n\n return (score, numPairs)\n\nmoviePairSimilarities = moviePairRatings.mapValues(cosineSimilarity).cache()\n\nmoviePairSimilarities.takeOrdered(10, key = lambda x: x[0])", "Lets find similar movies for Toy Story (Movie ID: 1)", "scoreThreshold = 0.97\ncoOccurenceThreshold = 50\n\ninputMovieID = 1 #Toy Story. \n\n# Filter for movies with this sim that are \"good\" as defined by our quality thresholds.\nfilteredResults = moviePairSimilarities.filter(lambda((pair,sim)): \\\n (pair[0] == inputMovieID or pair[1] == inputMovieID) and sim[0] > scoreThreshold and sim[1] > coOccurenceThreshold)\n\n #Top 10 by quality score.\n \nresults = filteredResults.map(lambda((pair,sim)): (sim, pair)).sortByKey(ascending = False).take(10)\n\nprint \"Top 10 similar movies for \" + nameDict[inputMovieID]\n\nfor result in results:\n (sim, pair) = result\n # Display the similarity result that isn't the movie we're looking at\n similarMovieID = pair[0]\n if (similarMovieID == inputMovieID):\n similarMovieID = pair[1]\n print nameDict[similarMovieID] + \"\\tscore: \" + str(sim[0]) + \"\\tstrength: \" + str(sim[1])", "Recommender using MLLib\nTraining the recommendation model", "ratings = data.map(lambda l: l.split()).map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2]))).cache()\nratings.take(3)\n\nnratings = ratings.count()\nnUsers = ratings.keys().distinct().count()\nnMovies = ratings.values().distinct().count()\n\nprint \"We have Got %d ratings from %d users on %d movies.\" % (nratings, nUsers, nMovies)\n\n# Build the recommendation model using Alternating Least Squares\n#Train a matrix factorization model given an RDD of ratings given by users to items, in the form of\n#(userID, itemID, rating) pairs. We approximate the ratings matrix as the product of two lower-rank matrices \n#of a given rank (number of features). To solve for these features, we run a given number of iterations of ALS. 
\n#The level of parallelism is determined automatically based on the number of partitions in ratings.\n\nstart = time()\n\nseed = 5L\niterations = 10\nrank = 8\n\nmodel = ALS.train(ratings, rank, iterations)\n\nduration = time() - start\n\nprint \"Model trained in %s seconds\" % round(duration,3)", "Recommendations", "#Lets recommend movies for the user id - 2\nuserID = 2\n\nprint \"\\nTop 10 recommendations:\"\nrecommendations = model.recommendProducts(userID, 10)\nfor recommendation in recommendations:\n print nameDict[int(recommendation[1])] + \\\n \" score \" + str(recommendation[2])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
automl/SpySMAC
examples/autopytorch/apt_notebook.ipynb
bsd-3-clause
[ "Using CAVE with AutoPyTorch\nAutoPyTorch provides a framework for automated neural-network-configuration. Currently it supports BOHB for hyperparameter search.\nCAVE integrates with AutoPyTorch, providing further insights and visualizations.\nThis notebook provides an exemplary pipeline for using CAVE on / with AutoPyTorch.\nWe will generate some AutoPyTorch-Output.\nYou can use your own AutoPyTorch-routine here, we will use the openml-tasks, inspired by AutoPyTorch's tutorial notebook.\nNote: This example adapts with the refactor of the APT project.\nSince logging is not yet finally implemented in the APT project, this example is not necessarily fully executable...\nHowever, feel free to open issues on errors you encounter in the issue tracker.", "# Remove the old example output\nimport os\nimport logging\nimport tempfile\nimport shutil\nlog_dir = \"logs/apt-cave-notebook/\"\nrerun_apt = False\n\nlogging.basicConfig(level=logging.DEBUG)\n\nfrom autoPyTorch import AutoNetClassification\nimport os as os\nimport openml\nimport json\nfrom ConfigSpace.read_and_write import json as pcs_json\n# Logging\nfrom autoPyTorch.components.metrics.additional_logs import *\nfrom autoPyTorch.pipeline.nodes import LogFunctionsSelector\n\nif rerun_apt:\n # Remove old results\n if os.path.exists(log_dir):\n archive_path = shutil.make_archive(os.path.join(tempfile.mkdtemp(), '.OLD'), 'zip', log_dir)\n shutil.rmtree(log_dir)\n os.makedirs(log_dir)\n shutil.move(archive_path, log_dir)\n else:\n os.makedirs(log_dir)\n\n\n task = openml.tasks.get_task(task_id=31)\n\n X, y = task.get_X_and_y()\n ind_train, ind_test = task.get_train_test_split_indices()\n X_train, Y_train = X[ind_train], y[ind_train]\n X_test, Y_test = X[ind_test], y[ind_test]\n\n autopytorch = AutoNetClassification(config_preset=\"medium_cs\",\n result_logger_dir=log_dir,\n log_every_n_datapoints=10,\n use_tensorboard_logger=True,\n additional_logs=[test_result.__name__,\n test_cross_entropy.__name__,\n test_balanced_accuracy.__name__],\n )\n\n # Get data from the openml task \"Supervised Classification on credit-g (https://www.openml.org/t/31)\"\n task = openml.tasks.get_task(task_id=31)\n X, y = task.get_X_and_y()\n ind_train, ind_test = task.get_train_test_split_indices()\n X_train, Y_train = X[ind_train], y[ind_train]\n X_test, Y_test = X[ind_test], y[ind_test]\n \n \n # Equip autopytorch with additional logs\n gl = GradientLogger()\n lw_gl = LayerWiseGradientLogger()\n additional_logs = [gradient_max(gl), gradient_mean(gl), gradient_median(gl), gradient_std(gl),\n gradient_q10(gl), gradient_q25(gl), gradient_q75(gl), gradient_q90(gl),\n layer_wise_gradient_max(lw_gl), layer_wise_gradient_mean(lw_gl),\n layer_wise_gradient_median(lw_gl), layer_wise_gradient_std(lw_gl),\n layer_wise_gradient_q10(lw_gl), layer_wise_gradient_q25(lw_gl),\n layer_wise_gradient_q75(lw_gl), layer_wise_gradient_q90(lw_gl),\n gradient_norm()]\n\n for additional_log in additional_logs:\n autopytorch.pipeline[LogFunctionsSelector.get_name()].add_log_function(name=type(additional_log).__name__,\n log_function=additional_log)\n\n #sampling_space[\"additional_logs\"].append(type(additional_log).__name__)\n\n autopytorch.pipeline[LogFunctionsSelector.get_name()].add_log_function(name=test_result.__name__, \n log_function=test_result(autopytorch, X[ind_test], y[ind_test]))\n autopytorch.pipeline[LogFunctionsSelector.get_name()].add_log_function(name=test_cross_entropy.__name__,\n log_function=test_cross_entropy(autopytorch, X[ind_test], y[ind_test]))\n 
autopytorch.pipeline[LogFunctionsSelector.get_name()].add_log_function(name=test_balanced_accuracy.__name__,\n log_function=test_balanced_accuracy(autopytorch, X[ind_test], y[ind_test]))\n\n # Fit to find an incumbent configuration with BOHB\n results_fit = autopytorch.fit(X_train=X_train,\n Y_train=Y_train,\n validation_split=0.3,\n max_runtime=750,\n min_budget=10,\n max_budget=50,\n refit=True,\n )\n autopytorch.refit_all_incumbents(X_train, Y_train)", "Note: APT is supposed to automatically log the results to the output directory. Until then, do in manually:", "if rerun_apt:\n # Save fit results as json\n with open(os.path.join(log_dir, \"results_fit.json\"), \"w\") as f:\n json.dump(results_fit, f, indent=2)\n \n # Also necessary information (can be migrated either to CAVE or (preferably) to autopytorch)\n with open(os.path.join(log_dir, 'configspace.json'), 'w') as f:\n f.write(pcs_json.write(autopytorch.get_hyperparameter_search_space(X_train=X_train,\n Y_train=Y_train)))\n with open(os.path.join(log_dir, 'autonet_config.json'), 'w') as f:\n json.dump(autopytorch.get_current_autonet_config(), f, indent=2)", "Next, spin up CAVE pass along the output directory.", "from cave.cavefacade import CAVE\n\ncave_output_dir = \"cave_output\"\n\ncave = CAVE([log_dir], # List of folders holding results\n cave_output_dir, # Output directory\n ['.'], # Target Algorithm Directory (only relevant for SMAC)\n file_format=\"APT\",\n verbose=\"DEBUG\")\n\ncave.apt_overview()\n\ncave.compare_default_incumbent()", "Other analyzers also run on the APT-data:", "cave.apt_tensorboard()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
massimo-nocentini/on-python
fdg/intro.ipynb
mit
[ "<p>\n<img src=\"http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg\" \n alt=\"UniFI logo\" style=\"float: left; width: 20%; height: 20%;\">\n<div align=\"right\">\nMassimo Nocentini<br>\n<small>\n<br>May 2018: intro\n</small>\n</div>\n</p>\n<br>\n<br>\n<div align=\"center\">\n<b>Abstract</b><br>\nTranslation of code in the introduction chapter of <a href=\"https://mitpress.mit.edu/books/functional-differential-geometry\">Functional Differential Geometry</a> book.\n</div>", "from operator import attrgetter\nfrom sympy import *\ninit_printing()", "", "f, L, w = Function('f'), Function('L'), Function('w') # abstract, Lagrangian and path functions, respectively\nt, q, q_point = symbols(r't q \\dot{q}') # symbols for the Leibniz notation\n\nLagrangian_eq = Eq(Derivative(Derivative(L(t, q, q_point),q_point,evaluate=False),\n t, evaluate=False) - L(t,q,q_point).diff(q),0)\nLagrangian_eq # compact Lagrangian equation, implicit indeed\n\nLagrangian_eq = Eq(Derivative(Subs(L(t, q, q_point).diff(q_point), [q,q_point], [w(t),w(t).diff(t)]),t) - \n Subs(L(t,q,q_point).diff(q), [q,q_point], [w(t),w(t).diff(t)]),0)\nLagrangian_eq\n\nLagrangian_eq.doit() # a complex explosion by automatic computation\n\ndef diff_positional(f, i):\n a = IndexedBase('a')\n der = f.diff(f.args[i])\n def D(*args):\n #return Lambda([a[i] for i in range(len(f.args))], \n return der.subs({f.args[i]:args[i] for i in range(len(f.args))}, simultaneous=True)\n return D # function D is a function of the meta-language, not in the object language\n\ndiff_positional(L(t, q, q_point), 2)(t,w(t),w(t).diff(t)) # :/\n\ndef diff_positional(f, i):\n return Function(r'\\partial_{{{}}}{}'.format(i, str(f))) # the \\partial is just a symbol, it hasn't meaning\n\n(Derivative(diff_positional(L, 2)(t,w(t), w(t).diff(t)), t) - \n diff_positional(L, 1)(t,w(t), w(t).diff(t))) # :(\n\nLagrangian_eq = Eq(Derivative(L(t,w(t), w(t).diff(t)).fdiff(argindex=3),t, evaluate=False) - \n L(t,w(t), w(t).diff(t)).fdiff(argindex=2),\n 0, evaluate=False)\nLagrangian_eq # :) , although it doesn't use \"positional derivative\" operator explicitly\n\ndef Derivative_eq(f, argindex=1):\n d = Dummy()\n args = [d if i+1 == argindex else a for i,a in enumerate(f.args)]\n lhs = f.fdiff(argindex)\n rhs = Subs(f.func(*args).diff(d), d, f.args[argindex-1])\n return Eq(lhs, rhs, evaluate=False)\n\nDerivative_eq(f(t,q,q_point),2) # applicative Derivative operator\n\ndef Gamma(w):\n return Lambda([t], (t, w(t), w(t).diff(t))) # state-space function\n\nGamma(w), Gamma(w)(t)\n\nLagrangian_eq = Eq(Derivative(L(*Gamma(w)(t)).fdiff(argindex=3),t, evaluate=False) - \n L(t,w(t), w(t).diff(t)).fdiff(argindex=2),\n 0, evaluate=False)\nLagrangian_eq\n\nclass FunctionsComposition(Function):\n\n nargs = 2\n \n def _latex(self, sexp):\n return r' \\circ '.join(map(latex, \n map(lambda a: a.func if isinstance(a, Function) else a, \n self.args)))\n \n def _eval_subs(self, old, new):\n f, g = self.args\n if old == f:\n return new.subs({f.args[0]:g}, simultaneous=True)\n \n \n\nF_o_w = FunctionsComposition(Function(r'\\mathcal{F}')(t), w(t))\nF_o_w\n\nF_o_w.subs({Function(r'\\mathcal{F}')(t):1/(1-t)})\n\nF_o_w.subs({w(t):2*t})\n\n_.subs({Function(r'\\mathcal{F}')(t):2*t+1})\n\nFunctionsComposition(L(t, w(t), w(t).diff(t)).fdiff(argindex=3),Gamma(w))\n\nm,v,k,q = symbols('m v k q')\nLagrangian_eq.subs({L:Lambda([t,q,v], (m*v**2)/2-(k*q**2)/2)}) # plug in the Lagrangian function\n\neq = _.doit() # do derivatives\neq\n\na, omega, phi = symbols(r'a \\omega 
\\phi')\n_.subs({w(t):a*cos(omega*t+phi)})\n\n_.doit().factor()\n\nsolve(_, omega)\n\ndsolve(eq, w(t)) # solve with respect to path function w(t)", "<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\" /></a><br />This work is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/4.0/\">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
joshnsolomon/phys202-2015-work
assignments/assignment03/NumpyEx03.ipynb
mit
[ "Numpy Exercise 3\nImports", "import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport antipackage\nimport github.ellisonbg.misc.vizarray as va", "Geometric Brownian motion\nHere is a function that produces standard Brownian motion using NumPy. This is also known as a Wiener Process.", "def brownian(maxt, n):\n \"\"\"Return one realization of a Brownian (Wiener) process with n steps and a max time of t.\"\"\"\n t = np.linspace(0.0,maxt,n)\n h = t[1]-t[0]\n Z = np.random.normal(0.0,1.0,n-1)\n dW = np.sqrt(h)*Z\n W = np.zeros(n)\n W[1:] = dW.cumsum()\n return t, W", "Call the brownian function to simulate a Wiener process with 1000 steps and max time of 1.0. Save the results as two arrays t and W.", "# YOUR CODE HERE\nt, W = brownian(1.0,1000)\n\n\nassert isinstance(t, np.ndarray)\nassert isinstance(W, np.ndarray)\nassert t.dtype==np.dtype(float)\nassert W.dtype==np.dtype(float)\nassert len(t)==len(W)==1000", "Visualize the process using plt.plot with t on the x-axis and W(t) on the y-axis. Label your x and y axes.", "# YOUR CODE HERE\nplt.plot(t,W)\nplt.xlabel('time')\nplt.ylabel('Wiener Process')\n\nassert True # this is for grading", "Use np.diff to compute the changes at each step of the motion, dW, and then compute the mean and standard deviation of those differences.", "# YOUR CODE HERE\ndW = np.diff(W)\nmean = dW.mean()\nstandard_deviation = dW.std()\nmean, standard_deviation\n\nassert len(dW)==len(W)-1\nassert dW.dtype==np.dtype(float)", "Write a function that takes $W(t)$ and converts it to geometric Brownian motion using the equation:\n$$\nX(t) = X_0 e^{((\\mu - \\sigma^2/2)t + \\sigma W(t))}\n$$\nUse Numpy ufuncs and no loops in your function.", "def geo_brownian(t, W, X0, mu, sigma):\n \"Return X(t) for geometric brownian motion with drift mu, volatility sigma.\"\"\"\n Xt = X0*np.exp((((mu-(sigma**2))/2)*t)+(sigma*W))\n return Xt\n\nassert True # leave this for grading", "Use your function to simulate geometric brownian motion, $X(t)$ for $X_0=1.0$, $\\mu=0.5$ and $\\sigma=0.3$ with the Wiener process you computed above.\nVisualize the process using plt.plot with t on the x-axis and X(t) on the y-axis. Label your x and y axes.", "# YOUR CODE HERE\nXt = geo_brownian(t, W, 1.0, .5, .3)\nplt.plot(t,Xt)\nplt.xlabel('time')\nplt.ylabel('position')\n\nassert True # leave this for grading" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sofianehaddad/gosa
doc/example_contrast.ipynb
lgpl-3.0
[ "Implementing and using a new contrast with pygosa\nThe benefit ot the module is to make easy the definition a new contrast class (see for example New sensitivity analysis subordinated to a contrast\nImplementing such contrast and apply it within the pygosa module relies on:\n\nThe definintion of a new class that inherites from ContrastSensitivityClass;\nThe definintion of the internal contrast method as function of y and t;\nThe definintion of the get_risk_value method as function that takes into account a data sample and returns the \n quantity of interest;\n\nOf course, the underlying contrast should be associated to the quantity of interest defined in get_rist_value. As an example, we illustrate how to define the mean contrast class in less than 10 lines and applying the contrast to compute the sensitivity factors:", "import openturns as ot\nimport numpy as np\nimport pygosa\n%pylab inline", "We define Sobol use-case, which is very common in case of sensitivity analysis:", "model = ot.SymbolicFunction([\"x1\",\"x2\",\"x3\"], [\"sin(x1) + 7*sin(x2)^2 + 0.1*(x3^4)*sin(x1)\"])\ndist = ot.ComposedDistribution( 3 * [ot.Uniform(-np.pi, np.pi)] )", "Design of experiment\nWe define the experiment design. The benefits of using a crude Monte-Carlo approach is the potential use of several contrasts.", "mcsp = pygosa.SensitivityDesign(dist=dist, model=model, size=5000)", "Moment of second order\nHereafter we define a new contrast class that helps evaluating sensitivities of $\\mathbb{E}(Y^2)$. The contrast class should :\n\nInherits from ContrastSensitivityAnalysis\nDefine the contrast method with signature contrast(self, y,t, **kwargs). It should have kwargs as arguments even if not used\nDefine the get_risk_value method with signature get_risk_value(self, data, **kwargs). Same remark concerning kwargs", "class Moment2SA(pygosa.ContrastSensitivityAnalysis):\n def __init__(self, design):\n super(Moment2SA, self).__init__(design)\n\n # contrast method\n def contrast(self, y,t, **kwargs):\n \"\"\"\n Contrast for moment of second order\n \"\"\"\n return (y*y-t)*(y*y-t)\n\n # Define risk function (second order moment)\n # moments2 = var + mean * mean\n def get_risk_value(self, data, **kwargs):\n mu = ot.Sample(data).computeMean()\n var = ot.Sample(data).computeVariance()\n return np.array(mu) * np.array(mu) + np.array(var)\n", "The previous class is a contrast similar to those provided by the module. We can thus easily apply it using the previous design:", "sam = Moment2SA(mcsp)\nfactors_m = sam.compute_factors()\nfig, ax = sam.boxplot()\n\nprint(factors_m)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ameliecordier/iutdoua-info_algo2015
2015-11-18 - TD12 - Introduction aux tableaux.ipynb
cc0-1.0
[ "Durant ce TD, nous avons introduit la notion de tableau. \nNous avons précisé qu'un tableau a une longueur fixe et contient des éléments du même type. \nNous avons également dit que chaque case du tableau est désignée par un indice (les indices commencent à 0).\nEnfin, len(tab) renvoie la longueur du tableau tab.", "tableau = [1, 3, 5, 6]\nprint(tableau[0])\nprint(len(tableau))", "Nous pouvons parcourir les éléments d'un tableau :", "tab = [\"pommes\", \"tomates\", \"fromage\", \"lait\", \"sucre\"]\ni = 0\nwhile i < len(tab):\n print(tab[i])\n i = i+1\n \n# Attention, notez la différence avec :\nj = 0\nwhile j < len(tab):\n print(j)\n j = j+1", "Exercice 1 : Recherchez si un élément est présent dans un tableau.", "def cherche(tab, elt):\n i = 0\n while i < len(tab):\n if tab[i] == elt:\n print(\"J'ai trouvé !\")\n i = i+1\n \ntableau = [\"pommes\", \"tomates\", \"fromage\", \"lait\", \"sucre\"]\ncherche(tableau, \"tomates\")", "Cette solution contient de nombreux défauts. À vous de les détecter et de proposer une solution améliorée." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jacksu/machine-learning
src/ml/kaggle/titanic/titanic.ipynb
mit
[ "import pandas as pd #数据分析\nimport numpy as np #科学计算\nfrom pandas import Series,DataFrame\n\ndata_train = pd.read_csv(\"train.csv\")\n#我们看到,总共有12列,其中Survived字段表示的是该乘客是否获救,其余都是乘客的个人信息,包括:\n\n#PassengerId => 乘客ID\n#Pclass => 乘客等级(1/2/3等舱位)\n#Name => 乘客姓名\n#Sex => 性别\n#Age => 年龄\n#SibSp => 堂兄弟/妹个数\n#Parch => 父母与小孩个数\n#Ticket => 船票信息\n#Fare => 票价\n#Cabin => 客舱\n#Embarked => 登船港口\ndata_train", "数据探索\n上面数据是详细数据列表,一眼看不出来啥,先来看一下数据的大致情况。", "data_train.info()", "从整体数据信息来看,总共包含891个顾客信息,总共有714个顾客有年龄信息,船舱信息缺失比较严重。", "data_train.describe()", "从上可以看出,头等舱顾客比较少,不到25%,平均年龄不到30,看起来都比较年轻啊,家里人平均数都不到1,\n看来计划生育搞得不错,数字看起来太不直观了,画图看看。", "#每个/多个 属性和最后的Survived之间有着什么样的关系\n#中文乱码:http://blog.csdn.net/heloowird/article/details/46343519\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus']=False #用来正常显示负号\n\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nplt.subplot2grid((2,3),(0,0)) # 在一张大图里分列几个小图\ndata_train.Survived.value_counts().plot(kind='bar')# 柱状图 \nplt.title(\"获救情况 (1为获救)\") # 标题\nplt.ylabel(\"人数\") \n\nplt.subplot2grid((2,3),(0,1))\ndata_train.Pclass.value_counts().plot(kind=\"bar\")\nplt.ylabel(u\"人数\")\nplt.title(u\"乘客等级分布\")\n\nplt.subplot2grid((2,3),(0,2))\nplt.scatter(data_train.Survived, data_train.Age)\nplt.ylabel(u\"年龄\") # 设定纵坐标名称\nplt.grid(b=True, which='major', axis='y') \nplt.title(u\"按年龄看获救分布 (1为获救)\")\n\n\nplt.subplot2grid((2,3),(1,0), colspan=2)\ndata_train.Age[data_train.Pclass == 1].plot(kind='kde') \ndata_train.Age[data_train.Pclass == 2].plot(kind='kde')\ndata_train.Age[data_train.Pclass == 3].plot(kind='kde')\nplt.xlabel(u\"年龄\")# plots an axis lable\nplt.ylabel(u\"密度\") \nplt.title(u\"各等级的乘客年龄分布\")\nplt.legend((u'头等舱', u'2等舱',u'3等舱'),loc='best') # sets our legend for our graph.\n\n\nplt.subplot2grid((2,3),(1,2))\ndata_train.Embarked.value_counts().plot(kind='bar')\nplt.title(u\"各登船口岸上船人数\")\nplt.ylabel(u\"人数\") \n\nplt.tight_layout()\nplt.show()", "这个时候我们可能会有一些想法了:\n\n不同舱位/乘客等级可能和财富/地位有关系,最后获救概率可能会不一样\n年龄对获救概率也一定是有影响的,副船长曾说『小孩和女士先走』呢\n和登船港口是不是有关系呢?也许登船港口不同,人的出身地位不同?", "#看看各乘客等级的获救情况\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nSurvived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()\nSurvived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()\ndf=pd.DataFrame({u'获救':Survived_1, u'未获救':Survived_0})\ndf.plot(kind='bar', stacked=True)\nplt.title(u\"各乘客等级的获救情况\")\nplt.xlabel(u\"乘客等级\") \nplt.ylabel(u\"人数\") \nplt.show()", "头等舱明显获救的概率高", "#看看各性别的获救情况\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nSurvived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()\nSurvived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()\ndf=pd.DataFrame({u'男性':Survived_m, u'女性':Survived_f})\ndf.plot(kind='bar', stacked=True)\nplt.title(u\"按性别看获救情况\")\nplt.xlabel(u\"性别\") \nplt.ylabel(u\"人数\")\nplt.show()", "歪果盆友果然很尊重lady,lady first践行得不错。性别无疑也要作为重要特征加入最后的模型之中。", "#然后我们再来看看各种舱级别情况下各性别的获救情况\nfig=plt.figure()\nfig.set(alpha=0.65) # 设置图像透明度,无所谓\nplt.title(u\"根据舱等级和性别的获救情况\")\n\nax1=fig.add_subplot(141)\ndata_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar', label=\"female highclass\", color='#FA2479')\nax1.set_xticklabels([u\"获救\", u\"未获救\"], rotation=0)\nax1.legend([u\"女性/高级舱\"], loc='best')\n\nax2=fig.add_subplot(142, sharey=ax1)\ndata_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='female, low class', color='pink')\nax2.set_xticklabels([u\"未获救\", 
u\"获救\"], rotation=0)\nplt.legend([u\"女性/低级舱\"], loc='best')\n\nax3=fig.add_subplot(143, sharey=ax1)\ndata_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar', label='male, high class',color='lightblue')\nax3.set_xticklabels([u\"未获救\", u\"获救\"], rotation=0)\nplt.legend([u\"男性/高级舱\"], loc='best')\n\nax4=fig.add_subplot(144, sharey=ax1)\ndata_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='male low class', color='steelblue')\nax4.set_xticklabels([u\"未获救\", u\"获救\"], rotation=0)\nplt.legend([u\"男性/低级舱\"], loc='best')\n\nplt.tight_layout()\nplt.show()\n\n#看看各登船港口获救情况\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nSurvived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()\nSurvived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()\ndf=pd.DataFrame({u'获救':Survived_1, u'未获救':Survived_0})\ndf.plot(kind='bar', stacked=True)\nplt.title(u\"各登船港口的获救情况\")\nplt.xlabel(u\"登船港口\") \nplt.ylabel(u\"人数\") \nplt.show()\n\n#看看堂兄妹个数的获救情况\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nSurvived_0 = data_train.SibSp[data_train.Survived == 0].value_counts()\nSurvived_1 = data_train.SibSp[data_train.Survived == 1].value_counts()\ndf=pd.DataFrame({u'获救':Survived_1, u'未获救':Survived_0})\ndf.plot(kind='bar', stacked=True)\nplt.title(u\"堂兄妹的获救情况\")\nplt.xlabel(u\"堂兄妹数\") \nplt.ylabel(u\"人数\") \nplt.show()\n\n\n#看看父母孩子数的获救情况\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nSurvived_0 = data_train.Parch[data_train.Survived == 0].value_counts()\nSurvived_1 = data_train.Parch[data_train.Survived == 1].value_counts()\ndf=pd.DataFrame({u'获救':Survived_1, u'未获救':Survived_0})\ndf.plot(kind='bar', stacked=True)\nplt.title(u\"父母孩子数的获救情况\")\nplt.xlabel(u\"父母孩子数\") \nplt.ylabel(u\"人数\") \nplt.show()\n\n#ticket是船票编号,应该是unique的,和最后的结果没有太大的关系,先不纳入考虑的特征范畴把\n#cabin只有204个乘客有值,我们先看看它的一个分布\ndata_train.Cabin.value_counts()\n\nfig = plt.figure()\nfig.set(alpha=0.2) # 设定图表颜色alpha参数\n\nSurvived_cabin = data_train.Survived[pd.notnull(data_train.Cabin)].value_counts()\nSurvived_nocabin = data_train.Survived[pd.isnull(data_train.Cabin)].value_counts()\ndf=pd.DataFrame({u'有':Survived_cabin, u'无':Survived_nocabin}).transpose()\ndf.plot(kind='bar', stacked=True)\nplt.title(u\"按Cabin有无看获救情况\")\nplt.xlabel(u\"Cabin有无\") \nplt.ylabel(u\"人数\")\nplt.show()", "数据预处理\n处理missing value\n这里学问有点深,如果各位有好的经验可以跟我交流下。以我浅薄的经验来说我一般会分情况处理\n\n\n如果missing value占总体的比例非常小,那么直接填入平均值或者众数\n如果missing value所占比例不算小也不算大,那么可以考虑它跟其他特征的关系,如果关系明显,那么直接根据其他特征填入;也可以建立简单的模型,比如线性回归,随机森林等。\n如果missing value所占比例大,那么直接将miss value当做一种特殊的情况,另取一个值填入\n\n\n用scikit-learn中的RandomForest来拟合一下缺失的年龄数据", "from sklearn.ensemble import RandomForestRegressor\n\n### 使用 RandomForestClassifier 填补缺失的年龄属性\ndef set_missing_ages(df):\n\n # 把已有的数值型特征取出来丢进Random Forest Regressor中\n age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]\n\n # 乘客分成已知年龄和未知年龄两部分\n known_age = age_df[age_df.Age.notnull()].as_matrix()\n unknown_age = age_df[age_df.Age.isnull()].as_matrix()\n\n # y即目标年龄\n y = known_age[:, 0]\n\n # X即特征属性值\n X = known_age[:, 1:]\n\n # fit到RandomForestRegressor之中\n rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)\n rfr.fit(X, y)\n\n # 用得到的模型进行未知年龄结果预测\n predictedAges = rfr.predict(unknown_age[:, 1:])\n\n # 用得到的预测结果填补原缺失数据\n df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges \n\n return df, rfr\n\ndef set_Cabin_type(df):\n df.loc[ (df.Cabin.notnull()), 'Cabin' ] = \"Yes\"\n df.loc[ (df.Cabin.isnull()), 'Cabin' ] = \"No\"\n 
return df\n\ndata_train, rfr = set_missing_ages(data_train)\ndata_train = set_Cabin_type(data_train)\nprint(data_train.head())\n\n#因为逻辑回归建模时,需要输入的特征都是数值型特征,我们通常会先对类目型的特征因子化。\ndummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix= 'Cabin')\n\ndummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix= 'Embarked')\n\ndummies_Sex = pd.get_dummies(data_train['Sex'], prefix= 'Sex')\n\ndummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix= 'Pclass')\n\ndf = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)\ndf.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)\ndf", "有一种临近结果的宠宠欲动感吧,莫急莫急,我们还得做一些处理,仔细看看Age和Fare两个属性,乘客的数值幅度变化,也忒大了吧!!如果大家了解逻辑回归与梯度下降的话,会知道,各属性值之间scale差距太大,将对收敛速度造成几万点伤害值!甚至不收敛!所以我们先用scikit-learn里面的preprocessing模块对这俩货做一个标准化。可以参考机器学习之特征工程-数据预处理", "import sklearn.preprocessing as preprocessing\nscaler = preprocessing.StandardScaler()\nage_scale_param = scaler.fit(df['Age'])\ndf['Age_scaled'] = age_scale_param.fit_transform(df['Age'])\nfare_scale_param = scaler.fit(df['Fare'])\ndf['Fare_scaled'] = fare_scale_param.fit_transform(df['Fare'])\ndf\n\n#选择线性回归\nfrom sklearn import linear_model\n\n# 用正则取出我们要的属性值\ntrain_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')\ntrain_np = train_df.as_matrix()\n\n# y即Survival结果\ny = train_np[:, 0]\n\n# X即特征属性值\nX = train_np[:, 1:]\n\n# fit到RandomForestRegressor之中\nclf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)\nclf.fit(X, y)\n\nclf\n\npd.DataFrame({\"columns\":list(train_df.columns)[1:], \"coef\":list(clf.coef_.T)})\n" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tjwei/class2016
mnist.ipynb
mit
[ "起手式,導入 numpy, matplotlib", "%pylab inline", "下載 mnist 資料", "import os\nimport urllib\ndataset = 'mnist.pkl.gz'\ndef reporthook(a,b,c):\n print \"\\rdownloading: %5.1f%%\"%(a*b*100.0/c),\n \nif not os.path.isfile(dataset):\n origin = \"https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz\"\n print('Downloading data from %s' % origin)\n urllib.urlretrieve(origin, dataset, reporthook=reporthook)", "載入訓練資料 train_set 和測試資料 test_set", "import gzip\nimport pickle\nwith gzip.open(dataset, 'rb') as f:\n train_set, valid_set, test_set = pickle.load(f)", "查看 mnist 資料的概況,用 .shape 看 np.array 的形狀\ntrain_set 有五萬筆資料,第一部份是五萬筆長度為 784 的向量。第二部份是五萬個數字\ntest_set 則有一萬筆同樣形式的資料", "print \"train_set\", train_set[0].shape, train_set[1].shape\nprint \"valid_set\", valid_set[0].shape, valid_set[1].shape\nprint \"test_set\", test_set[0].shape, test_set[1].shape", "資料的第一部份,每一筆都是一個 28x28 的圖片(28*28=784)\n用 reshape 把長度784 的向量轉成 28*28 的方陣,就能當成圖片來看\n下面是第一筆訓練資料的圖片", "imshow(train_set[0][0].reshape((28, 28)), cmap=\"gray\")", "寫一個函數可以更方面的看圖。\n我們查看前 5 筆資料,分別是 5張圖片,以及對應的 5 個數字", "def show(x, i=[0]):\n plt.figure(i[0])\n imshow(x.reshape((28,28)), cmap=\"gray\")\n i[0]+=1\nfor i in range(5):\n print train_set[1][i]\n show(train_set[0][i])", "train_set 是用來訓練我們的模型用的\n我們的模型是很簡單的 logistic regression 模型,用到的參數只有一個 784x10 的矩陣 W 和一個長度 10 的向量 b。\n我們先用均勻隨機亂數來設定 W 和 b 。", "W = np.random.uniform(low=-1, high=1, size=(28*28,10))\nb = np.random.uniform(low=-1, high=1, size=10)\n", "完整的模型如下\n將圖片看成是長度 784 的向量 x\n計算 Wx+b, 然後再取 exp。 最後得到的十個數值。將這些數值除以他們的總和。\n我們希望出來的數字會符合這張圖片是這個數字的機率。\n$softmax_i(W x + b) = \\frac {e^{W_i x + b_i}} {\\sum_j e^{W_j x + b_j}}$\n先拿第一筆資料試試看, x 是輸入。 y 是這張圖片對應到的數字(以這個例子來說 y=5)。", "x = train_set[0][0]\ny = train_set[1][0]", "先計算 exp(Wx+b)", "Pr = exp(dot(x, W)+b)\nPr.shape", "然後 normalize,讓總和變成 1 (符合機率的意義)", "Pr = Pr/Pr.sum()\nprint Pr", "由於 W 和 b 都是隨機設定的,所以上面我們算出的機率也是隨機的。\n如果照上面的機率來看,y=2 的機率有 54.5% 為最高。 y=5 的機率只有 24% (也不低,但只是運氣好)\n為了要評斷我們的預測的品質,要設計一個評斷誤差的方式,我們用的方法如下(不是常見的方差,而是用熵的方式來算,好處是容易微分,效果好)\n$ error = - \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) $\n上述的誤差評分方式,常常稱作 error 或者 loss,數學式可能有點費解。實際計算其實很簡單,就是下面的式子", "loss = -log(Pr[y])\nloss", "目前的誤差 1.4215 不算太差,畢竟我們運氣很好,隨機的 W 和 b,居然能讓正確答案有 24% 的機率。\n不過我們還是要想辦法改進。 我們用一種被稱作是 gradient descent 的方式來改善我們的誤差。\n因為我們知道 gradient 是讓函數上升最快的方向。所以我們如果朝 gradient 的反方向走一點點(也就是下降最快的方向),那麼得到的函數值應該會小一點。\n記得我們的變數是 W 和 b (裡面總共有 28*20+10 個變數),所以我們要把 loss 對 W 和 b 裡面的每一個參數來偏微分。\n還好這個偏微分是可以用手算出他的形式,而最後偏微分的式子也不會很複雜。\n對 b 的偏微分如下", "gradb = Pr.copy()\ngradb[y] -= 1\nprint gradb", "對 W 的偏微分也不難", "print Pr.shape, x.shape, W.shape\ngradW = dot(x.reshape(784,1), Pr.reshape(1,10), )\ngradW[:, y] -= x", "算好 gradient 後,讓 W 和 b 分別往 gradient 反方向走一點點,得到新的 W 和 b", "W -= 0.1 * gradW\nb -= 0.1 * gradb", "再一次計算 Pr 以及 loss", "Pr = exp(dot(x, W)+b)\nPr = Pr/Pr.sum()\nloss = -log(Pr[y])\nloss", "發現這次誤差下降到 0.0005 左右,改進不少\n我們將同樣的方式輪流對五萬筆訓練資料來做,看看情形會如何", "W = np.random.uniform(low=-1, high=1, size=(28*28,10))\nb = np.random.uniform(low=-1, high=1, size=10)\nscore = 0\nN=50000*20\nd = 0.001\nlearning_rate = 1e-2\nfor i in xrange(N):\n if i%50000==0:\n print i, \"%5.3f%%\"%(score*100)\n x = train_set[0][i%50000]\n y = train_set[1][i%50000]\n Pr = exp(dot(x, W)+b)\n Pr = Pr/Pr.sum()\n loss = -log(Pr[y])\n score *=(1-d)\n if Pr.argmax() == y:\n score += d\n gradb = Pr.copy()\n gradb[y] -= 1\n gradW = dot(x.reshape(784,1), Pr.reshape(1,10), )\n gradW[:, y] -= x\n W -= learning_rate * gradW\n b -= learning_rate * gradb\n ", "結果發現正確率大約是 92.42%, 
但這是對訓練資料而不是對測試資料\n而且,一筆一筆的訓練資也有點慢,線性代數的特點就是能夠向量運算。如果把很多筆 x 當成列向量組合成一個矩陣(然後還是叫做 x),由於矩陣乘法的原理,我們還是一樣計算 Wx+b , 就可以同時得到多筆結果。\n下面的函數,可以一次輸入多筆 x, 同時一次計算多筆 x 的結果和準確率。", "def compute_Pr(x):\n Pr = exp(dot(x, W)+b)\n return Pr/Pr.sum(axis=1, keepdims=True)\ndef compute_accuracy(Pr, y):\n return mean(Pr.argmax(axis=1)==y)\n \n ", "下面是更新過得訓練過程, 當 i%100000 時,順便計算一下 test accuracy 和 valid accuracy。", "W = np.random.uniform(low=-1, high=1, size=(28*28,10))\nb = np.random.uniform(low=-1, high=1, size=10)\nscore = 0\nN=50000*100\nbatch_size = 500\nlearning_rate = .7\nfor i in xrange(0, N, batch_size):\n if i%100000==0:\n x, y = test_set[0], test_set[1]\n test_score = compute_accuracy(compute_Pr(x), y)*100\n x, y = valid_set[0], valid_set[1]\n valid_score = compute_accuracy(compute_Pr(x), y)*100\n print i, \"%5.2f%%\"%test_score, \"%5.2f%%\"%valid_score\n # 隨機選出一些訓練資料出來\n rndidx = np.random.choice(train_set[0].shape[0], batch_size, replace=False)\n x, y = train_set[0][rndidx], train_set[1][rndidx]\n # 一次計算所有的 Pr\n Pr = compute_Pr(x)\n # 計算平均 gradient \n gradb = Pr.mean(axis=0)-[(y==i).mean() for i in range(10)]\n gradW = dot(x.T, Pr)\n for i in range(batch_size):\n gradW[:, y[i]]-=x[i]\n gradW /= batch_size\n # 更新 W 和 b\n W -= learning_rate * gradW\n b -= learning_rate * gradb", "最後得到的準確率是 92%-93%\n不算完美,不過畢竟這只有一個矩陣而已。", "x, y = test_set[0], test_set[1]\nPr = compute_Pr(x)\ntest_score = compute_accuracy(Pr, y)*100\nx, y = valid_set[0], valid_set[1]\nPr = compute_Pr(x)\nvalid_score = compute_accuracy(Pr, y)*100\nprint \"test accuracy %5.2f%%\"%test_score, \"valid accuracy %5.2f%%\"%valid_score\nx, y = train_set[0], train_set[1]\nPr = compute_Pr(x)\ntrain_score = compute_accuracy(Pr, y)*100\nprint \"train accuracy %5.2f%%\"%train_score", "光看數據沒感覺,我們來看看前十筆測試資料跑起來的情形\n可以看到前十筆只有錯一個", "x = test_set[0][:10]\ny = test_set[1][:10]\nPr = compute_Pr(x)\nprint Pr.argmax(axis=1)\nprint y\nfor i in range(10):\n show(x[i])", "看看前一百筆資料中,是哪些情況算錯", "x = test_set[0][:100]\ny = test_set[1][:100]\nPr = compute_Pr(x)\ny2 = Pr.argmax(axis=1)\nfor i in range(100):\n if y2[i] != y[i]:\n print y2[i], y[i]\n show(x[i])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
NathanYee/ThinkBayes2
code/survival.ipynb
gpl-2.0
[ "Survival Analysis\nThis notebook presents code and exercises from Think Bayes, second edition.\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "from __future__ import print_function, division\n\n% matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport math\nimport numpy as np\n\nfrom thinkbayes2 import Pmf, Cdf, Suite, Joint\nimport thinkplot", "The Weibull distribution\nThe Weibull distribution is often used in survival analysis because it models the distribution of lifetimes for manufactured products, at least over some parts of the range.\nThe following functions evaluate its PDF and CDF.", "def EvalWeibullPdf(x, lam, k):\n \"\"\"Computes the Weibull PDF.\n\n x: value\n lam: parameter lambda in events per unit time\n k: parameter\n\n returns: float probability density\n \"\"\"\n arg = (x / lam)\n return k / lam * arg**(k-1) * np.exp(-arg**k)\n\ndef EvalWeibullCdf(x, lam, k):\n \"\"\"Evaluates CDF of the Weibull distribution.\"\"\"\n arg = (x / lam)\n return 1 - np.exp(-arg**k)\n\ndef MakeWeibullPmf(lam, k, high, n=200):\n \"\"\"Makes a PMF discrete approx to a Weibull distribution.\n\n lam: parameter lambda in events per unit time\n k: parameter\n high: upper bound\n n: number of values in the Pmf\n\n returns: normalized Pmf\n \"\"\"\n xs = np.linspace(0, high, n)\n ps = EvalWeibullPdf(xs, lam, k)\n return Pmf(dict(zip(xs, ps)))", "SciPy also provides functions to evaluate the Weibull distribution, which I'll use to check my implementation.", "from scipy.stats import weibull_min\n\nlam = 2\nk = 1.5\nx = 0.5\n\nweibull_min.pdf(x, k, scale=lam)\n\nEvalWeibullPdf(x, lam, k)\n\nweibull_min.cdf(x, k, scale=lam)\n\nEvalWeibullCdf(x, lam, k)", "And here's what the PDF looks like, for these parameters.", "pmf = MakeWeibullPmf(lam, k, high=10)\nthinkplot.Pdf(pmf)\nthinkplot.Config(xlabel='Lifetime',\n ylabel='PMF')", "We can use np.random.weibull to generate random values from a Weibull distribution with given parameters.\nTo check that it is correct, I generate a large sample and compare its CDF to the analytic CDF.", "def SampleWeibull(lam, k, n=1):\n return np.random.weibull(k, size=n) * lam\n\ndata = SampleWeibull(lam, k, 10000)\ncdf = Cdf(data)\nmodel = pmf.MakeCdf()\nthinkplot.Cdfs([cdf, model])", "Exercise: Write a class called LightBulb that inherits from Suite and Joint and provides a Likelihood function that takes an observed lifespan as data and a tuple, (lam, k), as a hypothesis. It should return a likelihood proportional to the probability of the observed lifespan in a Weibull distribution with the given parameters.\nTest your method by creating a LightBulb object with an appropriate prior and update it with a random sample from a Weibull distribution.\nPlot the posterior distributions of lam and k. As the sample size increases, does the posterior distribution converge on the values of lam and k used to generate the sample?", "# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here", "Exercise: Now suppose that instead of observing a lifespan, k, you observe a lightbulb that has operated for 1 year and is still working. Write another version of LightBulb that takes data in this form and performs an update.", "# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here", "Exercise: Now let's put it all together. 
Suppose you have 15 lightbulbs installed at different times over a 10 year period. When you observe them, some have died and some are still working. Write a version of LightBulb that takes data in the form of a (flag, x) tuple, where:\n\nIf flag is eq, it means that x is the actual lifespan of a bulb that has died.\nIf flag is gt, it means that x is the current age of a bulb that is still working, so it is a lower bound on the lifespan.\n\nTo help you test, I will generate some fake data.\nFirst, I'll generate a Pandas DataFrame with random start times and lifespans. The columns are:\nstart: time when the bulb was installed\nlifespan: lifespan of the bulb in years\nend: time when bulb died or will die\nage_t: age of the bulb at t=10", "import pandas as pd\n\nlam = 2\nk = 1.5\nn = 15\nt_end = 10\nstarts = np.random.uniform(0, t_end, n)\nlifespans = SampleWeibull(lam, k, n)\n\ndf = pd.DataFrame({'start': starts, 'lifespan': lifespans})\ndf['end'] = df.start + df.lifespan\ndf['age_t'] = t_end - df.start\n\ndf.head()", "Now I'll process the DataFrame to generate data in the form we want for the update.", "data = []\nfor i, row in df.iterrows():\n if row.end < t_end:\n data.append(('eq', row.lifespan))\n else:\n data.append(('gt', row.age_t))\n \nfor pair in data:\n print(pair)\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here", "Exercise: Suppose you install a light bulb and then you don't check on it for a year, but when you come back, you find that it has burned out. Extend LightBulb to handle this kind of data, too.", "# Solution goes here", "Prediction\nExercise: Suppose we know that, for a particular kind of lightbulb in a particular location, the distribution of lifespans is well modeled by a Weibull distribution with lam=2 and k=1.5. If we install n=100 lightbulbs and come back one year later, what is the distribution of c, the number of lightbulbs that have burned out?", "# Solution goes here\n\n# Solution goes here", "Exercise: Now suppose that lam and k are not known precisely, but we have a LightBulb object that represents the joint posterior distribution of the parameters after seeing some data. Compute the posterior predictive distribution for c, the number of bulbs burned out after one year.", "# Solution goes here\n\n# Solution goes here" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
shareactorIO/pipeline
source.ml/jupyterhub.ml/notebooks/zz_old/Spark/Intro/Lab 3 - Machine Learning/IntroToSparkMLlib.ipynb
apache-2.0
[ "Lab 3 - Online Purchase Recommendations\nLearn how to create a recommendation engine using the Alternating Least Squares algorithm in Spark's machine learning library\nThe data\nThis is a transnational data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retail. The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers.\nhttp://archive.ics.uci.edu/ml/datasets/Online+Retail\n<img src='https://raw.githubusercontent.com/rosswlewis/RecommendationPoT/master/FullFile.png' width=\"80%\" height=\"80%\"></img>\nCreate an RDD from the csv data", "#Put the csv into an RDD (at first, each row in the RDD is a string which\n#correlates to a line in the csv\nretailData = sc.textFile(\"OnlineRetail.csv\")\nprint retailData.take(2)", "Prepare and shape the data", "from pyspark.mllib.recommendation import ALS, Rating\nimport re\n\n#Remove the header from the RDD\nheader = retailData.first()\nretailData = retailData.filter(lambda line: line != header)\n\n#To produce the ALS model, we need to train it with each individual\n#purchase. Each record in the RDD must be the customer id, \n#item id, and the rating. In this case, the rating is the quantity\n#ordered. MLlib converts these into a sparce, unfactored matrix.\nretailData = retailData.map(lambda l: l.split(\",\")).\\\n filter(lambda l: int(l[3]) > 0 and len(re.sub(\"\\D\", \"\", l[1])) != 0 and len(l[6]) != 0).\\\n map(lambda l: (int(l[6]),int(re.sub(\"\\D\", \"\", l[1])),int(l[3])))\n\n#Randomly split the data into a testing set and a training set\ntestRDD, trainRDD = retailData.randomSplit([.2,.8])\n\ntrainData = trainRDD.map(lambda l: Rating(l[0],l[1],l[2]))\n\nprint trainData.take(2)\nprint\nprint testRDD.take(2)", "Build the recommendation model", "#Use trainging RDD to train a model with Alternating Least Squares \n#rank=5\n#5 columns in the user-feature and product-feature matricies\n#iterations=10\n#10 factorization runs\nrank = 5\nnumIterations = 10\nmodel = ALS.train(trainData, rank, numIterations)\n\nprint \"The model has been trained\"", "Test the model", "#Evaluate the model with the test rdd by using the predictAll function\npredict = model.predictAll(testRDD.map(lambda l: (l[0],l[1])))\n\n#Calculate and print the Mean Squared Error\npredictions = predict.map(lambda l: ((l[0],l[1]), l[2]))\nratingsAndPredictions = testRDD.map(lambda l: ((l[0], l[1]), l[2])).join(predictions)\n\nratingsAndPredictions.cache()\nprint ratingsAndPredictions.take(3)\n\nmeanSquaredError = ratingsAndPredictions.map(lambda l: (l[1][0] - l[1][1])**2).mean()\nprint\nprint 'Mean squared error = %.4f' % meanSquaredError", "This doesn't give us that good of a representation of ranking becuase the ranks are number of purchases. Something better may be to look at some actual recommendations.", "recs = model.recommendProducts(15544,5)\nfor rec in recs:\n print rec", "<img src='https://raw.githubusercontent.com/rosswlewis/RecommendationPoT/master/FullFile.png' width=\"80%\" height=\"80%\"></img>\nThis user seems to have purchased a lot of childrens gifts and some holiday items. 
The recommendation engine we created suggested some items along these lines:", "#Rating(user=15544, product=84568, rating=193.03195106065823)\n#GIRLS ALPHABET IRON ON PATCHES \n\n#Rating(user=15544, product=16033, rating=179.45915040198466)\n#MINI HIGHLIGHTER PENS\n\n#Rating(user=15544, product=22266, rating=161.04293255928698)\n#EASTER DECORATION HANGING BUNNY\n\n#Rating(user=15544, product=84598, rating=141.00162368678377)\n#BOYS ALPHABET IRON ON PATCHES\n\n#Rating(user=15544, product=72803, rating=129.54033486738518)\n#ROSE SCENT CANDLE JEWELLED DRAWER", "Data Citation\nDaqing Chen, Sai Liang Sain, and Kun Guo, Data mining for the online retail industry: A case study of RFM model-based customer segmentation using data mining, Journal of Database Marketing and Customer Strategy Management, Vol. 19, No. 3, pp. 197–208, 2012 (Published online before print: 27 August 2012. doi: 10.1057/dbm.2012.17)." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
statsmodels/statsmodels.github.io
v0.12.2/examples/notebooks/generated/kernel_density.ipynb
bsd-3-clause
[ "Kernel Density Estimation\nKernel density estimation is the process of estimating an unknown probability density function using a kernel function $K(u)$. While a histogram counts the number of data points in somewhat arbitrary regions, a kernel density estimate is a function defined as the sum of a kernel function on every data point. The kernel function typically exhibits the following properties:\n\nSymmetry such that $K(u) = K(-u)$.\nNormalization such that $\\int_{-\\infty}^{\\infty} K(u) \\ du = 1$ .\nMonotonically decreasing such that $K'(u) < 0$ when $u > 0$.\nExpected value equal to zero such that $\\mathrm{E}[K] = 0$.\n\nFor more information about kernel density estimation, see for instance Wikipedia - Kernel density estimation.\nA univariate kernel density estimator is implemented in sm.nonparametric.KDEUnivariate.\nIn this example we will show the following:\n\nBasic usage, how to fit the estimator.\nThe effect of varying the bandwidth of the kernel using the bw argument.\nThe various kernel functions available using the kernel argument.", "%matplotlib inline\nimport numpy as np\nfrom scipy import stats\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nfrom statsmodels.distributions.mixture_rvs import mixture_rvs", "A univariate example", "np.random.seed(12345) # Seed the random number generator for reproducible results", "We create a bimodal distribution: a mixture of two normal distributions with locations at -1 and 1.", "# Location, scale and weight for the two distributions\ndist1_loc, dist1_scale, weight1 = -1 , .5, .25\ndist2_loc, dist2_scale, weight2 = 1 , .5, .75\n\n# Sample from a mixture of distributions\nobs_dist = mixture_rvs(prob=[weight1, weight2], size=250,\n dist=[stats.norm, stats.norm],\n kwargs = (dict(loc=dist1_loc, scale=dist1_scale),\n dict(loc=dist2_loc, scale=dist2_scale)))", "The simplest non-parametric technique for density estimation is the histogram.", "fig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\n\n# Scatter plot of data samples and histogram\nax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size)),\n zorder=15, color='red', marker='x', alpha=0.5, label='Samples')\nlines = ax.hist(obs_dist, bins=20, edgecolor='k', label='Histogram')\n\nax.legend(loc='best')\nax.grid(True, zorder=-5)", "Fitting with the default arguments\nThe histogram above is discontinuous. 
To compute a continuous probability density function,\nwe can use kernel density estimation.\nWe initialize a univariate kernel density estimator using KDEUnivariate.", "kde = sm.nonparametric.KDEUnivariate(obs_dist)\nkde.fit() # Estimate the densities", "We present a figure of the fit, as well as the true distribution.", "fig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\n\n# Plot the histrogram\nax.hist(obs_dist, bins=20, density=True, label='Histogram from samples',\n zorder=5, edgecolor='k', alpha=0.5)\n\n# Plot the KDE as fitted using the default arguments\nax.plot(kde.support, kde.density, lw=3, label='KDE from samples', zorder=10)\n\n# Plot the true distribution\ntrue_values = (stats.norm.pdf(loc=dist1_loc, scale=dist1_scale, x=kde.support)*weight1\n + stats.norm.pdf(loc=dist2_loc, scale=dist2_scale, x=kde.support)*weight2)\nax.plot(kde.support, true_values, lw=3, label='True distribution', zorder=15)\n\n# Plot the samples\nax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/40,\n marker='x', color='red', zorder=20, label='Samples', alpha=0.5)\n\nax.legend(loc='best')\nax.grid(True, zorder=-5)", "In the code above, default arguments were used. We can also vary the bandwidth of the kernel, as we will now see.\nVarying the bandwidth using the bw argument\nThe bandwidth of the kernel can be adjusted using the bw argument.\nIn the following example, a bandwidth of bw=0.2 seems to fit the data well.", "fig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\n\n# Plot the histrogram\nax.hist(obs_dist, bins=25, label='Histogram from samples',\n zorder=5, edgecolor='k', density=True, alpha=0.5)\n\n# Plot the KDE for various bandwidths\nfor bandwidth in [0.1, 0.2, 0.4]:\n kde.fit(bw=bandwidth) # Estimate the densities\n ax.plot(kde.support, kde.density, '--', lw=2, color='k', zorder=10,\n label='KDE from samples, bw = {}'.format(round(bandwidth, 2)))\n\n# Plot the true distribution\nax.plot(kde.support, true_values, lw=3, label='True distribution', zorder=15)\n\n# Plot the samples\nax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/50,\n marker='x', color='red', zorder=20, label='Data samples', alpha=0.5)\n\nax.legend(loc='best')\nax.set_xlim([-3, 3])\nax.grid(True, zorder=-5)", "Comparing kernel functions\nIn the example above, a Gaussian kernel was used. 
Several other kernels are also available.", "from statsmodels.nonparametric.kde import kernel_switch\nlist(kernel_switch.keys())", "The available kernel functions", "# Create a figure\nfig = plt.figure(figsize=(12, 5))\n\n# Enumerate every option for the kernel\nfor i, (ker_name, ker_class) in enumerate(kernel_switch.items()):\n\n # Initialize the kernel object\n kernel = ker_class()\n\n # Sample from the domain\n domain = kernel.domain or [-3, 3]\n x_vals = np.linspace(*domain, num=2**10)\n y_vals = kernel(x_vals)\n\n # Create a subplot, set the title\n ax = fig.add_subplot(2, 4, i + 1)\n ax.set_title('Kernel function \"{}\"'.format(ker_name))\n ax.plot(x_vals, y_vals, lw=3, label='{}'.format(ker_name))\n ax.scatter([0], [0], marker='x', color='red')\n plt.grid(True, zorder=-5)\n ax.set_xlim(domain)\n\nplt.tight_layout()", "The available kernel functions on three data points\nWe now examine how the kernel density estimate will fit to three equally spaced data points.", "# Create three equidistant points\ndata = np.linspace(-1, 1, 3)\nkde = sm.nonparametric.KDEUnivariate(data)\n\n# Create a figure\nfig = plt.figure(figsize=(12, 5))\n\n# Enumerate every option for the kernel\nfor i, kernel in enumerate(kernel_switch.keys()):\n\n # Create a subplot, set the title\n ax = fig.add_subplot(2, 4, i + 1)\n ax.set_title('Kernel function \"{}\"'.format(kernel))\n\n # Fit the model (estimate densities)\n kde.fit(kernel=kernel, fft=False, gridsize=2**10)\n\n # Create the plot\n ax.plot(kde.support, kde.density, lw=3, label='KDE from samples', zorder=10)\n ax.scatter(data, np.zeros_like(data), marker='x', color='red')\n plt.grid(True, zorder=-5)\n ax.set_xlim([-3, 3])\n\nplt.tight_layout()", "A more difficult case\nThe fit is not always perfect. See the example below for a harder case.", "obs_dist = mixture_rvs([.25, .75], size=250, dist=[stats.norm, stats.beta],\n kwargs = (dict(loc=-1, scale=.5), dict(loc=1, scale=1, args=(1, .5))))\n\nkde = sm.nonparametric.KDEUnivariate(obs_dist)\nkde.fit()\n\nfig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\nax.hist(obs_dist, bins=20, density=True, edgecolor='k', zorder=4, alpha=0.5)\nax.plot(kde.support, kde.density, lw=3, zorder=7)\n# Plot the samples\nax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/50,\n marker='x', color='red', zorder=20, label='Data samples', alpha=0.5)\nax.grid(True, zorder=-5)", "The KDE is a distribution\nSince the KDE is a distribution, we can access attributes and methods such as:\n\nentropy\nevaluate\ncdf\nicdf\nsf\ncumhazard", "obs_dist = mixture_rvs([.25, .75], size=1000, dist=[stats.norm, stats.norm],\n kwargs = (dict(loc=-1, scale=.5), dict(loc=1, scale=.5)))\nkde = sm.nonparametric.KDEUnivariate(obs_dist)\nkde.fit(gridsize=2**10)\n\nkde.entropy\n\nkde.evaluate(-1)", "Cumulative distribution, it's inverse, and the survival function", "fig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\n\nax.plot(kde.support, kde.cdf, lw=3, label='CDF')\nax.plot(np.linspace(0, 1, num = kde.icdf.size), kde.icdf, lw=3, label='Inverse CDF')\nax.plot(kde.support, kde.sf, lw=3, label='Survival function')\nax.legend(loc = 'best')\nax.grid(True, zorder=-5)", "The Cumulative Hazard Function", "fig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\nax.plot(kde.support, kde.cumhazard, lw=3, label='Cumulative Hazard Function')\nax.legend(loc = 'best')\nax.grid(True, zorder=-5)" ]
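The "sum of a kernel function on every data point" definition given at the top of this notebook can be made concrete in a few lines of NumPy. The sketch below is only an illustration of that formula with an assumed Gaussian kernel and a hand-picked bandwidth; it is not how KDEUnivariate computes its estimate internally, and the sample values are invented.

```python
# Illustrative sketch of the KDE definition: density(x) = (1 / (n*h)) * sum_i K((x - x_i) / h),
# here with a Gaussian kernel K and an arbitrary bandwidth h = 0.4 (both are assumptions).
import numpy as np
from scipy import stats

data = np.array([-1.2, -0.9, 0.8, 1.1, 1.3])   # hypothetical sample
h = 0.4                                        # assumed bandwidth
grid = np.linspace(-3, 3, 200)

density = np.mean(stats.norm.pdf((grid[:, None] - data[None, :]) / h), axis=1) / h
print(np.trapz(density, grid))                 # close to 1, reflecting the normalization property
```

The printed integral being close to 1 is a direct consequence of the normalization property of the kernel listed above.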
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
thesby/CaffeAssistant
tutorial/ipynb/detection.ipynb
mit
[ "R-CNN is a state-of-the-art detector that classifies region proposals by a finetuned Caffe model. For the full details of the R-CNN system and model, refer to its project site and the paper:\n\nRich feature hierarchies for accurate object detection and semantic segmentation. Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik. CVPR 2014. Arxiv 2013.\n\nIn this example, we do detection by a pure Caffe edition of the R-CNN model for ImageNet. The R-CNN detector outputs class scores for the 200 detection classes of ILSVRC13. Keep in mind that these are raw one vs. all SVM scores, so they are not probabilistically calibrated or exactly comparable across classes. Note that this off-the-shelf model is simply for convenience, and is not the full R-CNN model.\nLet's run detection on an image of a bicyclist riding a fish bike in the desert (from the ImageNet challenge—no joke).\nFirst, we'll need region proposals and the Caffe R-CNN ImageNet model:\n\nSelective Search is the region proposer used by R-CNN. The selective_search_ijcv_with_python Python module takes care of extracting proposals through the selective search MATLAB implementation. To install it, download the module and name its directory selective_search_ijcv_with_python, run the demo in MATLAB to compile the necessary functions, then add it to your PYTHONPATH for importing. (If you have your own region proposals prepared, or would rather not bother with this step, detect.py accepts a list of images and bounding boxes as CSV.)\n\n-Run ./scripts/download_model_binary.py models/bvlc_reference_rcnn_ilsvrc13 to get the Caffe R-CNN ImageNet model.\nWith that done, we'll call the bundled detect.py to generate the region proposals and run the network. For an explanation of the arguments, do ./detect.py --help.", "!mkdir -p _temp\n!echo `pwd`/images/fish-bike.jpg > _temp/det_input.txt\n!../python/detect.py --crop_mode=selective_search --pretrained_model=../models/bvlc_reference_rcnn_ilsvrc13/bvlc_reference_rcnn_ilsvrc13.caffemodel --model_def=../models/bvlc_reference_rcnn_ilsvrc13/deploy.prototxt --gpu --raw_scale=255 _temp/det_input.txt _temp/det_output.h5", "This run was in GPU mode. For CPU mode detection, call detect.py without the --gpu argument.\nRunning this outputs a DataFrame with the filenames, selected windows, and their detection scores to an HDF5 file.\n(We only ran on one image, so the filenames will all be the same.)", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndf = pd.read_hdf('_temp/det_output.h5', 'df')\nprint(df.shape)\nprint(df.iloc[0])", "1570 regions were proposed with the R-CNN configuration of selective search. The number of proposals will vary from image to image based on its contents and size -- selective search isn't scale invariant.\nIn general, detect.py is most efficient when running on a lot of images: it first extracts window proposals for all of them, batches the windows for efficient GPU processing, and then outputs the results.\nSimply list an image per line in the images_file, and it will process all of them.\nAlthough this guide gives an example of R-CNN ImageNet detection, detect.py is clever enough to adapt to different Caffe models’ input dimensions, batch size, and output categories. You can switch the model definition and pretrained model as desired. Refer to python detect.py --help for the parameters to describe your data set. 
There's no need for hardcoding.\nAnyway, let's now load the ILSVRC13 detection class names and make a DataFrame of the predictions. Note you'll need the auxiliary ilsvrc2012 data fetched by data/ilsvrc12/get_ilsvrc12_aux.sh.", "with open('../data/ilsvrc12/det_synset_words.txt') as f:\n labels_df = pd.DataFrame([\n {\n 'synset_id': l.strip().split(' ')[0],\n 'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]\n }\n for l in f.readlines()\n ])\nlabels_df.sort('synset_id')\npredictions_df = pd.DataFrame(np.vstack(df.prediction.values), columns=labels_df['name'])\nprint(predictions_df.iloc[0])", "Let's look at the activations.", "plt.gray()\nplt.matshow(predictions_df.values)\nplt.xlabel('Classes')\nplt.ylabel('Windows')", "Now let's take max across all windows and plot the top classes.", "max_s = predictions_df.max(0)\nmax_s.sort(ascending=False)\nprint(max_s[:10])", "The top detections are in fact a person and bicycle.\nPicking good localizations is a work in progress; we pick the top-scoring person and bicycle detections.", "# Find, print, and display the top detections: person and bicycle.\ni = predictions_df['person'].argmax()\nj = predictions_df['bicycle'].argmax()\n\n# Show top predictions for top detection.\nf = pd.Series(df['prediction'].iloc[i], index=labels_df['name'])\nprint('Top detection:')\nprint(f.order(ascending=False)[:5])\nprint('')\n\n# Show top predictions for second-best detection.\nf = pd.Series(df['prediction'].iloc[j], index=labels_df['name'])\nprint('Second-best detection:')\nprint(f.order(ascending=False)[:5])\n\n# Show top detection in red, second-best top detection in blue.\nim = plt.imread('images/fish-bike.jpg')\nplt.imshow(im)\ncurrentAxis = plt.gca()\n\ndet = df.iloc[i]\ncoords = (det['xmin'], det['ymin']), det['xmax'] - det['xmin'], det['ymax'] - det['ymin']\ncurrentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='r', linewidth=5))\n\ndet = df.iloc[j]\ncoords = (det['xmin'], det['ymin']), det['xmax'] - det['xmin'], det['ymax'] - det['ymin']\ncurrentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='b', linewidth=5))", "That's cool. 
Let's take all 'bicycle' detections and NMS them to get rid of overlapping windows.", "def nms_detections(dets, overlap=0.3):\n \"\"\"\n Non-maximum suppression: Greedily select high-scoring detections and\n skip detections that are significantly covered by a previously\n selected detection.\n\n This version is translated from Matlab code by Tomasz Malisiewicz,\n who sped up Pedro Felzenszwalb's code.\n\n Parameters\n ----------\n dets: ndarray\n each row is ['xmin', 'ymin', 'xmax', 'ymax', 'score']\n overlap: float\n minimum overlap ratio (0.3 default)\n\n Output\n ------\n dets: ndarray\n remaining after suppression.\n \"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n ind = np.argsort(dets[:, 4])\n\n w = x2 - x1\n h = y2 - y1\n area = (w * h).astype(float)\n\n pick = []\n while len(ind) > 0:\n i = ind[-1]\n pick.append(i)\n ind = ind[:-1]\n\n xx1 = np.maximum(x1[i], x1[ind])\n yy1 = np.maximum(y1[i], y1[ind])\n xx2 = np.minimum(x2[i], x2[ind])\n yy2 = np.minimum(y2[i], y2[ind])\n\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n\n wh = w * h\n o = wh / (area[i] + area[ind] - wh)\n\n ind = ind[np.nonzero(o <= overlap)[0]]\n\n return dets[pick, :]\n\nscores = predictions_df['bicycle']\nwindows = df[['xmin', 'ymin', 'xmax', 'ymax']].values\ndets = np.hstack((windows, scores[:, np.newaxis]))\nnms_dets = nms_detections(dets)", "Show top 3 NMS'd detections for 'bicycle' in the image and note the gap between the top scoring box (red) and the remaining boxes.", "plt.imshow(im)\ncurrentAxis = plt.gca()\ncolors = ['r', 'b', 'y']\nfor c, det in zip(colors, nms_dets[:3]):\n currentAxis.add_patch(\n plt.Rectangle((det[0], det[1]), det[2]-det[0], det[3]-det[1],\n fill=False, edgecolor=c, linewidth=5)\n )\nprint 'scores:', nms_dets[:3, 4]", "This was an easy instance for bicycle as it was in the class's training set. However, the person result is a true detection since this was not in the set for that class.\nYou should try out detection on an image of your own next!\n(Remove the temp directory to clean up, and we're done.)", "!rm -rf _temp" ]
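As a quick illustration of what the nms_detections function defined above does, the hypothetical boxes below (coordinates and scores are made up) show two heavily overlapping windows collapsing to the higher-scoring one while a distant window is kept. This sketch assumes the cell defining nms_detections has already been run.

```python
# Toy input: each row is [xmin, ymin, xmax, ymax, score]; values are invented for illustration.
import numpy as np

toy_dets = np.array([
    [10., 10., 50., 50., 0.9],      # highest score, kept
    [12., 12., 52., 52., 0.6],      # overlap ratio ~0.82 with the first box -> suppressed at overlap=0.3
    [100., 100., 140., 140., 0.8],  # no overlap -> kept
])
print(nms_detections(toy_dets, overlap=0.3))
```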
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
crackhopper/TFS-toolbox
notebook/1.Save-and-load.ipynb
mit
[ "Save and Load\nIn this notebook, we will \n- train a LeNet and save it\n- load the model from the file\n- test the loaded model", "from tfs.models import LeNet\nfrom tfs.dataset import Mnist\nnet = LeNet()\ndataset = Mnist()", "modify the network:\nL1 regularizer\nSGD optimizer", "from tfs.core.optimizer import GradientDecentOptimizer\nfrom tfs.core.regularizers import L1\n\nnet.optimizer = GradientDecentOptimizer(net)\nnet.regularizer = L1(net,l1=0.001)\n\nnet.build()\n\nnet.fit(dataset,batch_size=200,n_epoch=1,max_step=100)\n\nnet.save('lenet_epoch_1')\n\n!ls ./", "load the model", "from tfs.network import Network\nnet2 = Network()\nnet2.load('lenet_epoch_1')\n\nprint net2\n\nprint net2.optimizer\n\nprint net2.initializer\n\nprint net2.losser\n\nprint 'accuracy',net2.score(dataset.test)", "fine-tune the loaded model", "net2.fit(dataset,batch_size=200,n_epoch=1,max_step=100)\n\nnet2.score(dataset.test)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
DS-100/sp17-materials
sp17/hw/hw6/hw6.ipynb
gpl-3.0
[ "Homework 6: Prediction on Housing Prices", "import numpy as np\nimport pandas as pd\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn import linear_model as lm\n\nfrom IPython.display import display, Latex, Markdown\n\n!pip install -U okpy\nfrom client.api.notebook import Notebook\nok = Notebook('hw6.ok')", "Kaggle\nThis assignment is purposefully left nearly open-ended. The Ames data in your possession comes from a larger data set. Your goal is to provide a linear model that accurately predicts the prices of the held-out homes, measured by root mean square error. That is, the score you will see on the Kaggle leaderboard is calculated as follows:\n$$score = \\sqrt{\\dfrac{\\sum_{\\text{houses in public test set}}(\\text{actual price for house} - \\text{predicted price for house})^2}{\\text{# of houses}}}$$\nPerfect prediction of house prices would have a score of 0, so you want your score to be as low as possible!\nKaggle Submission Site: https://inclass.kaggle.com/c/ds100-2017-hw6\nMax number of submissions per day: 2\nMax number of final submissions: 1 \nThe Data\nThe Ames data set consists of 2930 records taken from the Ames Assessor’s Office. The data set has 23 nominal, 23 ordinal, 14 discrete, and 20 continuous variables (and 2 additional observation identifiers) --- 82 features in total. An explanation of each variable can be found in the included README.txt file. The information was used in computing assessed values for individual residential properties sold in Ames, Iowa from 2006 to 2010. Since the data is publicly available, we have injected noise into all the sale prices to remove the temptation to do \"oracle learning.\"\nThe data are split into training and test sets with 2000 and 930 observations, respectively. The actual sale price is withheld from you in the test set. In addition, the test data are further split into public and private test sets. When you upload a test set prediction onto Kaggle for validation, the score you receive will be calculated using the public test set. The private test set will be used in the final evaluation of this homework assignment.", "raw_data = pd.read_csv(\"ames_train.csv\")", "Example Data\nThroughout this assignment, we will use this reduced data set for examples. This is only for demonstration; in your final submission you'll want to use more features than just these.", "small_data = (\n raw_data[[\"SalePrice\", \"Gr_Liv_Area\", \"Lot_Area\", \"Bedroom_AbvGr\"]]\n .rename(columns = {\n \"SalePrice\": \"price\",\n \"Gr_Liv_Area\": \"sqft\",\n \"Lot_Area\": \"lotsize\",\n \"Bedroom_AbvGr\": \"bedrooms\"\n })\n)\n\nsmall_data.iloc[1:5]", "Grading\nGrading will be based on a number of set criteria, enumerated below:\nTask | Description\n--- | ---\nEDA | You create exploratory plots for at least 3 (basic) features to motivate your work. The minimal 3 should cover each of the 3 variable types: categorical, discrete, continuous.\nTransformations | Your final model includes transformations of the data.\nDiagnostics | You have diagnostic checks with commentary for your model\nRMSE | Your model beats the RMSE threshold of $30,000. This should be attainable with a well-thought-out model.\nModel | Your modeling pipeline is encapsulated in a pipeline object called final_pipeline.\nWritten Questions | Your submission should include answers to the written questions at the bottom of this notebook.\nSubmission\nThis assignment requires a Kaggle submission in addition to the usual okpy one. 
To submit to Kaggle, you should create a csv file with 930 rows---one for each house in the test data---and 2 columns:\n\nPID The house identification number\nSalePrice Your estimate for the sale price of the house\n\nAn example kaggle submission file has been included with this assignment.\nRestrictions\nWhile we want you to be creative with your models, we want to make it fair to students who are seeing these techniques for the first time. As such, you are only allowed to train linear models and their regularized forms (e.g. ridge and lasso). This means no random forest, CART, neural nets, etc. However, you are free to feature engineer to your heart's content. Remember that domain knowledge is the third component of data science...\nThat being said, you may want to explore the sklearn API for more information on Lasso, Ridge, and ElasticNet.\nPrizes\nThe top 10 students, evaluated by their score in the private test set will: \n1. Have bragging rights \n2. Be invited to attend a lunch at the Faculty Club, hosted by Professor Yu.\nEDA\nMake plots to explore the data. You may create as many plots as you wish and you will choose three of them to be graded.\nThe 3 plots you submit should cover each of the three variable types (categorical, discrete, continuous).\nInsert this comment at the top of the three cells you want to submit for EDA:\n# EDA_SUBMIT\n\nWe will use this tag to grade your 3 submitted plots.", "# EDA_SUBMIT\n\n# EDA_SUBMIT\n\n# EDA_SUBMIT", "Transformations\nYou have already encountered one of sklearn's Transformer classes: the DictVectorizer from lab 10. A transformer is an object that cleans, reduces, expands, or generates features. \nA transformer's fit method, will learn parameters. For the DictVectorizer, the parameters are the allowed values for a categorical variable.\nThe transform method takes the learned parameters and transforms any inputted new data. For DictVectorizer, this means taking a vector of categorical values and transforming the data into a matrix where each row has at most one non-zero value (they may all be zero if this vector contains a category that was previously unseen).\nfit_transform simply learns from and transforms the input data all in one go\nSince we might want to perform different transformations on different columns, we've provided you with a ColumnSelector class. You may want to use our code as a template for your own custom transformers.", "from sklearn.base import BaseEstimator, TransformerMixin\n\nclass ColumnSelector(BaseEstimator, TransformerMixin):\n \"\"\"\n Transformer that extracts a column of a data frame\n \n Example Usage\n >> data = pd.DataFrame({'a': [1, 2, 3, 4],\n 'b': [5, 6, 7, 8],\n 'c': [9, 10, 11, 12]})\n >> cs = ColumnSelector(cols=['a', 'b'])\n >> data['a'] == cs.transform(data)\n\n Parameters\n ----------\n col : list of strings, required\n The name(s) corresponding to the desired column of a DataFrame.\n \"\"\"\n \n def __init__(self, cols):\n self.cols = cols\n\n def fit(self, X, y=None):\n \"\"\"\n Returns itself, nothing to be fit\n \"\"\"\n return self\n\n def transform(self, X, y=None):\n \"\"\"\n Returns the desired column as a matrix\n \"\"\"\n return X.as_matrix(self.cols)", "We have also seen another transformation in class: the polynomial transformation. In practice, you would use sklearn's nice PolynomialFeatures. 
To give you experience implementing your own transformer class, write a bivariate (exactly 2 input features) BiPolyTrans transformer class that, given two features, $W$ and $Z$ of a matrix $X$, calculates all powers up to a given degree. That is for every record (row) $x_i = \\begin{bmatrix} w_i & z_i \\end{bmatrix}$, \n$$\\phi_{degree}(x_i) = \\begin{bmatrix} 1 & w_i & z_i & w_iz_i & w_i^2z_i & w_iz_i^2 & \\dots & w_iz_i^{degree-1} & w_i^{degree} & z_i^{degree} \\end{bmatrix} $$\nIf you are worried about efficiency, you may want to make use of Python's itertools. Namely, chain and combinations_with_replacement should be helpful.", "from itertools import chain, combinations_with_replacement\n\nclass BiPolyTrans(BaseEstimator, TransformerMixin):\n \"\"\"\n Transforms the data from a n x 2 matrix to a matrix with\n polynomial features up to the specified degree.\n \n Example Usage\n data = np.array([[1, 2], [3, 4]])\n d3polytrans = BiPolyTrans(2)\n d3polytrans.fit_transform(data) == np.array([[1, 1, 2, 1, 2, 4], [1, 3, 4, 9, 12,16]])\n\n Parameters\n ----------\n degree : integer, required\n largest polynomial degree to calculate with the two features\n \"\"\"\n def __init__(self, degree):\n self.degree = ...\n \n def fit(self, X, y=None):\n \"\"\"\n Calculates the number of input and output features\n \"\"\"\n self.n_input_features = ...\n self.n_output_features = ...\n return self\n \n def transform(self, X, y=None):\n \"\"\"\n Transforms the data into polynomial features\n \n Input\n -----\n X : an n x 2 matrix, required.\n \n Output\n ------\n A higher-dimensional matrix with polynomial features up to the specified degree\n \"\"\"\n n_records = ...\n output = np.empty((..., ...), dtype=X.dtype)\n \n ...\n \n return(output)\n\n_ = ok.grade('qtransform')", "Pipelines", "from sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import OneHotEncoder, PolynomialFeatures", "At this point, we will formalize our data cleaning, extraction, transformation, and training all into an abstraction called a Pipeline. In a nutshell, a pipeline is the recipe for going from a clean but untransformed data set to a trained model. For more information, see sklearn's docs. In the example below, we extract polynomial features from each home's square footage and then fit a linear model.", "ex_pipeline1 = Pipeline([\n ('selector', ColumnSelector(['sqft'])), \n ('poly_feats', PolynomialFeatures(3, include_bias=False)),\n ('lm', lm.LinearRegression(fit_intercept=False))\n])\n\nex_pipeline1.fit(small_data, small_data[['price']])\n\nprint(\"Training RMSE:\",\n (np.mean((ex_pipeline1.predict(small_data) \n - small_data[['price']])**2)**(.5)).item())", "As we've learned, training error definitely isn't everything! In addition to our training error, we want to be able to calculate validation error using cross validation. Luckily, sklearn makes this quite easy for us. Below, we calculate validation error for our initial pipeline, using 10-fold cross-validation.", "from sklearn.metrics import make_scorer\nfrom sklearn.model_selection import cross_val_score\n\ndef score_func(y, y_pred, **kwargs): \n return np.mean((y-y_pred)**2)**0.5\nscorer = make_scorer(score_func)\n\ncv_scores = cross_val_score(ex_pipeline1, small_data, small_data[['price']], cv=10, scoring=scorer)\nprint(\"Validation RMSE:\", np.mean(cv_scores))", "Of course we wouldn't want to just use one predictor given how rich our dataset is. 
To append more features, we can use FeatureUnion, which combines several transformers into a MEGATRANSFORMER, which outputs a concatenation of the output of its constituents. Note: FeatureUnion does NOT check if the transformations create linearly independent features. In the example below, we combine the polynomial lift of square footage and lot size.\n<img src=\"pipeline.png\" style=\"height: 3in;\"/>", "ex_pipeline2 = Pipeline([\n ('union', FeatureUnion(n_jobs=1, transformer_list=[\n ('poly_lotsize', Pipeline([\n ('selector', ColumnSelector(['lotsize'])), \n ('poly_feats', PolynomialFeatures(3, include_bias=False))\n ])),\n \n ('poly_sqft', Pipeline([\n ('selector', ColumnSelector(['sqft'])), \n ('poly_feats', PolynomialFeatures(3, include_bias=False)),\n ]))\n ])),\n \n ('lm', lm.LinearRegression(fit_intercept=False))\n])\n\nex_pipeline2.fit(small_data, small_data[['price']])\n\nprint(\"Training RMSE:\",\n (np.mean((ex_pipeline2.predict(small_data) \n - small_data[['price']])**2)**(.5)).item())\n\ncv_scores = cross_val_score(ex_pipeline2, small_data, small_data[['price']], cv=10, scoring=scorer)\nprint(\"Validation RMSE:\", np.mean(cv_scores))", "Your final model should be presented as a data pipeline. We should be able to train the pipeline on a new (clean) data set without any issues.", "final_pipeline = ...", "Submitting to Kaggle\nThe following code will write your predictions on the test dataset to a CSV, which you can submit to kaggle. You may need to modify it a little to suit your needs.", "from datetime import datetime\n\ntest_data = pd.read_csv(\"ames_test.csv\")\n\nsubmission_df = pd.DataFrame(\n {\n \"PID\": test_data[\"PID\"], \n \"SalePrice\": final_pipeline.predict(test_data).reshape(-1,)\n }\n)\n\ntimestamp = datetime.isoformat(datetime.now()).split(\".\")[0]\n\nsubmission_df.to_csv(\"submission_{}\".format(timestamp), index=False)", "Your final kaggle submission should achieve a test-set RMSE threshold of 30,000 or lower. Write your best test-set RMSE (as shown on kaggle) here:", "my_test_RMSE = 53240\n\n_ = ok.grade('qkaggle')", "Diagnostics\nMake some plots to investigate how well your models fit the data. Pick an intermediate (not final) model for your diagnostics submission. Provide commentary about patterns you notice and how you addressed them. Include this comment on top of the cells you would like us to grade.\n# DIAGNOSTIC_SUBMIT", "# DIAGNOSTIC_SUBMIT\n\n# Code for plot\n\n# Commentary\ndiagnostic_commentary = r\"\"\"\n\nPut your commentary about diagnostics here, replacing this text.\n\n\"\"\"\n\ndisplay(Markdown(diagnostic_commentary))", "FYI: Recall from lecture that stability is a measure of how robust your modeling procedure is to perturbations of the data. While the formal definition is a little technical, the concept is intuitive: if you create pseudoreplicates of the data, the coefficients of your model shouldn't change too much since that would mean that your model is too sensitive to small changes in the training data. Below, we use our pipeline to do a five-fold stability check. This method is really a heuristic (as easily noted by the arbitrary choice of 5 folds). To get a better assessment of your model, you could carry out a bootstrap analysis. 
For this particular model, it would seem that the coefficients are not changing too crazily relative to the magnitude of their impact on home prices.", "from sklearn.model_selection import KFold\n\nfivefold = KFold(n_splits=5, shuffle=True)\n\ndef calc_coefs(X, y, modeler):\n model = modeler\n model.fit(X, y)\n\n return(model.steps[1][1].coef_[0])\n \nnp.vstack(calc_coefs(small_data.iloc[fold,:], small_data.iloc[fold, :][['price']], ex_pipeline2) \n for (fold, _) in fivefold.split(small_data))\n ", "Properties of Least Squares\nHere we ask you to prove some basic properties about least squares. While the focus of the class isn't the mathematical machinery behind data science, we want to at least motivate how theory can inform application.\nQuestion 1\nLet $y$ be an $n \\times 1$ response vector and $X$ be an $n \\times p$ full rank design matrix with a column of 1s. We use the least squares procedure to fit $y$ on $X$:\n$$\\hat y = X\\hat\\theta$$ where $\\hat\\theta = (X^TX)^{-1}X^Ty$. The residuals are given by $e = y - \\hat y$.\nPart a\nShow that $\\sum_{i=1}^n e_i = 0$\nPart b\nShow that $e$ is in the null space of $X^T$. In other words, prove that $X^Te=0$. Note that this is property is where the name \"Normal Equations\" come from: $e$ must be normal (orthogonal) to the space spanned by the columns of $X$.\nPart c\nYour friend fits a linear model of sale price on home square footage with an intercept as shown below:", "model = lm.LinearRegression(fit_intercept = True)\nmodel.fit(small_data[[\"sqft\"]], small_data[[\"price\"]])\nprint(\"Intercept:\", model.intercept_[0])\nprint(\"Slope:\", model.coef_[0,0])", "She wants to know if the stochastic model $Y = X\\theta + \\epsilon$, where $\\epsilon$ is a mean 0 vector independent of the columns of the design matrix $X$ is plausible. One assumption is that sqft must be independent of the noise term $\\epsilon$. To test for this, your friend writes the following:", "def test_independent(variable, error):\n # Inputs\n # variable: n x 1 numpy array with variable of interest\n # error: n x 1 numpy array estimates of the error term epsilon given by y - y_fitted\n # Outputs\n # boolean, True if the variable passes test for independence\n return sum(variable * error)[0]\n\nn = small_data.shape[0]\nsqft = small_data[[\"sqft\"]].values.reshape(n, 1)\nfitted = (small_data[[\"price\"]] - model.predict(sqft)).values.reshape(n, 1)\ntest_independent(sqft, fitted)", "She concludes that since this value is very small, sqft and the noise are most likely independent of each other. Is this a reasonable conclusion? Why or why not?\nWrite your answer here, replacing this text.\nQuestion 2\nCentering takes every data point and subtracts the overall mean from it. We can write the transformation function $\\phi$ as:\n$$\\begin{align}\\phi(X) &= \\left[\\begin{array}{c|c|c|c} X_1 - \\bar{X}_1 & X_2 - \\bar{X}_2 & \\dots & X_d - \\bar{X}_d \\end{array}\\right] \\\n\\phi(y) &= y - \\bar{y} \\end{align}$$\nwhere $\\bar{X}_j$ is the arithmetic mean of the $j^{th}$ column of $X$ and $\\bar{y}$ is the average of the responses. Show that if a bias/intercept term is included in a regression after centering, then it will always be 0. 
This, of course, means that adding a column of 1s to your design matrix after centering your data might be a little silly.\nHint: You will want to use what we've proved in Question 1a.\nSubmitting your assignment\nCongratulations, you're done with this homework!\nRun the next cell to submit the assignment to OkPy so that the staff will know to grade it. You can submit as many times as you want, and you can choose which submission you want us to grade by going to https://okpy.org/cal/data100/sp17/. After you've done that, make sure you've pushed your changes to Github as well!", "_ = ok.submit()" ]
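Before writing up the proofs for Question 1, it can help to see the two properties numerically on synthetic data. The sketch below is only an illustration (it is not part of the required written answer); the data, coefficients, and variable names are arbitrary, and it solves the normal equations directly.

```python
# Numerical illustration of Question 1: with an intercept column in X,
# the least-squares residuals sum to ~0 (Part a) and satisfy X^T e = 0 (Part b).
import numpy as np

rng = np.random.RandomState(0)
n = 100
X = np.hstack([np.ones((n, 1)), rng.randn(n, 3)])       # design matrix with a column of 1s
y = X @ np.array([2.0, 1.0, -0.5, 3.0]) + rng.randn(n)  # synthetic response

theta_hat = np.linalg.solve(X.T @ X, X.T @ y)           # normal equations
e = y - X @ theta_hat

print(e.sum())    # ~0 up to floating point error (Part a)
print(X.T @ e)    # ~0 vector (Part b)
```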
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
maurov/xraysloth
notebooks/larch.ipynb
bsd-3-clause
[ "Examples of XAFS data analysis with Larch\nFirst read in some data", "import numpy as np\nfrom larch.io import read_ascii\nfeo = read_ascii('./larch_data/feo_xafs.dat', labels = 'energy ctime i0 i1 nothing')\nfeo.mu = - np.log(feo.i1/feo.i0)", "Normalization and backgroud removal (= EXAFS extraction)", "from larch.xafs import autobk\nautobk(feo, kweight=2, rbkg=0.8, e0=7119.0)", "Fourier transform", "from larch.xafs import xftf\nxftf(feo, kweight=2, kmin=2, kmax=13.0, dk=5, kwindow='Kaiser-Bessel')", "Basic plots can be done directly with matplotlib. The command %matplotlib inline permits in-line plots, that is, images are saved in the notebook. This means that the figures are visible when the notebook is open, even without execution.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(feo.energy, feo.mu)\n\nfrom larch.wxlib import plotlabels as plab\nplt.plot(feo.k, feo.chi*feo.k**2)\nplt.xlabel(plab.k)\nplt.ylabel(plab.chikw.format(2))\n\nplt.plot(feo.k, feo.chi*feo.k**2, label='chi(k)')\nplt.plot(feo.k, feo.kwin, label='window')\nplt.xlabel(plab.k)\nplt.ylabel(plab.chikw.format(2))\nplt.legend()", "A work-in-progress utility is available in sloth.utils.xafsplotter. It is simply a wrapper on top of the wonderful plt.subplots(). The goal of this utility is to produce in-line nice figures with standard layouts ready for reporting your analysis to colleagues. With little effort/customization, those plots could be converted to publication quality figures...\nCurrently (September 2019), not much is available. To show the idea behind, previous plots are condensed in a single figure.", "from sloth.utils.xafsplotter import XAFSPlotter\np = XAFSPlotter(ncols=2, nrows=2, dpi=150, figsize=(6, 4))\np.plot(feo.energy, feo.mu, label='raw', win=0)\np.plot(feo.energy, feo.i0, label='i0', win=0, side='right')\np.plot(feo.energy, feo.norm, label='norm', win=1)\np.plot(feo.k, feo.chi*feo.k**2, label='chi2', win=2)\np.plot(feo.k, feo.chi*feo.k**2, label='chi(k)', win=3)\np.plot(feo.k, feo.kwin, label='window', win=3)\np.subplots_adjust(top=0.9)\n\ndir(feo)", "Test interactive plot with wxmplot.interactive\nWith the following commands is possible to open an external plotting window (based on Wxpython) permitting interactive tasks.", "from wxmplot.interactive import plot\nplot(feo.energy, feo.mu, label='mu', xlabel='Energy', ylabel='mu', show_legend=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tkurfurst/deep-learning
embeddings/Skip-Gram word2vec.ipynb
mit
[ "Skip-gram word2vec\nIn this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like translations.\nReadings\nHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.\n\nA really good conceptual overview of word2vec from Chris McCormick \nFirst word2vec paper from Mikolov et al.\nNIPS paper with improvements for word2vec also from Mikolov et al.\nAn implementation of word2vec from Thushan Ganegedara\nTensorFlow word2vec tutorial\n\nWord embeddings\nWhen you're dealing with language and words, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as \"black\", \"white\", and \"red\" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.\n<img src=\"assets/word2vec_architectures.png\" width=\"500\">\nIn this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.\nFirst up, importing packages.", "import time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils", "Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.", "from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport zipfile\n\ndataset_folder_path = 'data'\ndataset_filename = 'text8.zip'\ndataset_name = 'Text8 Dataset'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(dataset_filename):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:\n urlretrieve(\n 'http://mattmahoney.net/dc/text8.zip',\n dataset_filename,\n pbar.hook)\n\nif not isdir(dataset_folder_path):\n with zipfile.ZipFile(dataset_filename) as zip_ref:\n zip_ref.extractall(dataset_folder_path)\n \nwith open('data/text8') as f:\n text = f.read()", "Preprocessing\nHere I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function coverts any punctuation into tokens, so a period is changed to &lt;PERIOD&gt;. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. 
If you want to write your own functions for this stuff, go for it.", "words = utils.preprocess(text)\nprint(words[:30])\n\nprint(\"Total words: {}\".format(len(words)))\nprint(\"Unique words: {}\".format(len(set(words))))", "And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word (\"the\") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.", "vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)\nint_words = [vocab_to_int[word] for word in words]", "Subsampling\nWords that show up often such as \"the\", \"of\", and \"for\" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by \n$$ P(w_i) = 1 - \\sqrt{\\frac{t}{f(w_i)}} $$\nwhere $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.\nI'm going to leave this up to you as an exercise. This is more of a programming challenge, than about deep learning specifically. But, being able to prepare your data for your network is an important skill to have. Check out my solution to see how I did it.\n\nExercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to train_words.", "## Your code here\ntrain_words = # The final subsampled word list", "Making batches\nNow that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$. \nFrom Mikolov et al.: \n\"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels.\"\n\nExercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.", "def get_target(words, idx, window_size=5):\n ''' Get a list of words in a window around an index. '''\n \n # Your code here\n \n return", "Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. 
This is a generator function by the way, helps save memory.", "def get_batches(words, batch_size, window_size=5):\n ''' Create a generator of word batches as a tuple (inputs, targets) '''\n \n n_batches = len(words)//batch_size\n \n # only full batches\n words = words[:n_batches*batch_size]\n \n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx+batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x]*len(batch_y))\n yield x, y\n ", "Building the graph\nFrom Chris McCormick's blog, we can see the general structure of our network.\n\nThe input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.\nThe idea here is to train the hidden layer weight matrix to find efficient representations for our words. This weight matrix is usually called the embedding matrix or embedding look-up table. We can discard the softmax layer becuase we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.\nI'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.\n\nExercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.", "train_graph = tf.Graph()\nwith train_graph.as_default():\n inputs = tf.placeholder(tf.int32, [None, None])\n labels = tf.placeholder(tf.int32, [None, 1])", "Embedding\nThe embedding matrix has a size of the number of words by the number of neurons in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \\times 300$. Remember that we're using one-hot encoded vectors for our inputs. When you do the matrix multiplication of the one-hot vector with the embedding matrix, you end up selecting only one row out of the entire matrix:\n\nYou don't actually need to do the matrix multiplication, you just need to select the row in the embedding matrix that corresponds to the input word. Then, the embedding matrix becomes a lookup table, you're looking up a vector the size of the hidden layer that represents the input word.\n<img src=\"assets/word2vec_weight_matrix_lookup_table.png\" width=500>\n\nExercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform. 
This TensorFlow tutorial will help if you get stuck.", "n_vocab = len(int_to_vocab)\nn_embedding = 200 # Number of embedding features \nwith train_graph.as_default():\n    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1)) # create embedding weight matrix here (uniform in [-1, 1], as suggested above)\n    embed = tf.nn.embedding_lookup(embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output", "Negative sampling\nFor every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called \"negative sampling\". TensorFlow has a convenient function to do this, tf.nn.sampled_softmax_loss.\n\nExercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.", "# Number of negative labels to sample\nn_sampled = 100\nwith train_graph.as_default():\n    softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1)) # create softmax weight matrix here; sampled_softmax_loss expects shape [n_vocab, n_embedding]\n    softmax_b = tf.Variable(tf.zeros(n_vocab)) # create softmax biases here\n    \n    # Calculate the loss using negative sampling\n    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab, name='sampled_softmax_loss') \n    \n    cost = tf.reduce_mean(loss)\n    optimizer = tf.train.AdamOptimizer().minimize(cost)", "Validation\nThis code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and a few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.", "import random  # needed for random.sample below\n\nwith train_graph.as_default():\n    ## From Thushan Ganegedara's implementation\n    valid_size = 16 # Random set of words to evaluate similarity on.\n    valid_window = 100\n    # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent \n    valid_examples = np.array(random.sample(range(valid_window), valid_size//2))\n    valid_examples = np.append(valid_examples, \n                               random.sample(range(1000,1000+valid_window), valid_size//2))\n\n    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n    \n    # We use the cosine distance:\n    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))\n    normalized_embedding = embedding / norm\n    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)\n    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))\n\n# If the checkpoints directory doesn't exist:\n!mkdir checkpoints", "Training\nBelow is the code to train the network. Every 100 batches it reports the training loss.
Every 1000 batches, it'll print out the validation words.", "epochs = 10\nbatch_size = 1000\nwindow_size = 10\n\nwith train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n iteration = 1\n loss = 0\n sess.run(tf.global_variables_initializer())\n\n for e in range(1, epochs+1):\n batches = get_batches(train_words, batch_size, window_size)\n start = time.time()\n for x, y in batches:\n \n feed = {inputs: x,\n labels: np.array(y)[:, None]}\n train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)\n \n loss += train_loss\n \n if iteration % 100 == 0: \n end = time.time()\n print(\"Epoch {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Avg. Training loss: {:.4f}\".format(loss/100),\n \"{:.4f} sec/batch\".format((end-start)/100))\n loss = 0\n start = time.time()\n \n if iteration % 1000 == 0:\n ## From Thushan Ganegedara's implementation\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = int_to_vocab[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = int_to_vocab[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n \n iteration += 1\n save_path = saver.save(sess, \"checkpoints/text8.ckpt\")\n embed_mat = sess.run(normalized_embedding)", "Restore the trained network if you need to:", "with train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n embed_mat = sess.run(embedding)", "Visualizing the word vectors\nBelow we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local stucture. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.", "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\nviz_words = 500\ntsne = TSNE()\nembed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])\n\nfig, ax = plt.subplots(figsize=(14, 14))\nfor idx in range(viz_words):\n plt.scatter(*embed_tsne[idx, :], color='steelblue')\n plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)" ]
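To see the claim from the introduction, that words showing up in similar contexts get nearby vectors, the trained embedding matrix can be queried directly with NumPy. This sketch assumes the embed_mat, vocab_to_int and int_to_vocab objects from this notebook, and that the query word (here 'king', an assumption) exists in the vocabulary; the nearest_words helper is introduced only for illustration.

```python
# Nearest neighbours by cosine similarity in the learned embedding space (sketch).
import numpy as np

def nearest_words(word, embed_mat, vocab_to_int, int_to_vocab, k=8):
    vecs = embed_mat / np.linalg.norm(embed_mat, axis=1, keepdims=True)  # unit-length rows
    sims = vecs @ vecs[vocab_to_int[word]]                               # cosine similarities
    best = np.argsort(-sims)[1:k + 1]                                    # skip the word itself
    return [int_to_vocab[i] for i in best]

print(nearest_words('king', embed_mat, vocab_to_int, int_to_vocab))
```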
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gwtsa/gwtsa
examples/notebooks/8_pastas_synthetic.ipynb
mit
[ "Pastas Noise model\nDeveloped by Stijn Klop and Mark Bakker\nThis Notebook contains a number of examples and tests with synthetic data. The purpose of this notebook is to demonstrate the noise model of Pastas.\nIn this Notebook, heads are generated with a known response function. Next, Pastas is used to solve for the parameters of the model it is verified that Pastas finds the correct parameters back. Several different types of errors are introduced in the generated heads and it is tested whether the confidence intervals computed by Pastas are reasonable. \nThe first step is to import all the required python packages.", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.special import gammainc, gammaincinv\nimport pandas as pd\nimport pastas as ps", "Load data and define functions\nThe rainfall and reference evaporation are read from file and truncated for the period 1980 - 2000. The rainfall and evaporation series are taken from KNMI station De Bilt. The reading of the data is done using Pastas.\nHeads are generated with a Gamma response function which is defined below.", "rain = ps.read.read_knmi('data_notebook_5/etmgeg_260.txt', variables='RH').series\nevap = ps.read.read_knmi('data_notebook_5/etmgeg_260.txt', variables='EV24').series\nrain = rain['1980':'1999']\nevap = evap['1980':'1999']\n\ndef gamma_tmax(A, n, a, cutoff=0.99):\n return gammaincinv(n, cutoff) * a\n\ndef gamma_step(A, n, a, cutoff=0.99):\n tmax = gamma_tmax(A, n, a, cutoff)\n t = np.arange(0, tmax, 1)\n s = A * gammainc(n, t / a)\n return s\n\ndef gamma_block(A, n, a, cutoff=0.99):\n # returns the gamma block response starting at t=0 with intervals of delt = 1\n s = gamma_step(A, n, a, cutoff)\n return np.append(s[0], s[1:] - s[:-1])", "The Gamma response function requires 3 input arguments; A, n and a. The values for these parameters are defined along with the parameter d, the base groundwater level. The response function is created using the functions defined above.", "Atrue = 800\nntrue = 1.1\natrue = 200\ndtrue = 20\nh = gamma_block(Atrue, ntrue, atrue) * 0.001\ntmax = gamma_tmax(Atrue, ntrue, atrue)\nplt.plot(h)\nplt.xlabel('Time (days)')\nplt.ylabel('Head response (m) due to 1 mm of rain in day 1')\nplt.title('Gamma block response with tmax=' + str(int(tmax)));", "Create synthetic observations\nRainfall is used as input series for this example. No errors are introduced. A Pastas model is created to test whether Pastas is able to . The generated head series is purposely not generated with convolution.\nHeads are computed for the period 1990 - 2000. Computations start in 1980 as a warm-up period. Convolution is not used so that it is clear how the head is computed. The computed head at day 1 is the head at the end of day 1 due to rainfall during day 1. No errors are introduced.", "step = gamma_block(Atrue, ntrue, atrue)[1:]\nlenstep = len(step)\nh = dtrue * np.ones(len(rain) + lenstep)\nfor i in range(len(rain)):\n h[i:i + lenstep] += rain[i] * step\nhead = pd.DataFrame(index=rain.index, data=h[:len(rain)],)\nhead = head['1990':'1999']\n\nplt.figure(figsize=(12,5))\nplt.plot(head,'k.', label='head')\nplt.legend(loc=0)\nplt.ylabel('Head (m)')\nplt.xlabel('Time (years)');", "Create Pastas model\nThe next step is to create a Pastas model. The head generated using the Gamma response function is used as input for the Pastas model. \nA StressModel instance is created and added to the Pastas model. 
The StressModel instance takes the rainfall series as input as well as the type of response function, in this case the Gamma response function (ps.Gamma).\nThe Pastas model is solved without a noise model since there is no noise present in the data. The results of the Pastas model are plotted.", "ml = ps.Model(head)\nsm = ps.StressModel(rain, ps.Gamma, name='recharge', settings='prec')\nml.add_stressmodel(sm)\nml.solve(noise=False)\nml.plots.results();", "The results of the Pastas model show the calibrated parameters for the Gamma response function. The parameters calibrated using Pastas are equal to the Atrue, ntrue, atrue and dtrue parameters defined above. The Explained Variance Percentage for this example model is 100%. \nThe results plots show that the Pastas simulation is identical to the observed groundwater head. The residuals of the simulation are shown in the plot together with the response function and the contribution for each stress.\nBelow the Pastas block response and the true Gamma response function are plotted.", "plt.plot(gamma_block(Atrue, ntrue, atrue), label='Synthetic response')\nplt.plot(ml.get_block_response('recharge'), '-.', label='Pastas response')\nplt.legend(loc=0)\nplt.ylabel('Head response (m) due to 1 m of rain in day 1')\nplt.xlabel('Time (days)');", "Test 1: Adding noise\nIn the next test example, noise is added to the observations of the groundwater head. The noise is normally distributed noise with a mean of 0 and a standard deviation of 1 and is scaled with the standard deviation of the head. \nThe noise series is added to the head series created in the previous example.", "random_seed = np.random.RandomState(15892)\n\nnoise = random_seed.normal(0,1,len(head)) * np.std(head.values) * 0.5\nhead_noise = head[0] + noise", "Create Pastas model\nA Pastas model is created using the head with noise. A stress model is added to the Pastas model and the model is solved.", "ml2 = ps.Model(head_noise)\nsm2 = ps.StressModel(rain, ps.Gamma, name='recharge', settings='prec')\nml2.add_stressmodel(sm2)\nml2.solve(noise=True)\nml2.plots.results();", "The results of the simulation show that Pastas is able to filter the noise from the observed groundwater head. The simulated groundwater head and the generated synthetic head are plotted below. The parameters found with the Pastas optimization are similar to the original parameters of the Gamma response function.", "plt.figure(figsize=(12,5))\nplt.plot(head_noise, '.k', alpha=0.1, label='Head with noise')\nplt.plot(head, '.k', label='Head true')\nplt.plot(ml2.simulate(), label='Pastas simulation')\nplt.title('Simulated Pastas head compared with synthetic head')\nplt.legend(loc=0)\nplt.ylabel('Head (m)')\nplt.xlabel('Date (years)');", "Test 2: Adding correlated noise\nIn this example correlated noise is added to the observed head. The correlated noise is generated using the noise series created in the previous example. The correlated noise is implemented as exponential decay using the following formula:\n$$ n_{c}(t) = e^{-1/\\alpha} \\cdot n_{c}(t-1) + n(t)$$\nwhere $n_{c}$ is the correlated noise, $\\alpha$ is the noise decay parameter and $n$ is the uncorrelated noise.
The noise series that is created is added to the observed groundwater head.", "noise_corr = np.zeros(len(noise))\nnoise_corr[0] = noise[0]\n\nalphatrue = 2\n\nfor i in range(1, len(noise_corr)):\n    noise_corr[i] = np.exp(-1/alphatrue) * noise_corr[i - 1] + noise[i]\n    \nhead_noise_corr = head[0] + noise_corr", "Create Pastas model\nA Pastas model is created using the head with correlated noise as input. A stress model is added to the model and the Pastas model is solved. The results of the model are plotted.", "ml3 = ps.Model(head_noise_corr)\nsm3 = ps.StressModel(rain, ps.Gamma, name='recharge', settings='prec')\nml3.add_stressmodel(sm3)\nml3.solve(noise=True)\nml3.plots.results();", "The Pastas model is able to calibrate the model parameters fairly well. The calibrated parameters are close to the true values defined above. The noise_alpha parameter calibrated by Pastas is close to the alphatrue parameter defined for the correlated noise series.\nBelow the head simulated with the Pastas model is plotted together with the head series and the head series with the correlated noise.", "plt.figure(figsize=(12,5))\nplt.plot(head_noise_corr, '.k', alpha=0.1, label='Head with correlated noise')\nplt.plot(head, '.k', label='Head true')\nplt.plot(ml3.simulate(), label='Pastas simulation')\nplt.title('Simulated Pastas head compared with synthetic head')\nplt.legend(loc=0)\nplt.ylabel('Head (m)')\nplt.xlabel('Date (years)');" ]
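The notebook above stresses that the synthetic head was built with an explicit loop rather than convolution, so that the computation is easy to follow. As a cross-check, the same series can be obtained with np.convolve. This is a sketch that assumes the rain, step, dtrue and head objects defined earlier in this notebook; the h_conv and head_conv names are introduced only for this illustration.

```python
# Convolution equivalent of the explicit loop used to generate the synthetic head.
import numpy as np
import pandas as pd

h_conv = dtrue + np.convolve(rain.values, step)[:len(rain)]
head_conv = pd.DataFrame(index=rain.index, data=h_conv)['1990':'1999']
print(np.allclose(head_conv.values, head.values))   # True: both constructions agree
```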
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sameersingh/ml-discussions
week3/lc_and_perceptron.ipynb
apache-2.0
[ "from __future__ import division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mltools as ml\n\nnp.random.seed(0)\n%matplotlib inline", "Differences Between Linear Classifier and Linear Regression\nWe start with loading a data that was created for this discussion and talk a about the differences between linear regression and linear classifier.", "lc2_data = np.genfromtxt('./lc2_data.txt', delimiter=None)\nX, Y = lc2_data[:, :-1], lc2_data[:, -1]\n\nf, ax = plt.subplots(1, 2, figsize=(20, 8))\n\nmask = Y == -1\n\nax[0].scatter(X[mask, 0], X[mask, 1], s=120, color='blue', marker='s', alpha=0.75)\nax[0].scatter(X[~mask, 0], X[~mask, 1], s=340, color='red', marker='*', alpha=0.75)\n\nax[0].set_xticklabels(ax[0].get_xticks(), fontsize=25)\nax[0].set_yticklabels(ax[0].get_yticks(), fontsize=25)\n\nax[1].scatter(X[:, 0], X[:, 1], s=120, color='black', alpha=0.75)\n\nax[1].set_xticklabels(ax[1].get_xticks(), fontsize=25)\nax[1].set_yticklabels(ax[1].get_yticks(), fontsize=25)\n\n\nplt.show()", "Some of the questions that were asked in class by me or by the students. Make sure you know how to answer all of them :)\n\nIf it's a linear classifier, and the blue and red are the differenc classes, how many features do we have here?\nHow would a classifier line will look like if I plot it here?\nGive me a real life example.\nIf it's a linear regression and you can ignore the colors, how many features are here?\nAnd a regression line? (The simple f(x) = ax + c one)\nGive me a real life example.\nCan I treat this problem as a regression if I tell you that the Y value is now 0 if it's blue and 1 if red?\nHow many features do we have now? \nHow would the regression line look like?\n\nGive me a real life example.\n\n\nMy task if to answer 'See this new point? Should it be red or blue?' -- which one do I need?\n\nMy task is now to answer 'what will be the value of a new point at 8.3?' -- which one do I need now?\nHow about 'I know the value of 'Y' is 4.7, what was the value of X?'\nSo how would a test data look like for the classification problem?\nAnd how would it look like for the regression problem?\n\nBuilding a Classifier from the Ground Up\nIn the rest of the discussion we will show how to code a classifier from the ground up. This will be extremely useful not only for your homework assignment but also for future references. Most ML coding tend to be similar to one another so this will be reusable even in super complicated models.\nPerceptron Algorithm\nAs a simple example we will use the Perceptron Algorithm. We will build each part seperately, showing how it works and end by wrapping it all up in a classifier class that can be used with the mltools library. \nWe will use a 2 classes Perceptron with classes ${-1, 1}$. In the discussion you can also see how to use a binary classes ${0, 1}$ and in the wiki page you can see a generalization to multiple classes.\nFor an illustration of the algorithm you can watch this YouTube clip\nDecision Boundry and Classification\nThe Perceptron used a decidion boundry $\\theta$ to compute a value of each point. Then with a simple sign threshold decides on the class.\nWe'll start by computing the decision value for each point $x^j$: $$\\theta x^j$$\nLet's choose $j=90$ and let's define: $$\\theta = \\left[-6, 0.5, 1\\right]$$", "theta = np.array([-6., 0.5, 1.])", "Notice the '.'s. 
This will make sure it's a float and not an integer, which can cause problems later down the line.\n$\\theta$ has three features that will correspond to the constant (also known as the 'bias' or 'intercept') and two for the two features of X. So first we will add a constant to all the X data. \nDo not use fpoly to do that; the behavior of that function is unexpected when there is more than one feature.", "def add_const(X):\n return np.hstack([np.ones([X.shape[0], 1]), X])\n\nXconst = add_const(X)\nx_j, y_j = Xconst[90], Y[90]", "Response Value\nThe first step in the perceptron is to compute the response value. It's computed as the inner product $\\theta x^j$. The simple, intuitive way to do that is to use a for loop.", "x_theta = 0\nfor i in range(x_j.shape[0]):\n x_theta += x_j[i] * theta[i]\n \nprint x_theta", "This is a VERY inefficient way to do that. Luckily for us, numpy has the answer in the form of np.dot().", "print np.dot(x_j, theta)", "Classification Decision\nNow let's compute the decision classification $T[\\theta x^j]$. One option is to use the np.sign method. This will not be a good solution because np.sign(0) = 0.\nOne way of solving it is to use epsilon.", "eps = 1e-200\ndef sign(vals):\n \"\"\"Returns 1 if val >= 0 else -1\"\"\"\n return np.sign(vals + eps)", "Predict function\nSo now with the decision value and my_sign we can write the predict function", "def predict(x_j, theta):\n \"\"\"Returns the class prediction of a single point x_j\"\"\"\n return sign(np.dot(x_j, theta))\n\nprint predict(x_j, theta)", "Predict multiple\nDuring the discussions I brought up that some methods of computing the inner product (such as np.sum()) will not work for multiple points at the same time unless you take steps to make it work.", "def predict_with_np_sum(X, theta):\n \"\"\"Predicts the class value for multiple points or a single point at the same time. \"\"\"\n X = np.atleast_2d(X)\n \n return np.sum(theta * X, axis=1)", "Computing the Prediction Error\nUsing the predict function, we can now compute the prediction error: $$J^j = (y^j - \\hat{y}^j)$$", "def pred_err(X, Y, theta):\n \"\"\"Predicts the class for X and returns the error rate. \"\"\"\n Yhat = predict(X, theta)\n return np.mean(Yhat != Y)\n\nprint pred_err(x_j, y_j, theta)", "Learning Update\nUsing the error we can now even do the update step in the learning algorithm: $$\\theta = \\theta + \\alpha * (y^j - \\hat{y}^j)x^j$$", "a = 0.1\ny_hat_j = predict(x_j, theta)\nprint theta + a * (y_j - y_hat_j) * x_j", "Train method\nUsing everything we coded so far, we can fully create the train method", "def train(X, Y, a=0.01, stop_tol=1e-8, max_iter=1000):\n # Start by adding a const\n Xconst = add_const(X)\n \n m, n = Xconst.shape\n \n # Initializing theta\n theta = np.array([-6., 0.5, 1.])\n \n # The update loops\n J_err = [np.inf]\n for i in range(1, max_iter + 1):\n for j in range(m):\n x_j, y_j = Xconst[j], Y[j]\n y_hat_j = predict(x_j, theta)\n theta += a * (y_j - y_hat_j) * x_j\n\n curr_err = pred_err(Xconst, Y, theta)\n J_err.append(curr_err)\n\n if np.abs(J_err[-2] - J_err[-1]) < stop_tol:\n print 'Reached convergence after %d iterations. 
Prediction error is: %.3f' % (i, J_err[-1])\n break\n \n return theta\n\ntheta_trained = train(X, Y)", "Creating a Perceptron Classifier\nNow let's use all the code that we wrote and create a Python class Perceptron that can plug in to the mltools package.\nIn order to do that, the Perceptron class has to inherit from mltools.base.classifier.\nIn case you haven't looked at the actual code in the mltools package, now will probably be the right time.", "from mltools.base import classifier", "In order to create an object, we'll have to add self to all the methods.", "class Perceptron(classifier):\n def __init__(self, theta=None):\n self.theta = theta\n \n def predict(self, X):\n \"\"\"Returns class prediction for either single point or multiple points. \"\"\"\n # I'm adding this stuff here so it could work with the plotClassify2D method.\n Xconst = np.atleast_2d(X)\n \n # Making sure it has the const, if not adding it.\n if Xconst.shape[1] == self.theta.shape[0] - 1:\n Xconst = add_const(Xconst)\n \n return self.sign(np.dot(Xconst, self.theta))\n \n def sign(self, vals):\n \"\"\"A sign version with breaking 0's as +1. \"\"\"\n return np.sign(vals + 1e-200)\n \n def pred_err(self, X, Y):\n Yhat = self.predict(X)\n return np.mean(Yhat != Y)\n \n def train(self, X, Y, a=0.02, stop_tol=1e-8, max_iter=1000):\n # Start by adding a const\n Xconst = add_const(X)\n\n m, n = Xconst.shape\n \n # Making sure Theta is initialized.\n if self.theta is None:\n self.theta = np.random.random(n)\n\n # The update loops\n J_err = [np.inf]\n for i in range(1, max_iter + 1):\n for j in range(m):\n x_j, y_j = Xconst[j], Y[j]\n y_hat_j = self.predict(x_j)\n self.theta += a * (y_j - y_hat_j) * x_j\n\n curr_err = self.pred_err(Xconst, Y)\n J_err.append(curr_err)\n\n if np.abs(J_err[-2] - J_err[-1]) < stop_tol:\n print 'Reached convergence after %d iterations. Prediction error is: %.3f' % (i, J_err[-1])\n break", "Creating a model, training and plotting predictions\nFirst let's create the model with some initialized theta and plot the decision boundaries. For the plotting we can use the mltools plotClassify2D !!! wowowowo!!!!", "model = Perceptron()\nmodel.theta = np.array([-6., 0.5, 1])\n\nml.plotClassify2D(model, X, Y)", "Next, let's actually train the model and plot the new decision boundary.", "model.train(X, Y)\nml.plotClassify2D(model, X, Y)", "We found the best classifier!!!" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
manulera/ModellingCourse
ReAct/Python/ReAct_Notebook.ipynb
gpl-3.0
[ "ReAct, a useful program to understand Gillespie algorithm\nCore functions\nA brief descriptions of what these functions do\n\nReAct: the main function. Returns the deterministic and Gillespie solutions for the concentration of all the chemical species in time. It also prints the reactions of the system.\nInput:\nInitial concentration of all chemical species involved in the system, provided as a list of length 2xN, where N is the number of chemical species in the system. The concentration of each chemical species is defined by groups of 2 consecutive elements in a list: $[R_{i},n_{R_{i}},R_{i+1},n_{R_{i+1}},...,R_{n},n_{R_{n}} ]$\n\nReactions: The reactions writen as a tuple of size 3xN, where N is an int. Each reaction in the system is defined by 3 consecutive elements of the tuple:\n\n\nTuple of reactants (R<sub>i</sub>,SI<sub>i</sub>,R<sub>i+1</sub>,SI<sub>i+1</sub>,...,R<sub>n</sub>,SI<sub>n</sub>), where n is the number of reactants, R<sub>i</sub> is the name of reactant \"i\" as a string, and SI<sub>i</sub> is the stoichiometric index.\n\nNote: Those familiar with metabolic pathways or other biochemical reactions systems probably\n have frequently encountered chemical reactions represented like in Fig.1. In this MAP cascade,\n for reaction 1 (the one with k1), the reaction speed depends on the concentration of MAP3K and\n receptor; however, the receptor is not a reactive nor a product of the reaction, it acts like an\n enzyme/catalizer for the reaction. For an accurate representation of this chemical reaction, we would\n need a Michaelis-Menten-like description: E+S<->ES-E+P. However, in the description of these \n reactions in textbooks or papers we often find only one kynetic constant. This simplification is a \n good approximation if the reaction limitting step is the association of the enzyme with the \n substrate, and the second reaction is much faster than the association(go to Michaelis-Menten to see \n this). However, this poses a problem for the way this program works: in the stoichiometry matrix, the\n value for receptor in this reaction is 0 when we write the reaction receptor+S->receptor+P.\n\n$\\left[ \\begin{array}{cc}\n receptor & 0 \\\n MAP3K & -1\\\n MAP3KP & +1\\ \\end{array} \\right]$\nTo overcome this problem, to specify that a chemical species acts as an enzime/catalizer as described \n before, we include it as one of the reactants in the reaction tuple, and we set the stoichiometry\n index to -1, for instance this rection would be described as follows: \n $('receptor',-1,'MAP3K',1),('MAP3KP',1),k1$\n\n\nTuple of products, in the same way as the reactives\n\n\nThe reaction constant, as a numb\n * Time: a np.array containing the time points for which you want to calculate the integration for the deterministic solution. Gillespie will run from the first value until the time exceeds the last value.\n * Mode: by default, it is 0. If mode=0, calculate the Gillespie and deterministic solution; if mode=1, only deterministic; if mode=2, only Gillespie.\n\nDetSol: this function is called by ReAct, and calculates the deterministic solution, if you want to understand how it works, see the python code in the cell below.\nGillespy: this function is called by ReAct, and calculates the Gillespie solution, if you want to understand how it works, see the python code in the cell below. If you do so, you will see that both functions take the same arguments, and behave in a very similar way, with small differences. 
If you know the basics of programming, a look at these functions would be very useful to understand the differences between the two methods.\nGillesplot: plots the calculated solutions. \nInput:\nThe first 6 arguments (solution,t,tgill, valsgill,rows,mode) are the output of the ReAct function (check the code for propper understanding). The next optional argument is a list of strings with the names of the chemical species that you want to represent, for instance $[R_1,R_3]$, will plot only the concentrations of R<sub>1</sub> and RR<sub>3</sub>.\n<img src=\"Images/miniMAP.png\" style=\"width: 200px;\"/>\nFigure 1: MAP kinase cascade, as frequently depicted in textbooks\n\n\n\n\n\n\n\n\n\n\n\nIn the following cell, I include a useful function: It will print the python code from your files in the Jupyter cell. This is very useful, since you probably want to edit your important files in an external editor, save them, and bring them to Jupyter. Copy-paste is always a bad idea, like this you can make sure that the code displayed in the notebook is the same as the one you have in the file. However, if you want to change something in the code, you will have to change in the file (you cannot edit the code in jupyter, since it is just printed in the cell). With this function, the code in your file will be output in the end, with the code coloured as in Jupyter.\nAlso, if you change something in the file, and you want that change to be applied in the notebook, you will have to restart the kernel! (Running the same file again won't make a difference)", "from pygments import highlight\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import HtmlFormatter\n\nimport IPython\ndef PrintPythonFile(filename):\n\n f = open(filename) \n code = f.read()\n\n formatter = HtmlFormatter()\n return IPython.display.HTML('<style type=\"text/css\">{}</style>{}'.format(\n formatter.get_style_defs('.highlight'),\n highlight(code, PythonLexer(), formatter)))", "Gilles.py is the file that contains the important functions, we will go through it to understand the main differences between the deterministic and stochastic solution, but first let's see some examples!", "%run Gilles.py", "Here we can see some examples for the use of ReAct", "%run 'Example1_oscillations.py'\nPrintPythonFile('Example1_oscillations.py')", "Is this oscilatory effect only? If we change the number of molecules of A from 100 to 1000 what do we see? 
How could we quantify the relevance of this oscilations with respect to the equilibrium?", "%run 'Example2_Ask4Oscillations.py'\nPrintPythonFile('Example2_Ask4Oscillations.py')", "You can copy the content of the file into a new cell, and change the values, explore how the parameters affect the outcome using the cell below.", "# Initial conditions\nuser_input = ['A', 100,\n 'B', 0]\n# Constants (this is not necessary, they could be filled up already in the reaction tuple)\nk = (12,8)\n\n# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)\nreactions = (\n (1,'A'),(1,'B'),k[0],\n (1,'B'),(1,'A'),k[1],\n)\n# dt is used for the deterministic calculation, and the\ndt=0.0001\nt = np.arange(0, 4, dt)\n\n(solution,(tgill, valsgill, _, _),rows,mode)=ReAct(user_input,reactions,t)\n\nGillesplot(solution,t,tgill, valsgill,rows,mode)\n\nplt.show()", "Now, let's look at a maybe more relevant situation for biologists, the already mentioned MAP kinase cascade.\n<img src=\"Images/miniMAP.png\" style=\"width: 200px;\"/>\nKinase cascades are known for amplifying the signal: a minor change in the cell, for example, a transient activation of a small number of receptors, is amplified by the cascade and results in major changes in the cell state. Have a look at the example below, do we see this effect?\nThe first graph is a bit crowded, so we can choose to plot only the most relevant species for us.\nThe second graph shows how the Map1K is strongly amplified, explore how the parameters (initial concentrations and kynetic constants) affect the outcome of the response in the cell below. Try to find a link with the explained role of kinase cascades.", "%run 'Example3_KyneticCascade.py'\nPrintPythonFile('Example3_KyneticCascade.py')", "Explore how the parameters (initial concentrations and kynetic constants) affect the outcome of the response in the cell below. Try to find a link with the explained role of kinase cascades.", "import numpy as np\nfrom Gilles import *\nimport matplotlib.pyplot as plt\n\n# Initial conditions\nuser_input = ['Rec', 10,\n '1M3', 10,\n '1M3P', 0,\n '1M2', 20,\n '1M2P', 0,\n '1M1', 30,\n '1M1P', 0]\n\n# Constants (this is not necessary, they could be filled up already in the reaction tuple)\nk = (2,0.05,1,0.5,1,0.5,1)\n\n# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)\nreactions = (\n (1,'Rec'),(),k[0],\n (-1,'Rec',1,'1M3'),(1,'1M3P'),k[1],\n (1,'1M3P'),(1,'1M3'),k[2],\n (-1,'1M3P',1,'1M2'),(1,'1M2P'),k[3],\n (1,'1M2P'),(1,'1M2'),k[4],\n (-1, '1M2P', 1, '1M1'), (1, '1M1P'), k[5],\n (1, '1M1P'), (1, '1M1'), k[6],\n)\n# dt is used for the deterministic calculation, and the\ndt=0.00001\nt = np.arange(0, 10, dt)\n\n(solution,(tgill, valsgill, _, _),rows,mode)=ReAct(user_input,reactions,t)\n\n\nGillesplot(solution,t,tgill, valsgill,rows,mode)\nplt.figure()\nGillesplot(solution,t,tgill, valsgill,rows,mode,['Rec','1M3P','1M2P','1M1P'])\nplt.show()", "The predator-pray model\nAlso known as Lotka–Volterra equations:\n<img src=\"Images/Lotka_volterra.svg\" style=\"width: 150px;\"/>\nWhere, x is the number of preys , and y is the number of predators. Before looking at the next cell, how would you write these equations as a chemical reaction?\nWhat does each reaction represent", "%run 'Example_PredatorPray.py'\nPrintPythonFile('Example_PredatorPray.py')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
metabolite-atlas/metatlas
notebooks/reference/Workflow_Notebook_Metatlas_Stable_v0.1.0_20210303.ipynb
bsd-3-clause
[ "Instructions\nMetAltas analysis workflow\nThis notebook is for performing a targeted metabolomics analysis on your raw data files. In order for this to work, you data files must be uploaded to the nersc raw data directory with appropriate permissions. A fileconverter runs at nersc to convert as follows:\n.raw > .mzml > .h5 > pactolus.gz This conversion must happen before you can proceed with data analysis. A Northen Lab staff member can help you transfer and convert your files if they are not already at nersc.\n\n/global/project/projectdirs/metatlas/raw_data/SUBFOLDER\n\nA targeted analysis requires a list of compounds to search for in your sample files. Thus, at a minimum, you must have some information on their m/z ratio in order to get started. The inputs for this notebook, include the location of your raw files, and an atlas listing out these compounds. \nOur lab has analyzed thousands of compounds using standardized LCMS methods. You may use one of these libraries to create an atlas for your sample analysis, assuming you used one of the same standard LCMS methods.\nAnalysis steps\n<ol>\n<li>Create your atlas csv external to jupyter: typically you will either have a custom atlas or generate an atlas from one of our EMA libraries. These can be found in the shared google drive, under atlases.</li>\n<li>Run through with your initial atlas</li>\n<li>Use the RT adjuster to mark the quality of the peaks and MSMS matches, and also adjust your RT bounds</li>\n<li>Reload your atlas from the \"get atlas\" block</li>\n<li>Export your atlas, filter out the compounds marked under id notes as remove. Save and upload your new atlas</li>\n<li>Repeat the analysis using your filtered atlas.</li>\n <li>On your final export, make sure you are excluding system blanks and QC files in your output. If you have extraction blanks you should include those.</li>\n </ol>\n\nThere is a database that will store your atlas and registered run files. During the analysis process you will pull these as local variables. These are used to pull out EIC and MSMS data which is stored in a variable called metatlas_dataset. All output files are generated from that. It is a local variable so is not stored in the db - thus if your kernel dies partway through, you need to rerun through the notebook to regenerate that variable.\nIMPORTANT: Anytime you adjust your RTs in the interactive plot, you need to retreive your atlas again before exporting any files. The changes are stored in the db and the exports are made from locally stored variables/dataframes.\nindex\n\nImport python packages\nCreate groups\nSelect groups\nCreate Atlases\nSelect Atlas\nAnnotate data\nCorrect RT bounds\nExport results\nTools\n\nHelpful tips\n\nWhen entering search strings, use % for wildcard: %peas will return \"positive_peas\" and \"positive_greenpeas\", but not \"positive_greenpeas_HILIC\", for that use %peas%, you can also put multiple strings via %pos%peas%\nSet your kernel to mass_spec_cori. If not sure how, check w Ben or Daniel.", "# optional: run this block to adjust the width of the notebook. Change the width percent.\n\nfrom IPython.core.display import display, HTML \ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))", "1. Import Python Packages\nInstructions:\n\nOn the 3rd line of the block below, add your directory where your most recent metatlas code is stored (to the sys.path.insert line).\nOnly run this block once to load the metatlas python modules. 
If the kernel dies, you will need to rerun this block.\nRun the block with the \"print fa.file\" code line to double check that the printed output matches the directory in the sys.path.insert line of the prior block.\n If it does not match, then on the \"kernel\" dropdown, click restart and correct the path below before trying again.", "%matplotlib notebook\nimport sys, os\n\n# v edit this line\nsys.path.insert(0,'/global/homes/FIRST-INITIAL-OF-USERNAME/USERNAME/REPOFOLDER/metatlas-master-20200416/metatlas-master/')\n# ^ edit this line\n\nfrom metatlas.tools import fastanalysis as fa\nfrom metatlas.plots import dill2plots as dp\nfrom metatlas.io import metatlas_get_data_helper_fun as ma_data\nfrom metatlas.plots import chromplotplus as cpp\nfrom metatlas.datastructures import metatlas_objects as metob\nimport qgrid\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\nfrom IPython.display import display\nimport time\nimport pickle\nimport dill\nimport multiprocessing as mp\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport operator\nfrom importlib import reload\n\npd.set_option('display.max_rows', 5000)\npd.set_option('display.max_columns', 500)\npd.set_option('display.max_colwidth', 100)\n\n# check that first block registered metatlas directory correctly. If it does not look the same, seek help!\nprint(fa.__file__)", "Set atlas, project and output directories from your nersc home directory\nSTEP 1\n1. Create a project folder name for this analysis by replacing the PROJECTDIRECTORY string text in red below. Make sure to update the rest of the direcory to point to your home directory. The pwd block will print out the directory where this jupyter notebook is stored.\n2. Create a subdirectory name for the output, each run through you may want to create a new output folder.\n3. When you run the block the folders will be created in your home directory. If the directory already exists, the block will just set the path for use with future code blocks.\nSTEP 2\n1. Enter the nersc path where you have stored your atlas files using one of the two optional lines below:\n 1. Set a custom path\n 2. Use the project and output subdirectory from above", "pwd\n\n#STEP 1\nproject_directory='/global/homes/FIRST-INITIAL-OF-USERNAME/USERNAME/PROJECTDIRECTORY/' # <- edit this line, do not copy the path directly from NERSC (ex. the u1, or u2 directories)\noutput_subfolder='HILIC_POS_20190830/' # <- edit this as 'chromatography_polarity_yyyymmdd/'\noutput_dir = os.path.join(project_directory,output_subfolder)\n\nif not os.path.exists(project_directory):\n os.makedirs(project_directory)\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n#STEP 2\npathtoatlas='/global/homes/FIRST-INITIAL-OF-USERNAME/USERNAME/DIRECTORYOFATLAS/' # <- enter the directory where you have stored your atlas\n#pathtoatlas= '%s%s' % (project_directory,output_subfolder)", "2. Create Groups (named variables that hold your replicates of each sample)\nYou must assign your raw files into experimental groups for analysis. These are used for downstream statistics and for selection of specific groups for filtering to subsets of files for analysis (Ex. just pos or just neg).\nThe groups are created from common file headers and the unique group names. 
The convention our lab group uses for filenames is as follows: \n\nDATE_NORTHENLABINITIALS_COLLABINITIALS_PROJ_EXP_SAMPSET_SYSTEM_COLUMN-method_SERIAL_POL_ACQ_SAMPLENUMBER_ SAMPLEGROUP_REP_OPTIONAL_SEQ \nEx.:20180105_SK_AD_ENIGMA_PseudoInt_R2ADec2017_QE119_50454_123456_POS_MSMS_001_Psyringae-R2A-30C-20hr_Rep01_NA_Seq001.raw\n\nThe common header consists of the fields 0-10: DATE_NORTHENLABINITIALS_COLLABINITIALS_PROJ_EXP_SAMPSET_SYSTEM_COLUMN-method_SERIAL_POL_ACQ \nThe sample group name is commonly field # 12 (between underscore 11 and 12) -0 indexed-\nFind your files\n\nOn the first line of the block below, set the 'experiment' and 'name' variables to find your files. These fields require wildcards for partial string searches\n'Experiment' is the folder name within global/project/projectdirs/metatlas/raw_data, that will be emailed to you when the files are uploaded to NERSC. You can also look in the raw_data directory for the NERSC user who uploaded your files; your experiment folder should be in there.\n'name' is string that will match a subset of your files within that folder.", "files = dp.get_metatlas_files(experiment = '%ENTERSTRING%',name = '%ENTERSTRING%',most_recent = True)\n# ^ edit the text string in experiment and name fields\n\ndf = metob.to_dataframe(files)\ndf[['experiment','name','username','acquisition_time']]\n\nlen(files)", "OPTION A: Automated Group Maker\nThis will attempt to create groups in an automated fashion (rather than filling out a spreadsheet with a list of files and group names). If your files are all in one folder at nersc, you can use this options. If not, use option B below.\nA long group name consisting of the common header + either controlled vocab value or field #12 along with a short group name (just controlled vocab or field #12) will be stored in a local variable. The short group names can be used on plots.\n\nSTEP 1: View the groups\nPick an experiment folder to look for files in on the metob.retrieve function\nEnter controlled vocabulary for control files to put select files into groups when control string may be in a different field (not #12) or as a randomly placed substring within a field (ex. if 'InjBl' is included in your controlled vocab list, files like InjBl-MeOH and StartInjBl will group together)\nIf your group name is not between _ 11 and 12 you can adjust those values in the split commands below. All other (non-controlledvocab) groups will be created from that field.\n\n\nSTEP 2: Create the groups variable after checking the output from STEP 1\nSTEP 3: <br />\n Option A: If everything looks fine the group names and short names, Store groups once you know you have files in correct groups by running and checking the output of STEPS 1 and 2.<br />\n Option B (optional): If you would like to edit the groups, uncomment the options B-I and B-II. Run Option B-I to export a prefilled tab infosheet. Edit the file and then run Option B-II to import the new groups and save it.", "#STEP 1: View the groups\n\nfiles = metob.retrieve('lcmsruns',experiment='%ENTERSTRING%',username='*')\ncontrolled_vocab = ['QC','InjBl','ISTD'] #add _ to beginning. 
It will be stripped if at begining\nversion_identifier = 'vs1'\nexclude_files = [] # Exclude files containing a substring (list) Eg., ['peas']\nfile_dict = {}\ngroups_dict = {}\nfor f in files:\n if not any(map(f.name.__contains__, exclude_files)):\n k = f.name.split('.')[0]\n # get index if any controlled vocab in filename\n indices = [i for i, s in enumerate(controlled_vocab) if s.lower() in k.lower()]\n prefix = '_'.join(k.split('_')[:11])\n if len(indices)>0:\n short_name = controlled_vocab[indices[0]].lstrip('_')\n group_name = '%s_%s_%s'%(prefix,version_identifier,short_name)\n short_name = k.split('_')[9]+'_'+short_name # Prepending POL to short_name\n else:\n short_name = k.split('_')[12]\n group_name = '%s_%s_%s'%(prefix,version_identifier,short_name)\n short_name = k.split('_')[9]+'_'+k.split('_')[12] # Prepending POL to short_name\n file_dict[k] = {'file':f,'group':group_name,'short_name':short_name}\n groups_dict[group_name] = {'items':[],'name':group_name,'short_name':short_name}\ndf = pd.DataFrame(file_dict).T\ndf.index.name = 'filename'\ndf.reset_index(inplace=True)#['group'].unique()\ndf.drop(columns=['file'],inplace=True)\nfor ug in groups_dict.keys():\n for file_key,file_value in file_dict.items():\n if file_value['group'] == ug:\n groups_dict[ug]['items'].append(file_value['file'])\ndf.head(100)\n\n#STEP 2: create the groups variable, if the above looks OK\n\ngroups = []\nfor group_key,group_values in groups_dict.items():\n g = metob.Group(name=group_key,items=group_values['items'],short_name=group_values['short_name'])\n groups.append(g) \n for item in g.items:\n print(g.name,g.short_name,item.name)\n print('')\n\n# STEP 3 Option A: store the groups variable content in the DB (currently only the long group name is stored)\nmetob.store(groups)\n\n## STEP 3 Option B-I: OPTIONAL: Export groups to csv file for editing (filename, short_name, group, description)\n#dp.make_prefilled_fileinfo_sheet(groups,os.path.join(output_dir,'prefilled_fileinfo.tab'))\n\n## STEP 3 Option B-II: Import groups from csv file after editing the prefilled_fileinfo.tab\n#groups = dp.make_groups_from_fileinfo_sheet(os.path.join(output_dir,'prefilled_fileinfo.tab'), filetype='tab', store=True)", "OPTION B: Register LCMS Runs into categorical groups from a file.\nTypically, you will make one fileinfo sheet with all of your files (pos and neg) for this experiment. At a minimum, group names MUST contain the first 11 underscore delimited fields (DATE_NORTHENLABINITIALS_COLLABINITIALS_PROJ_EXP_SAMPSET_SYSTEM_COLUMN-method_SERIAL_POL_ACQ) and the 'SAMPLEGROUP' field.\nFiles can be from multiple folders at nersc.\n\nSTEP 1: select files\nEdit the experiment and name fields to find the files you want.\n\n\nSTEP 2: create and save a .tab file to your project directory.\nAfter running the block, find the .tab file in your project directory.\nOpen in excel or other spreadsheet editor.\nFill out the group names as per above in the editor.\nSave the file as filled_fileinfo.txt\n\n\nSTEP 3: Create groups from spreadsheet\nTransfer the .txt. file back to your project directory \nRun the make groups block using store=False\n\n\nSTEP 4: CHECK groups\nRun the next block 'metob.to_dataframe(g) and check that the information looks correct\n\n\nIf it is correct, rerun the STEP 2 make groups block, using store=True. 
If not, fix your file in excel and redo Steps 2&3", "#STEP 1: Select files\nfiles = dp.get_metatlas_files(experiment = '%ENTERSTRING%',name = '%ENTERSTRING%',most_recent = True)\n# ^ edit the text string in experiment and name fields\n\n#STEP 2: Save spreadsheet file\ndp.make_empty_fileinfo_sheet('%s%s' % (output_dir,'empty_fileinfo.tab'),files)\n\n#STEP 3: create groups from file\ng = dp.make_groups_from_fileinfo_sheet('%s%s' % (output_dir,'filled_fileinfo.txt'),\n filetype='tab',\n store=True)\n\n# STEP 4: check groups\nmetob.to_dataframe(g)", "Make data frame of short filenames and samplenames\nUncomment the below 2 blocks to make short file names and smaple names.<br>\nThis creates a dataframe and a csv file which can be edited, exported and imported.", "# Make short_filename and short_samplename \nfiles = metob.retrieve('lcmsruns',experiment='%ENTERSTRING%',username='*')\nshort_filename_delim_ids = [0,2,4,5,7,9,14]\nshort_samplename_delim_ids = [9,12,13,14]\nshort_names_df = pd.DataFrame(columns=['sample_treatment','short_filename','short_samplename'])\nctr = 0\nfor f in files:\n short_filename = []\n short_samplename = []\n tokens = f.name.split('.')[0].split('_')\n for id in short_filename_delim_ids:\n short_filename.append(str(tokens[id]))\n for id in short_samplename_delim_ids:\n short_samplename.append(str(tokens[id]))\n short_filename = \"_\".join(short_filename)\n short_samplename = \"_\".join(short_samplename)\n short_names_df.loc[ctr, 'full_filename'] = f.name.split('.')[0]\n short_names_df.loc[ctr, 'sample_treatment'] = str(tokens[12]) # delim 12\n short_names_df.loc[ctr, 'short_filename'] = short_filename\n short_names_df.loc[ctr, 'short_samplename'] = short_samplename\n short_names_df.loc[ctr, 'last_modified'] = pd.to_datetime(f.last_modified,unit='s')\n ctr +=1\nshort_names_df.sort_values(by='last_modified', inplace=True)\nshort_names_df.drop(columns=['last_modified'], inplace=True)\nshort_names_df.drop_duplicates(subset=['full_filename'], keep='last', inplace=True)\nshort_names_df.set_index('full_filename', inplace=True)\nshort_names_df.to_csv(os.path.join(output_dir, 'short_names.csv'), sep=',', index=True)\n\n# Optional import edited short_names.csv \nshort_names_df = pd.read_csv(os.path.join(output_dir, 'short_names.csv'), sep=',', index_col='full_filename')", "3. Select groups of files to operate on\nHere, you will assign your database groups to a local variable which will be used downstream in the notebook for analyzing your data with an atlas.\n\nin block below, fill out the fields for name, include_list and exclude_list using text strings from the group names you created in the previous step. The include/exlcude lists do not need wildcards. Name is a string unique to all of your groups (ex. fields 0-11 of your filenames)\n\nTypically, you will run one polarity at a time.", "polarity = 'POS' #IMPORTANT: Please make sure you set the correct polarity for the analysis\n\ngroups = dp.select_groups_for_analysis(name = '%ENTERSEARCHSTRING%', # <- edit text search string here\n most_recent = True,\n remove_empty = True,\n include_list = [], exclude_list = ['NEG','QC','InjBl'])# ex. ['QC','Blank'])\nprint(\"sorted groups\")\ngroups = sorted(groups, key=operator.attrgetter('name'))\nfor i,a in enumerate(groups):\n print(i, a.name)\n\n# to view metadata about your groups, run the block below\nmetob.to_dataframe(groups)", "4. 
Create new Atlas entries in the Metatlas database from a csv file\nQC, IS, and EMA template atlases are available on the google drive.\n\nCreate your atlas as a csv file, check that it looks correct (has all the correct headers and no blank values in rows; all columns are the correct data type\nSave it with the type of atlas (EMA, QC or IS), your initials, the experiment name, the polarity, and the version or timestamp\nUpload it to your nersc project directory (the one you named above). (If it doesn't work, double check your file permissions are set to at least rw-rw----).\nRun blocks below to create the DB entries for negative and positive mode atlases\nWARNING: Don't run this block over and over again - it will create multiple new DB entries with the same atlas name\n\nRequired Atlas file headers:\ninchi_key,label,rt_min,rt_max,rt_peak,mz,mz_tolerance,adduct,polarity,identification_notes\nvalues in rows must be completed for all fields except inchi_key (leaving this blank will not allow you to perform MSMS matching below), and identification notes\nINFO: store=True will register your atlas in the database. If you are not sure if your atlas structure is correct, set store=False for the first time your run the block to check if you get an error. If there is no error, then rerun it with store=True.\nNEGATIVE MODE ATLAS UPLOAD", "atlasfilename='%ENTERSTRING%' # <- enter the exact name of your csv file without the file extension\n\nnames = dp.make_atlas_from_spreadsheet('%s%s%s' % (pathtoatlas,atlasfilename,'.csv'), # <- DO NOT EDIT THIS LINE\n atlasfilename,\n filetype='csv',\n sheetname='',\n polarity = 'negative',\n store=True,\n mz_tolerance = 12\n ) \n", "POSITIVE MODE ATLAS UPLOAD", "atlasfilename='%ENTERSTRING%' # <- enter the exact name of your csv file without the file extension\n\nnames = dp.make_atlas_from_spreadsheet('%s%s%s' % (pathtoatlas,atlasfilename,'.csv'), # <- DO NOT EDIT THIS LINE\n atlasfilename,\n filetype='csv',\n sheetname='',\n polarity = 'positive',\n store=True,\n mz_tolerance = 12\n )\n", "5. Select Atlas to use\n\nThe first block will retrieve a list of atlases matching the 'name' string that you enter. Also, you must enter your username.\nThe next block will select one from the list, using the index number. Make sure to enter the index number for the atlas you want to use for your analysis by setting in this line: my_atlas = atlases[0]", "atlases = metob.retrieve('Atlas',name='%ENTERSTRING%',username='YOUR-NERSC-USERNAME')\nnames = []\nfor i,a in enumerate(atlases):\n print(i,a.name,pd.to_datetime(a.last_modified,unit='s'))#len(a.compound_identifications)\n\nmy_atlas = atlases[-1]\natlas_df = ma_data.make_atlas_df(my_atlas)\natlas_df['label'] = [cid.name for cid in my_atlas.compound_identifications]\nprint(my_atlas.name)\nmetob.to_dataframe([my_atlas])\n# the first line of the output will show the dimensions of the atlas dataframe\n\n# OPTIONAL: to view your atlas, run this block\nprint(my_atlas.name)\natlas_df", "6. Get EICs and MSMS for all files in your groups, using all compounds in your atlas.\nThis block builds the metatlas_dataset variable. This holds your EIC data (mz, rt, intensity values within your mz and rt ranges).\nThe EIC data contains mz, intensity and RT values across your RT range. There are two parameters that you will need to edit: extra_time and extra_mz. Extra time will collect mz, intensity and RT values from outside of your atlas defined min and max rt values. 
For example if your rt_min = 1.0, and rt_max = 2.0 and you set extra_time to 0.3, then your new rt range will be 0.7 to 2.3. This is helpful for checking if you have nearby peaks at the same m/z. Extra_mz should only be used for troubleshooting. You should keep this at 0 unless you believe you have poor mass accuracy during your run. Other ways to address this issue is by changing the mz_tolerance values in your atlas. Before changing this value, you should check in with a metatlas experienced lab member to discuss when/how to use this value.\n\nChange the value in \"extra_time = 0.0\" to something like 0.5 to 1.0 for the first EMA runthrough on your files. This will take longer but collect msms outside your retention windows which allows you to check the msms of nearby peaks before adjusting your rt bounds around the correct peak.\nextra_mz should almost always be set to 0.0 If you need to troubleshoot a low mz compound you could potentially use this value to run it back through with a larger mz error window than what was specified in your atlas (ppm tolerance).\n\n\nOn Your final runthrough, set extra_time to 0", "all_files = []\nfor my_group in groups:\n for my_file in my_group.items:\n extra_time = 0.75 # NOTE: 0.75 for the first run, 0.5 for final \n extra_mz = 0.00\n all_files.append((my_file,my_group,atlas_df,my_atlas,extra_time,extra_mz))\npool = mp.Pool(processes=min(4, len(all_files)))\nt0 = time.time()\nmetatlas_dataset = pool.map(ma_data.get_data_for_atlas_df_and_file, all_files)\npool.close()\npool.terminate()\nprint(time.time() - t0)\n\n# Make data sources tables (atlas_metadata.tab, groups_metadata.tab, groups.tab and [atlasname]_originalatlas.tab within data_sources subfolder)\nma_data.make_data_sources_tables(groups, my_atlas, output_dir) ", "6b Optional: Filter atlas for compounds with no or low signals\nUncomment the below 3 blocks to filter the atlas.\nPlease ensure that correct polarity is used for the atlases.", "# dp = reload(dp)\n# num_data_points_passing = 5\n# peak_height_passing = 4e5\n# atlas_df_passing = dp.filter_atlas(atlas_df=atlas_df, input_dataset=metatlas_dataset, num_data_points_passing = num_data_points_passing, peak_height_passing = peak_height_passing)\n# print(\"# Compounds in Atlas: \"+str(len(atlas_df)))\n# print(\"# Compounds passing filter: \"+str(len(atlas_df_passing)))", "Create new atlas and store in database\nThis block creates a filtered atlas with a new name !!\nAutomatically selects this atlas for processing. \nMake sure to use this atlas for downstream analyses. 
(NOTE: If you restart kernel or come back to the analysis, you need to reselect this newly created filtered atlas for processing)", "# atlas_passing = my_atlas.name+'_filteredby-datapnts'+str(num_data_points_passing)+'-pkht'+str(peak_height_passing)\n# myAtlas_passing = dp.make_atlas_from_spreadsheet(atlas_df_passing,\n# atlas_passing,\n# filetype='dataframe',\n# sheetname='',\n# polarity = 'positive',\n# store=True,\n# mz_tolerance = 12)\n\n# atlases = dp.get_metatlas_atlas(name=atlas_passing,do_print = True, most_recent=True)\n\n# myAtlas = atlases[-1]\n# atlas_df = ma_data.make_atlas_df(myAtlas)\n# atlas_df['label'] = [cid.name for cid in myAtlas.compound_identifications]\n# print(myAtlas.name)\n# print(myAtlas.username)\n# metob.to_dataframe([myAtlas])# \n\n# all_files = []\n# for my_group in groups:\n# for my_file in my_group.items:\n# all_files.append((my_file,my_group,atlas_df,myAtlas))\n \n# pool = mp.Pool(processes=min(4, len(all_files)))\n# t0 = time.time()\n# metatlas_dataset = pool.map(ma_data.get_data_for_atlas_df_and_file, all_files)\n# pool.close()\n# pool.terminate()\n# #If you're code crashes here, make sure to terminate any processes left open.\n#(print time.time() - t0)", "One of the two blocks below builds the hits variable. This holds your MSMS spectra (from within your mz, and rt ranges, and within the extra time indicated above).\nThere are two options for generating the hits variable:\n1. block A: use when your files have msms. It create the hits variable and also saves a binary (pickled) serialized hits file to the output directory.\n2. block B: only run if your files were collected in MS1 mode\n3. If you have already run block A and then the kernel dies, you can skip block A and directly unplickle the binary hits file from the output directory. Skip block A, uncomment the Optional block and run it.", "##BLOCK A\nimport warnings; warnings.simplefilter('ignore')\nt0 = time.time()\n\nhits=dp.get_msms_hits(metatlas_dataset,extra_time=True,keep_nonmatches=True, frag_mz_tolerance=0.01, ref_loc='/global/project/projectdirs/metatlas/projects/spectral_libraries/msms_refs_v3.tab')\npickle.dump(hits, open(os.path.join(output_dir,polarity+'_hits.pkl'), \"wb\"))\n\nprint(time.time() - t0)\nprint('%s%s' % (len(hits),' <- total number of MSMS spectra found in your files'))\n\n## BLOCK B (uncomment lines below to run this. Only use when all data files are MS1)\n#hits=pd.DataFrame([], columns=['database','id','file_name','msms_scan', u'score', u'num_matches', u'msv_query_aligned', u'msv_ref_aligned', u'name', u'adduct', u'inchi_key', u'precursor_mz', u'measured_precursor_mz'])\n#hits.set_index(['database','id','file_name','msms_scan'], inplace=True)\n\n# Optional: If you already have a pickled hits file and do not need to run get_msms_hits again, uncomment this block\n# hits = pickle.load(open(os.path.join(output_dir,polarity+'_hits.pkl'), \"rb\")) ", "7. Adjust Retention Times.\nThis block creates an interactive plot. The top panel displays MSMS from within the two green RT bounds selected below (rt min and max, initially set in atlas). When the database holds reference spectra, mirror plots are generated with the reference spectra inverted below the sample spectra. The lower panel displays the EICs overlayed for all of the files in your selected groups. You can highlight your groups different colors. It is recommended that you do this, at least, for your extraction blank (or if not available, use a solvent injection blank). 
This plot also displays radio buttons that can be interactively selected; the values will be exported in your final identifications table and in your atlas export. Use these to mark peak/MSMS quality.\nHow to use:\n1. STEP 1: Set peak flag radio buttons\n 1. OPTION A (custom flags): fill out the peak flags list (list of strings) \n peak_flag_list = ('A','B') some recommendations are below.\n 2. OPTION B (default flags): comment out the custom peak_flag_list line. Uncomment the default peak_flags = \"\". \n Flags default to: keep, remove, unresolvable isomers, check.\n2. STEP 2: Set EIC colors\n 1. Option A (custom EIC colors): fill out the colorlist in the format of below\n\n\ncolorlist = [['color1nameorhexadec','partialgroupstring1'],\n ['color2nameorhexadec','partialgroupstring2']]\n\n&lt;ul&gt;&lt;li&gt;You can add more comma delimited colors/groups as needed.&lt;/li&gt;\n&lt;li&gt;These are partial strings that match to you file names (not your group names).&lt;/li&gt;\n&lt;li&gt;The order they are listed in your list is the order they are displayed in the overlays (first is front, last is back)&lt;/li&gt;\n&lt;li&gt;Named colors available in matplotlib are here: https://matplotlib.org/3.1.0/gallery/color/named_colors.html\n or use hexadecimal values '#000000'&lt;/li&gt;&lt;/ul&gt;\nB. Option B (default EIC colors): comment out the custom colorlist lines and uncomment the default colorlist = \"\". \n Colors all default to black.\n\n\nUser the right/left buttons on your keyboard to cycle through compounds in your atlas.\nUse the up/down buttons on your keyboard to cycle through MSMS spectra within the RT bounds of the lower plot.\nUse the horizontal rt min and rt max bars below the plots to adjust the rt bounds around your peak. If there are multiple peaks, select one at a time and then click up/down to update the msms available in that new RT range. If necessary evaluate your data in an external program such as mzmine to make sure you are selecting the correct peak.\n\n\nTIPS: use compound_idx = 0 in step 3 to change to a different compound in your atlas using the index number. If your plot does not fit in your browser window, adjust height and width values. Use alpha to change the transparency of the lines this is a value 0 (transparent) to 1 (opaque).\nDO NOT change your RT theoretical peak (the purple line). It is locked from editing (unless you change a hidden parameter) and only to be changed in special cases. The measured retention times of your peaks will be calculated and exported in your output files. 
These will be compared with the RT theoreticals and used in your evidence of identification table.", "###STEP 1: Set the peak flag radio buttons using one of the two lines below, for custom flags or default flags\nimport warnings; warnings.simplefilter('ignore')\npeak_flag_list=('','L1+ - 1 pk, good RT&MSMS','L1+ - known isomer overlap','L1+ - 1 pk, good RT, MSMS ok (coisolated mz/partial match/low int)','L1+ - 1 pk, good RT&MSMS from external library','L1 - 1 pk, correct RT, no MSMS or int too low for matching','L1 - 1 pk, good RT, very low intensity/poor pk shape','L2 put comp','L3 putative class','Remove - background/noise','Remove - bad EMA MSMS','Remove - bad MSMS NIST/MONA/Metlin')\nmsms_flags_list = \"\" #\n#peak_flag_list =\"\" # this will default to ('keep','remove','unresolvable isomers','poor peak shape')\n\n###STEP 2: Set the EIC line colors using on of the two lines below, for custom colors or default \ncolorlist= [['red','ExCtrl'], \n ['green','TxCtrl'],\n ['blue','InjBl']]\n#colorlist=\"\" # this will default to black\n\n###STEP 3\na = dp.adjust_rt_for_selected_compound(metatlas_dataset, msms_hits=hits, peak_flags=peak_flag_list, msms_flags=msms_flags_list, color_me = colorlist, compound_idx=0,alpha=0.5,width=15,height=4.5)\n", "8. Create filtered atlas excluding compounds marked removed\nRe-run the following before filtering atlas\n1. Get Groups (include InjBl)\n2. Get Atlas\n3. Get Data\n4. Get MSMS Hits", "dp=reload(dp)\n(atlas_kept, atlas_removed) = dp.filter_by_remove(atlas_df, metatlas_dataset)\nprint(\"# Compounds Total: \"+str(len(atlas_df)))\nprint(\"# Compounds Kept: \"+str(len(atlas_kept)))\nprint(\"# Compounds Removed: \"+str(len(atlas_removed)))\n\natlasfilename=my_atlas.name+'_kept' # <- enter the name of the atlas to be stored\n\nnames = dp.make_atlas_from_spreadsheet(atlas_kept, \n atlasfilename, # <- DO NOT EDIT THIS LINE\n filetype='dataframe',\n sheetname='',\n polarity = 'positive',\n store=True,\n mz_tolerance = 12\n ) ", "Re-run the following before filtering atlas\n\nRestart kernel\nGet Groups\nGet Atlas (look for the *_kept atlas)\nGet Data\nGet MSMS Hits\n\n9. Export results files\nExport Atlas to a Spreadsheet\nThe peak flags that you set and selected from the rt adjuster radio buttons will be saved in a column called id_notes", "atlas_identifications = dp.export_atlas_to_spreadsheet(my_atlas,os.path.join(output_dir,'%s_%s%s.csv' % (polarity,my_atlas.name,\"export\")))\nprint(my_atlas.name)\n", "Export MSMS match scores, stats sheets, and final identification table\nThis block creates a number of files:\n\ncompound_scores.csv\nstats_table.tab\nfiltered and unfiltered peak heights, areas, msms scores, mz centroid, mz ppm error, num of fragment matches, rt delta, rt peak\nfinal identification sheet that is formatted for use as a supplemental table for manuscript submission. You will need to manually complete some columns. 
Please discuss with Ben, Katherine, Daniel or Suzie before using for the first time.\n\nTHe kwargs below will set the filtering points for the parameters indicated.", "kwargs = {'min_intensity': 1e4, # strict = 1e5, loose = 1e3\n 'rt_tolerance': .5, #>= shift of median RT across all files for given compound to reference\n 'mz_tolerance': 20, # strict = 5, loose = 25; >= ppm of median mz across all files for given compound relative to reference\n 'min_msms_score': .6, 'allow_no_msms': True, # strict = 0.6, loose = 0.3 <= highest compound dot-product score across all files for given compound relative to reference\n 'min_num_frag_matches': 1, 'min_relative_frag_intensity': .001} # strict = 3 and 0.1, loose = 1, 0.01 number of matching mzs when calculating max_msms_score and ratio of second highest to first highest intensity of matching sample mzs\nscores_df = fa.make_scores_df(metatlas_dataset,hits)\nscores_df['passing'] = fa.test_scores_df(scores_df, **kwargs)\n\npass_atlas_df, fail_atlas_df, pass_dataset, fail_dataset = fa.filter_atlas_and_dataset(scores_df, atlas_df, metatlas_dataset, column='passing')\n\nfa.make_stats_table(input_dataset = metatlas_dataset, msms_hits = hits, output_loc = output_dir,min_peak_height=1e5,use_labels=True,min_msms_score=0.01,min_num_frag_matches=1,include_lcmsruns = [],exclude_lcmsruns = ['QC'], polarity=polarity)\nscores_df.to_csv(os.path.join(output_dir,'stats_tables',polarity+'_compound_scores.csv'))", "Export EIC chromatograms as individual pdfs for each compound\n\nThere are three options for formatting your EIC output using the \"group =\" line below:\n'page' will print each sample group on a new page of a pdf file\n'index' will label each group with a letter\nNone will print all of the groups on one page with very small subplot labels\n\n\nThe Y axis scale can be shared across all files using share_y = True or set to the max within each file using share_y = False\nTo use short names for plots, short_names_df should be provided as input. Additionally the header column to be used for short names should be provided as follows (short_names_df=short_names_df, short_names_header='short_samplename'). Header options are sample_treatment, short_filename, short_samplename. These are optional parameters", "group = 'index' # 'page' or 'index' or None\nsave = True\nshare_y = True\n\ndp.make_chromatograms(input_dataset=metatlas_dataset, include_lcmsruns = [],exclude_lcmsruns = ['InjBl','QC','Blank','blank'], group=group, share_y=share_y, save=save, output_loc=output_dir, short_names_df=short_names_df, short_names_header='short_samplename', polarity=polarity)", "Export MSMS mirror plots as individual pdfs for each compound\n\nuse_labels = True will use the compound names you provided in your atlas, if you set it to false, the compounds will be named with the first synonym available from pubchem which could be a common name, iupac name, cas number, vendor part number, etc. \nThe include and exclude lists will match partial strings in filenames, do not use wildcards.\nIf short_names_df is provided as input, short_samplename is used for plots.", "dp.make_identification_figure_v2(input_dataset = metatlas_dataset, msms_hits=hits, use_labels=True, include_lcmsruns = [],exclude_lcmsruns = ['InjBl','QC','Blank','blank'], output_loc=output_dir, short_names_df=short_names_df, polarity=polarity)", "Data Sheets\n\nTo include short names in the output, short_names_df should be provided as input to make_output_dataframe. 
\nylabel is optional", "peak_height = dp.make_output_dataframe(input_dataset = metatlas_dataset,include_lcmsruns = [],exclude_lcmsruns = [], fieldname='peak_height', output_loc=os.path.join(output_dir,polarity+'_data_sheets'), short_names_df=short_names_df, polarity=polarity, use_labels=True)\npeak_area = dp.make_output_dataframe(input_dataset = metatlas_dataset,include_lcmsruns = [],exclude_lcmsruns = [], fieldname='peak_area', output_loc=os.path.join(output_dir,polarity+'_data_sheets'), short_names_df=short_names_df, polarity=polarity, use_labels=True)\nmz_peak = dp.make_output_dataframe(input_dataset = metatlas_dataset,include_lcmsruns = [],exclude_lcmsruns = [], fieldname='mz_peak', output_loc=os.path.join(output_dir,polarity+'_data_sheets'), short_names_df=short_names_df, polarity=polarity, use_labels=True)\nrt_peak = dp.make_output_dataframe(input_dataset = metatlas_dataset,include_lcmsruns = [],exclude_lcmsruns = [],fieldname='rt_peak', output_loc=os.path.join(output_dir,polarity+'_data_sheets'), short_names_df=short_names_df, polarity=polarity, use_labels=True)\nmz_centroid = dp.make_output_dataframe(input_dataset = metatlas_dataset,include_lcmsruns = [],exclude_lcmsruns = [], fieldname='mz_centroid', output_loc=os.path.join(output_dir,polarity+'_data_sheets'), short_names_df=short_names_df, polarity=polarity, use_labels=True)\nrt_centroid = dp.make_output_dataframe(input_dataset = metatlas_dataset,include_lcmsruns = [],exclude_lcmsruns = [], fieldname='rt_centroid', output_loc=os.path.join(output_dir,polarity+'_data_sheets'), short_names_df=short_names_df, polarity=polarity, use_labels=True)", "Box plots", "dp.make_boxplot_plots(rt_peak, output_loc=os.path.join(output_dir, polarity+'_boxplot_rt_peak'), ylabel=\"RT Peak\")\ndp.make_boxplot_plots(peak_height, output_loc=os.path.join(output_dir, polarity+'_boxplot_peak_height'), ylabel=\"Peak Height\")\ndp.make_boxplot_plots(mz_centroid, output_loc=os.path.join(output_dir, polarity+'_boxplot_mz_centroid'), ylabel=\"MZ Centroid\")", "Export MSMS fragment Ions", "intensity_fraction = 0.01\nmin_mz = 450.0 #minimum m/z to export in msms\nmax_mz = -40.0 # distance from precurosor to export (0.5 is a good number. 
crazy people use negative numbers)\nscale_intensity = True\ndata = []\nfor compound_index in range(len(metatlas_dataset[0])):\n max_intensity = 0\n d = {}\n for file_index in range(len(metatlas_dataset)):\n try:\n pk_idx = metatlas_dataset[file_index][compound_index]['data']['msms']['data']['precursor_intensity'].argmax()\n pk = metatlas_dataset[file_index][compound_index]['data']['msms']['data']['precursor_intensity'][pk_idx]\n precursor_mz = metatlas_dataset[file_index][compound_index]['data']['msms']['data']['precursor_MZ'][pk_idx]\n rt = metatlas_dataset[file_index][compound_index]['data']['msms']['data']['rt'][pk_idx]\n if (pk>max_intensity) & (rt>metatlas_dataset[file_index][compound_index]['identification'].rt_references[-1].rt_min) & (rt<metatlas_dataset[file_index][compound_index]['identification'].rt_references[-1].rt_max):\n good_index = file_index\n max_intensity = pk\n final_mz = precursor_mz #save this for filtering below\n except:\n pass\n# print(compound_index,good_index,max_intensity)\n if max_intensity>0:\n msms = metatlas_dataset[good_index][compound_index]['data']['msms']['data']\n idx = np.argwhere(msms['precursor_intensity']==max_intensity).flatten()\n mz = msms['mz'][idx]\n intensity = msms['i'][idx]\n max_msms_intensity = intensity.max()\n cutoff = intensity_fraction * max_msms_intensity\n conditions = (intensity>cutoff) & (mz>min_mz) & (mz<(final_mz+max_mz))\n if sum(conditions)>0:\n keep_idx = np.argwhere(conditions).flatten()\n mz = str(['%.2f'%x for x in list(mz[keep_idx])]).replace('\\'','')\n if scale_intensity==True:\n intensity = intensity / intensity.max()\n intensity = intensity * 1e5\n intensity = intensity.astype(int)\n intensity = str(['%d'%x for x in list(intensity[keep_idx])]).replace('\\'','')\n spectra = str([mz,intensity]).replace('\\'','')\n else:\n mz = None\n intensity = None\n spectra = None\n else:\n mz = None\n intensity = None\n spectra = None\n data.append({'name':metatlas_dataset[file_index][compound_index]['identification'].name,'spectrum':spectra,'mz':mz,'intensity':intensity})\ndata = pd.DataFrame(data)\ndata[['name','mz','intensity']].to_csv(os.path.join(output_dir,'spectra_1pct_450cut.csv'),index=None)\n# to look at it type this:\ndata.head(20)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mavillan/SciProg
06_profiling/06_profiling.ipynb
gpl-3.0
[ "<h1 align=\"center\">Scientific Programming in Python</h1>\n<h2 align=\"center\"> Topic 6: Python Profiling and Optimization </h2>\n\nNotebook created by Martín Villanueva - martin.villanueva@usm.cl - DI UTFSM - May 2017.", "%matplotlib inline\n\nimport numpy as np\nimport numexpr as ne\nimport numba\nimport random\nimport matplotlib.pyplot as plt\nimport scipy as sp\nimport sys\nfrom ipywidgets import interact, interactive, fixed", "Table of Contents\n\n1.- About Optimization\n2.- Time Profiling\n3.- Memory Profiling\n4.- Application: K-means Clustering Algorithm\n\n<div id='about' />\n1.- About Optimization\n\"The real problem is that programmers have spent far too much time worrying about efficiency in the wrong places and at the wrong times; premature optimization is the root of all evil (or at least most of it) in programming\". Donald Knuth.\n\nOptimizing code prematurely is generally considered a bad practice.\nCode optimization should only be conducted when it's really needed.\nWe should know exactly where we need to optimize your code.\nTypically the majority of the execution time comprises a relatively small part of the code.\nOptimization should never be done without preliminary profiling.\n\n<div id='time' />\n2.- Time Profiling\n2.1- Time Benchmarking: timeit\nThe %timeit magic and the %%timeit cell magic allow you to quickly evaluate (benchmark) the time taken by one or several Python statements.\nSome useful options: %timeit?\nOptions:\n\n\nn: Execute the given statement <N> times in a loop. If this value\nis not given, a fitting value is chosen.\n\n\nr: Repeat the loop iteration <R> times and take the best result.\nDefault: 3\n\n\nt: Use time.time to measure the time, which is the default on Unix.\nThis function measures wall time.\n\n\nc: Use time.clock to measure the time, which is the default on\nWindows and measures wall time. On Unix, resource.getrusage is used\ninstead and returns the CPU user time.\n\n\np: Use a precision of <P> digits to display the timing result.\nDefault: 3\n\n\nq: Quiet, do not print result.\n\n\no: Return a TimeitResult that can be stored in a variable to inspect\n the result in more details.\n\n\nWe are going to estimate the time taken to calculate the sum of the inverse squares of all positive integer numbers up to a given n. Let's first define n:", "n = 100000", "Let's time this computation in pure Python (Using list comprehensions)", "t1 = %timeit -o -n 100 sum([1. / i**2 for i in range(1, n)])", "Now, let's use the %%timeit cell magic to time the same computation written on two lines:", "%%timeit s=0.\nfor i in range(1, n):\n s += 1./i**2", "Finally, let's time the NumPy version of this computation:", "t2 = %timeit -o -n 100 np.sum(1./np.arange(1., n) ** 2)", "The object returned by timeit contains information about the time measurements:", "print(\"Type:\")\nprint(type(t1))\n\nprint(\"\\nTime of all runs:\")\nprint(t1.all_runs)\n\nprint(\"\\nBest measured time:\")\nprint(t1.best)\n\nprint(\"\\nWorst measured time:\")\nprint(t1.worst)\n\nprint(\"\\nCompilation time:\")\nprint(t1.compile_time)", "And we can compare the performance improvement with the quotient between the best measured times:", "print(\"Performance improvement:\")\nprint(t1.best/t2.best)", "2.2- Function Profiling: cProfile\nThe %timeit magic command is often helpful, yet a bit limited when you need detailed information about what takes most of the execution time in your code. 
This magic command is meant for benchmarking rather than profiling.\nPython includes a profiler named cProfile that breaks down the execution time into the contributions of all called functions. IPython provides convenient ways to leverage this tool in an interactive session, through the %prun and %%prun magics.\nTo introduce its usage we will use a known example: Random walks.\nLet's create a function generating random +1 and -1 values in an array:", "def step(*shape):\n # Create a random n-vector with +1 or -1 values.\n return 2 * (np.random.random_sample(shape) < .5) - 1", "Now, let's write the simulation code in a cell starting with %%prun in order to profile the entire simulation. The various options allow us to save the report in a file and to sort the first 10 results by cumulative time.\nPython's profiler creates a detailed report of the execution time of our code, function by function. For each function, we get the total number of calls, the total and cumulative times, and their per-call counterparts (division by ncalls). Note that:\n* The total time represents how long the interpreter stays in a given function, excluding the time spent\nin calls to subfunctions.\n* The cumulative time is similar but includes the time spent in calls to subfunctions.", "a = np.array([1,2,3,4,5,6])\nnp.cumsum(a)\n\n%%prun -s cumulative -q -l 15 -T prun0\nn = 10000\niterations = 500\nx = np.cumsum(step(iterations, n), axis=0)\nbins = np.arange(-30, 30, 1)\ny = np.vstack([np.histogram(x[i,:], bins)[0] for i in range(iterations)])", "In the example, -s allows us to sort the report by a particular column, -q to suppress the pager output, -l to limit the number of lines displayed or to filter the results by function name, and -T to save the report\nin a text file. . This database-like object contains all information about the profiling and can be analyzed through Python's pstats module. For more info about arguments run %prun?.\nThe profiling report has been saved in a text file named prun0. Let's display it:", "print(open('prun0', 'r').read())\n\ndef plot_helper(y, i, n):\n plt.figure(figsize=(10,7))\n plt.plot(np.arange(-30,29), y[i], 'ro-')\n plt.title(\"Distribution of {0} simultaneous random walks at iteration {1}\".format(n,i))\n plt.show()\n\ninteract(plot_helper, y=fixed(y), i=(0,500), n=fixed(10000))", "2.3- Line Profiling: line_profiler\nPython's native cProfile module and the corresponding %prun magic break down the execution time of code function by function. Sometimes, we may need an even more fine- grained analysis of code performance with a line-by-line report.\nTo profile code line-by-line, we need an external Python module named line_profiler. To install it run one of these:\n* conda install line_profiler\n* pip install line_profiler\nOnce installed import the line_profiler IPython extension module that comes with the package:", "%load_ext line_profiler", "This IPython extension module provides a %lprun magic command to profile a Python function line-by-line. 
\nNote: It works best when the function is defined in a file and not in the interactive namespace or in the notebook.\nTherefore, here we write our code in a Python script using the %%writefile cell magic:", "%%writefile simulation.py\nimport numpy as np\ndef step(*shape):\n # Create a random n-vector with +1 or -1 values.\n return (2 * (np.random.random_sample(shape) < .5) - 1)\n\ndef simulate(iterations, n=10000):\n s = step(iterations, n)\n x = np.cumsum(s, axis=0)\n bins = np.arange(-30, 30, 1)\n y = np.vstack([np.histogram(x[i,:], bins)[0] for i in range(iterations)])\n return y", "Now, let's import this script into the interactive namespace so that we can execute and profile our code. \nThe functions to be profiled need to be explicitly specified in the %lprun magic command. We also save the report in a file, lprof0:", "import simulation\n\n%lprun -T lprof0 -f simulation.simulate simulation.simulate(500)", "Let's display the report:", "print(open('lprof0', 'r').read())", "To see all the possible arguments run %lprun?.\n<div id='memory' />\n3.- Memory Profiling\n\nThe methods described in the previous recipe were about CPU time profiling. However, memory is also a critical factor.\nWriting memory-optimized code is not trivial and can really make your program faster. This is particularly important when dealing with large NumPy arrays.\n\nTo profile memory usage we need and external module named memory_profiler. To install run one of these:\n* conda install memory_profiler\n* pip install memory_profiler\nAssuming that the simulation code has been loaded as shown above, we load the memory profiler IPython extension:", "%load_ext memory_profiler", "The memory_profiler package checks the memory usage of the interpreter at every line. The increment column allows us to spot those places in the code where large amounts of memory are allocated.\nNow, let's run the code under the control of the memory profiler:", "%mprun -T mprof0 -f simulation.simulate simulation.simulate(1500)", "Let's show the results:", "print(open('mprof0', 'r').read())", "The memory_profiler IPython extension also comes with a %memit magic command that lets us benchmark the memory used by a single Python statement. Here is a simple example:", "%memit np.random.randn(2000, 10000)", "<div id='Application' />\n4.- Application: K-Means Clustering Algorithm\nThis is an algorithm that find structure over unlabeled data, i.e, it is an unsupervised learning algorithm. 
It is very simple, and works as follows:\n\n1.- Initialize $k$ cluster centroids.\n2.- Repeat the following:\n2.1.- For each point, compute which centroid is nearest to it.\n2.2.- For each centroid, move its location to the mean location of the points assigned to it.\n\nLet's first generate a set of random 2D points:", "points = np.vstack(((np.random.randn(150, 2) * 0.75 + np.array([1, 0])),\n (np.random.randn(50, 2) * 0.25 + np.array([-0.5, 0.5])),\n (np.random.randn(50, 2) * 0.5 + np.array([-0.5, -0.5]))))\n\npoints.shape\n\nplt.figure(figsize=(7,7))\nplt.scatter(points[:, 0], points[:, 1])\nplt.grid()\nplt.show()\n\ndef initialize_centroids(points, k):\n \"\"\"returns k centroids from the initial points\"\"\"\n centroids = points.copy()\n np.random.shuffle(centroids)\n return centroids[:k]", "And let's visualize the chosen (initial) centroids:", "centroids = initialize_centroids(points, 3)\n\nplt.figure(figsize=(7,7))\nplt.scatter(points[:, 0], points[:, 1])\nplt.scatter(centroids[:, 0], centroids[:, 1], c='r', s=100)\nplt.grid()\nplt.show()", "The following function computes which centroid is closest to each point in the dataset", "def closest_centroid(points,centroids):\n \"\"\"returns an array containing the index to the nearest centroid for each point\"\"\"\n # computation of distance matrix\n m = points.shape[0]\n n = centroids.shape[0]\n D = np.zeros((m,n))\n for i in range(m):\n for j in range(n):\n D[i,j] = np.sqrt( np.sum( (points[i]-centroids[j])**2 ) )\n return np.argmin(D, axis=1)\n\nclosest = closest_centroid(points,centroids)", "And the next function moves/updates the centroids according to the mean position of the points assigned to them", "def move_centroids(points, closest, centroids):\n \"\"\"returns the new centroids assigned from the points closest to them\"\"\"\n return np.array([points[closest==k].mean(axis=0) for k in range(centroids.shape[0])])\n\nmove_centroids(points, closest, centroids)\n\nplt.subplot(121)\nplt.scatter(points[:, 0], points[:, 1])\nplt.scatter(centroids[:, 0], centroids[:, 1], c='r', s=100)\n\ncentroids = move_centroids(points, closest, centroids)\n\nplt.subplot(122)\nplt.scatter(points[:, 0], points[:, 1])\nplt.scatter(centroids[:, 0], centroids[:, 1], c='r', s=100)\nplt.show()\n\ndef main_loop(points, centroids, n_iter, tol=1e-8):\n for i in range(n_iter):\n closest = closest_centroid(points, centroids)\n _centroids = move_centroids(points, closest, centroids)\n if np.sum((_centroids-centroids)**2, axis=1).max() < tol:\n centroids = _centroids\n break\n centroids = _centroids\n return centroids", "Now let's profile the execution of this function and its sub-function calls. We use a set of $10000$ points now:", "points = np.vstack(((np.random.randn(5000, 2) * 0.75 + np.array([1, 0])),\n (np.random.randn(2500, 2) * 0.25 + np.array([-0.5, 0.5])),\n (np.random.randn(2500, 2) * 0.5 + np.array([-0.5, -0.5]))))\n\n%%prun -s cumulative -q -l 15 -T prun1\nmain_loop(points, centroids, 1000)\n\nprint(open('prun1', 'r').read())", "Clearly the problem is the closest_centroid function! Now that we have isolated the problem, we do a line profile of this single function:", "%lprun -T lprof2 -f closest_centroid closest_centroid(points, centroids)\n\nprint(open('lprof2', 'r').read())", "As you should suspect, the problem is that NumPy arrays are not meant to be iterated by Python, but we have to implement this algorithm in a vectorized way (or make it faster with Numba/Cython).
\nThe next is a re-implementation of the algorithm, using native NumPy functions:", "def closest_centroid(points, centroids):\n \"\"\"returns an array containing the index to the nearest centroid for each point\"\"\"\n px = points[:,0].reshape((-1,1))\n py = points[:,1].reshape((-1,1))\n Dx = px - centroids[:,0].reshape((1,-1))\n Dy = py - centroids[:,1].reshape((1,-1))\n # distance matrix\n D = np.sqrt(Dx**2+Dy**2)\n return np.argmin(D, axis=1)", "Let's profile again:", "%%prun -s cumulative -q -l 15 -T prun2\nmain_loop(points, centroids, 1000)\n\nprint(open('prun2', 'r').read())", "Mr Knuth will be proud :)\nAnother interesting tools for profiling\n\nRunSnakeRun, a GUI tools for exploring and visualizing the output of a profiling session.\nPython's trace module it is extremely useful during in-depth debugging and profiling sessions. More info.\n(Memory) Guppy-PE, PySizer and Pympler." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/fio-ronm/cmip6/models/sandbox-2/landice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Landice\nMIP Era: CMIP6\nInstitute: FIO-RONM\nSource ID: SANDBOX-2\nTopic: Landice\nSub-Topics: Glaciers, Ice. \nProperties: 30 (21 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:01\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-2', 'landice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Grid\n4. Glaciers\n5. Ice\n6. Ice --&gt; Mass Balance\n7. Ice --&gt; Mass Balance --&gt; Basal\n8. Ice --&gt; Mass Balance --&gt; Frontal\n9. Ice --&gt; Dynamics \n1. Key Properties\nLand ice key properties\n1.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of land surface model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of land surface model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Ice Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify how ice albedo is modelled", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.ice_albedo') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"function of ice age\" \n# \"function of ice density\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Atmospheric Coupling Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich variables are passed between the atmosphere and ice (e.g. orography, ice mass)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Oceanic Coupling Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich variables are passed between the ocean and ice", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich variables are prognostically calculated in the ice model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.landice.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice velocity\" \n# \"ice thickness\" \n# \"ice temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of land ice code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Grid\nLand ice grid\n3.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the grid in the land ice scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. Adaptive Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs an adative grid being used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.3. Base Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe base resolution (in metres), before any adaption", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.base_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Resolution Limit\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf an adaptive grid is being used, what is the limit of the resolution (in metres)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.resolution_limit') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Projection\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe projection of the land ice grid (e.g. albers_equal_area)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.projection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Glaciers\nLand ice glaciers\n4.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of glaciers in the land ice scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.landice.glaciers.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of glaciers, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Dynamic Areal Extent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDoes the model include a dynamic glacial extent?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5. Ice\nIce sheet and ice shelf\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the ice sheet and ice shelf in the land ice scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Grounding Line Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.grounding_line_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grounding line prescribed\" \n# \"flux prescribed (Schoof)\" \n# \"fixed grid size\" \n# \"moving grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5.3. Ice Sheet\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre ice sheets simulated?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.ice_sheet') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5.4. Ice Shelf\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre ice shelves simulated?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.ice_shelf') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Ice --&gt; Mass Balance\nDescription of the surface mass balance treatment\n6.1. Surface Mass Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how and where the surface mass balance (SMB) is calulated. Include the temporal coupling frequeny from the atmosphere, whether or not a seperate SMB model is used, and if so details of this model, such as its resolution", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Ice --&gt; Mass Balance --&gt; Basal\nDescription of basal melting\n7.1. Bedrock\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of basal melting over bedrock", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Ocean\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of basal melting over the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Ice --&gt; Mass Balance --&gt; Frontal\nDescription of claving/melting from the ice shelf front\n8.1. Calving\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of calving from the front of the ice shelf", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Melting\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the implementation of melting from the front of the ice shelf", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Ice --&gt; Dynamics\n**\n9.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description if ice sheet and ice shelf dynamics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Approximation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nApproximation type used in modelling ice dynamics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.approximation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SIA\" \n# \"SAA\" \n# \"full stokes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Adaptive Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there an adaptive time scheme for the ice scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9.4. Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
opengeostat/pygslib
pygslib/Ipython_templates/.ipynb_checkpoints/bicalib_raw-checkpoint.ipynb
mit
[ "PyGSLIB\nBicalib\nThe GSLIb equivalent parameter file is\n```\n Parameters for BICALIB\n ****\nSTART OF PARAMETERS:\ndata/ydata.dat \\file with secondary data\n4 \\ column for secondary variable\ndata/cluster.dat \\file with calibration scatterplot\n3 4 5 \\ columns of pri, sec, and weight\n-1.0e21 1.0e21 \\ trimming limits\nbicalib.out \\file for output data / distributions\nbicalib.cal \\file for output calibration (SISIM)\nbicalib.rep \\file for calibration report\n5 \\number of thresholds on primary\n0.50 1.00 2.50 5.00 10.0 \\ thresholds on primary\n5 \\number of thresholds on secondary\n0.50 1.00 2.50 5.00 10.0 \\ thresholds on secondary\n```", "#general imports\nimport matplotlib.pyplot as plt \nimport pygslib \nimport numpy as np\nimport pandas as pd\n\n#make the plots inline\n%matplotlib inline ", "Getting the data ready for work\nIf the data is in GSLIB format you can use the function gslib.read_gslib_file(filename) to import the data into a Pandas DataFrame.", "#get the data in gslib format into a pandas Dataframe\ncluster= pygslib.gslib.read_gslib_file('../data/cluster.dat') \nydata = pygslib.gslib.read_gslib_file('../data/ydata.dat') \n\nydata.head()\n\ncluster.head()\n\n#view data in a 2D projection\nplt.scatter(ydata['Xlocation'],ydata['Ylocation'], c=ydata['Secondary'],\n alpha=1, s=15, marker =',', linewidths= (0,))\nplt.scatter(cluster['Xlocation'],cluster['Ylocation'], c=cluster['Secondary'])\nplt.colorbar()\nplt.grid(True)\nplt.show()\n\nnpoints = len(cluster['Secondary'])\nydata['Declustering Weight']=1\n#using declustering wight\nparameters_qpplt = {\n 'qqorpp' : 0, # Q-Q plot (qqorpp=0); P-P plot (qqorpp=1)\n 'npts' : npoints, # number of points to use on the Q-Q or P-P plot (should not exceed the smallest number of data in data1 / data2\n 'va1' : cluster['Secondary'], # array('d') with bounds (nd)\n 'va2' : ydata['Secondary'], # array('d') with bounds (nd)\n 'wt1' : cluster['Declustering Weight'],# array('d') with bounds (nd)\n 'wt2' : ydata['Declustering Weight']} # array('d') with bounds (nd)\n\nvr1a,vr2a,error = pygslib.gslib.__plot.qpplt(**parameters_qpplt)\n\nprint ('error ? ', error != 0)\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nplt.plot (vr1a, vr2a, 'o')\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_xlabel ('cluster|secundary')\nax.set_ylabel ('ydata|secundary')\nax.set_title ('QQ PLot')\nplt.grid(True)\nfig.show\n\n\nnpoints = len(cluster['Secondary'])\nydata['Declustering Weight']=1\n#using declustering wight\nparameters_qpplt = {\n 'qqorpp' : 0, # Q-Q plot (qqorpp=0); P-P plot (qqorpp=1)\n 'npts' : npoints, # number of points to use on the Q-Q or P-P plot (should not exceed the smallest number of data in data1 / data2\n 'va1' : cluster['Primary'], # array('d') with bounds (nd)\n 'va2' : ydata['Secondary'], # array('d') with bounds (nd)\n 'wt1' : cluster['Declustering Weight'],# array('d') with bounds (nd)\n 'wt2' : ydata['Declustering Weight']} # array('d') with bounds (nd)\n\nvr1a,vr2a,error = pygslib.gslib.__plot.qpplt(**parameters_qpplt)\n\nprint ('error ? 
', error != 0)\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nplt.plot (vr1a, vr2a, 'o')\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_xlabel ('cluster|primary')\nax.set_ylabel ('ydata|secundary')\nax.set_title ('QQ PLot')\nplt.grid(True)\nfig.show", "Testing bicalib", "print (pygslib.gslib.__bicalib.__doc__)\n\nparameters_bicalib = {\n 'vval' : ydata['Secondary'], # secondary data\n 'u' : cluster['Primary'], # calibration scatterplot (primary data)\n 'v' : cluster['Secondary'], # calibration scatterplot (secondary data)\n 'wt' : cluster['Declustering Weight'], # calibration scatterplot (weight data)\n 'cutu' : [0.5,1.,2.5,5.,10.], # thresholds on primary \n 'cutv' : [0.5,1.,2.5,5.,10.]} # thresholds on secondary\n\nssqu,avgu,umin,umax,ssqv,avgv,vmin,vmax, \\\npdfrep,fract,yx,em,vm,nm,b,lcdf,error = pygslib.gslib.__bicalib.bicalib(**parameters_bicalib)\n\nprint ('error ? ', error != 0)\n", "Comparing results with the report file (bicalib.rep)\n```\n MARKOV-BAYES CALIBRATION REPORT\n *******\n Number of pairs retained = 140\n\n Primary variable: average = 2.52812052 \n variance = 22.0867615 \n minimum = 5.99999987E-02\n maximum = 58.3199997\n\n Secondary variable: average = 2.52758455 \n variance = 9.77680969 \n minimum = 0.180000007 \n maximum = 22.4599991\n\nCutoffs on Primary Variable\n U cutoff 1 cutoff = 0.5000 cdf = 0.29924\n U cutoff 2 cutoff = 1.0000 cdf = 0.47028\n U cutoff 3 cutoff = 2.5000 cdf = 0.72501\n U cutoff 4 cutoff = 5.0000 cdf = 0.87425\n U cutoff 5 cutoff = 10.0000 cdf = 0.95929\nNumber within each bivariate (u,v) class:\n 0.50 1.00 2.50 5.00 10.00\n 0.50 | 0.1927 0.0130 0.0094 0.0000 0.0000 0.0000\n 1.00 | 0.0780 0.0568 0.0344 0.0068 0.0000 0.0000\n 2.50 | 0.0067 0.0934 0.1416 0.0315 0.0205 0.0000\n 5.00 | 0.0218 0.0000 0.0562 0.0917 0.0174 0.0032\n 10.00 | 0.0000 0.0077 0.0131 0.0169 0.0256 0.0115\n Max. | 0. 0. 0. 0. 0. 
0.\nThe cumulative frequency (local prior cdf) table:\n 0.89586 0.95633 1.00000 1.00000 1.00000 1.00000\n 0.44297 0.76568 0.96116 1.00000 1.00000 1.00000\n 0.02295 0.34097 0.82286 0.93023 1.00000 1.00000\n 0.11440 0.11440 0.40956 0.89164 0.98330 1.00000\n 0.00000 0.10326 0.27851 0.50410 0.84630 1.00000\n 0.00000 0.00000 0.00000 0.04442 0.47650 1.00000\ncutoff,total#,mean,Variance\n for U(x) <= cutoff \n 0.50 30 | 0.70124 0.07721\n 1.00 47 | 0.71755 0.07886\n 2.50 76 | 0.84762 0.04005\n 5.00 101 | 0.93332 0.01228\n 10.00 126 | 0.97366 0.00769\n for U(x) > cutoff \n 0.50 110 | 0.12758 0.04022\n 1.00 93 | 0.25076 0.04704\n 2.50 64 | 0.40175 0.07353\n 5.00 39 | 0.46354 0.13237\n 10.00 14 | 0.62067 0.03795\nB(i) values:\n 0.5737\n 0.4668\n 0.4459\n 0.4698\n 0.3530\n```", "[str(i) for i in [1,2,3]]\n\nU_cutoff= np.arange(len(parameters_bicalib['cutu'])) +1\nindex = [str(i) for i in parameters_bicalib['cutu']]\nindex.append('max')\ncolumns = [str(i) for i in parameters_bicalib['cutv']]\ncolumns.append('max')\nuv_class= pd.DataFrame(pdfrep, index= index, columns = columns)\n\nU_le_cutoff = pd.DataFrame ({'cutoff': parameters_bicalib['cutu'], 'total':nm[:,1],'mean': em[:,1], 'var' : vm[:,1]})\nU_ge_cutoff = pd.DataFrame ({'cutoff': parameters_bicalib['cutu'], 'total':nm[:,0],'mean': em[:,0], 'var' : vm[:,0]})\n\n\nprint (' MARKOV-BAYES CALIBRATION REPORT ')\nprint (' ----------------------------------------')\nprint ('Primary variable: average =', avgu)\nprint (' variance =', ssqu) \nprint (' minimum =', umin)\nprint (' maximum =', umax)\nprint ('Secondary variable: average =', avgv)\nprint (' variance =', ssqv) \nprint (' minimum =', vmin)\nprint (' maximum =', vmax)\nprint (' Cutoffs on Primary Variable ')\nfor i in U_cutoff:\n print (' U cutoff', i, ' cutoff = ', parameters_bicalib['cutu'][i-1], ' cdf = ' , fract[i-1])\n\nprint ('')\nprint ('Number within each bivariate (u,v) class: ??? max is differen ??? 
check this') \nprint (uv_class)\nprint ('')\nprint (' The cumulative frequency (local prior cdf) table:')\nprint (yx)\nprint ('')\nprint ('cutoff,total#,mean,Variance') \nprint (' for U(x) <= cutoff ') \nprint (U_le_cutoff)\nprint (' for U(x) > cutoff')\nprint (U_ge_cutoff)\nprint ('')\nprint (pd.DataFrame({'B(i) values:':b}))\n", "Note:\nThis is also comparable to data on file for output calibration (SISIM), or bicalib.cal in the *.par file above \nThresholds for secondary variable\n 5\n 0.500000000 \n 1.00000000 \n 2.50000000 \n 5.00000000 \n 10.0000000 \n The local prior distribution table:\n 0.89586 0.95633 1.00000 1.00000 1.00000\n 0.44297 0.76568 0.96116 1.00000 1.00000\n 0.02295 0.34097 0.82286 0.93023 1.00000\n 0.11440 0.11440 0.40956 0.89164 0.98330\n 0.00000 0.10326 0.27851 0.50410 0.84630\n 0.00000 0.00000 0.00000 0.04442 0.47650\n The calibration parameters B(i): \n 0.5737\n 0.4668\n 0.4459\n 0.4698\n 0.3530\nComparing with results in the file for output data / distributions (bicalib.out)", "names=['P_Thr_'+x for x in map (str , parameters_bicalib['cutu'])]\nPtr=pd.DataFrame(lcdf, columns= names)\nprint (Ptr.head())", "expected results \npt0.5 pt1.0 pt2.5 pt5.0 pt10.0\n .5 .5 -1.00 3.26 0.1144 0.1144 0.4096 0.8916 0.9833\n 1.5 .5 -1.00 2.64 0.1144 0.1144 0.4096 0.8916 0.9833\n 2.5 .5 -1.00 2.15 0.0229 0.3410 0.8229 0.9302 1.0000\n 3.5 .5 -1.00 1.69 0.0229 0.3410 0.8229 0.9302 1.0000\n 4.5 .5 -1.00 .51 0.4430 0.7657 0.9612 1.0000 1.0000", "print (Ptr.tail())\n", "expected results \npt0.5 pt1.0 pt2.5 pt5.0 pt10.0\n45.5 49.5 -1.00 8.82 0.0000 0.1033 0.2785 0.5041 0.8463\n46.5 49.5 -1.00 7.97 0.0000 0.1033 0.2785 0.5041 0.8463\n47.5 49.5 -1.00 8.62 0.0000 0.1033 0.2785 0.5041 0.8463\n48.5 49.5 -1.00 6.59 0.0000 0.1033 0.2785 0.5041 0.8463\n49.5 49.5 -1.00 5.83 0.0000 0.1033 0.2785 0.5041 0.8463" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.19/_downloads/063df3a44a4ac9d23978d7b307e69a4e/plot_read_evoked.ipynb
bsd-3-clause
[ "%matplotlib inline", "Reading and writing an evoked file\nThis script shows how to read and write evoked datasets.", "# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n#\n# License: BSD (3-clause)\n\nfrom mne import read_evokeds\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = sample.data_path()\n\nfname = data_path + '/MEG/sample/sample_audvis-ave.fif'\n\n# Reading\ncondition = 'Left Auditory'\nevoked = read_evokeds(fname, condition=condition, baseline=(None, 0),\n proj=True)", "Show result as a butterfly plot:\nBy using exclude=[] bad channels are not excluded and are shown in red", "evoked.plot(exclude=[], time_unit='s')\n\n# Show result as a 2D image (x: time, y: channels, color: amplitude)\nevoked.plot_image(exclude=[], time_unit='s')", "Use :func:mne.Evoked.save or :func:mne.write_evokeds to write the evoked\nresponses to a file." ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
nick-youngblut/SIPSim
ipynb/bac_genome/SSU_genes_per_ng_DNA.ipynb
mit
[ "Estimating the number of 16S rRNA genes per ng of DNA\n\nUsed to estimate the number of 16S genes in a CsCl gradient \nUsed ~5 ug of DNA per gradient\n\nCalculations\n\nNumber of genomes in X DNA\nMW of DNA\nSize distribution of bacterial genomes\nNumber of 16S gene copies in X DNA\nDistribution of 16S gene copy number per genome\n\nSetting variables", "import os\n\nworkDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/SSU_genes_per_ng_DNA/'\nrnammerDir = os.path.join(workDir + 'rnammer')\ngenomeDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/genomes/'", "Init", "import glob\nimport pyfasta\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport scipy.stats as ss\nfrom fitter import Fitter\nfrom functools import partial\n\n%matplotlib inline \n%load_ext rpy2.ipython\n\n%%R\nlibrary(dplyr) \nlibrary(tidyr) \nlibrary(ggplot2) \n\nif not os.path.isdir(workDir):\n os.makedirs(workDir)\n \nif not os.path.isdir(rnammerDir):\n os.makedirs(rnammerDir) ", "Size distribution of bacterial genomes", "p = os.path.join(genomeDir, '*.fasta')\ngenomeFiles = glob.glob(p)\n\nprint 'Number of genome files: {}'.format(len(genomeFiles))", "Distribution of 16S gene copies per genome", "total_seq_len = lambda x: sum([len(y) for y in x.values()])\n\ndef total_genome_lens(genome_files):\n genome_lens = {}\n for fasta in genome_files:\n name = os.path.split(fasta)[-1]\n name = os.path.splitext(name)[0]\n\n pyf = pyfasta.Fasta(fasta)\n genome_lens[name] = [total_seq_len(pyf)]\n return genome_lens\n\ngenome_lens = total_genome_lens(genomeFiles)\n\ndf_genome_len = pd.DataFrame(genome_lens).transpose()\ndf_genome_len\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.hist(df.ix[:,0], bins=20)", "Fitting distribution", "fo = Fitter(df_genome_len.ix[:,0])\nfo.fit()\n\nfo.summary()\n\ngenome_len_best_fit = fo.fitted_param['rayleigh']\ngenome_len_best_fit\n\n# test of distribution\nx = ss.rayleigh.rvs(*genome_len_best_fit, size=10000)\nfig = plt.figure()\nax = plt.subplot(111)\nax.hist(x, bins=50)\nfig.show()", "Distribution of 16S gene copies per genome\nrnammer run", "%%bash -s \"$genomeDir\" \"$rnammerDir\"\n\nfind $1 -name \"*fasta\" | \\\n perl -pe 's/.+\\/|\\.fasta//g' | \\\n xargs -n 1 -I % -P 30 bash -c \\\n \"rnammer -S bac -m ssu -gff $2/%_rrn.gff -f $2/%_rrn.fna -xml $2/%_rrn.xml < $1/%.fasta\"\n\n## Summarizing the results\n\n!cd $rnammerDir; \\\n egrep -v \"^#\" *.gff | \\\n grep \"16s_rRNA\" | \\\n perl -pe 's/:/\\t/' > ssu_summary.txt\n\n\ninFile = os.path.join(rnammerDir, 'ssu_summary.txt')\n\ninFH = open(inFile, 'rb')\ndf_ssu = pd.read_csv(inFH, sep='\\t', header=None)\ndf_ssu.head()\n\nfig = plt.figure()\nax = plt.subplot(111)\nax.hist(df_ssu.ix[:,6], bins=50)\nfig.show()\n\n# filtering by gene length of >= 1000 bp\ndf_ssu_f = df_ssu.loc[df[6] >= 1000]\ndf_ssu_f.head()\n\n# counting number of 16S genes per genome\nssu_count = Counter(df_ssu_f[1])\n\nssu_max = max(ssu_count.values())\n\n# plotting distribution\nfig = plt.figure()\nax = plt.subplot(111)\nax.hist(ssu_count.values(), bins=ssu_max)\nfig.show()", "Fitting distribution", "fo = Fitter(ssu_count.values())\nfo.fit()\n\nfo.summary()\n\nssu_ray_fit = fo.fitted_param['rayleigh']\nssu_ray_fit\n\n# test of distribution\nx = ss.rayleigh.rvs(*ssu_ray_fit, size=10000)\nfig = plt.figure()\nax = plt.subplot(111)\nax.hist(x, bins=50)\nfig.show()\n\nssu_beta_fit = fo.fitted_param['beta']\nssu_beta_fit\n\n# test of distribution\nx = ss.beta.rvs(*ssu_beta_fit, size=10000)\nfig = 
plt.figure()\nax = plt.subplot(111)\nax.hist(x, bins=50)\nfig.show()", "Notes\n\nUsing rayleigh distribution\n\nMonte Carlo estimation of 16S gene copies per ng of DNA\n\nM.W. of dsDNA = (# nucleotides x 607.4) + 157.9", "# example of calculations\ngradient_DNA_conc = 1e-9 # g of DNA\navogadro = 6.022e23 # molecules/mole\n\ngenome_len = 4000000\nmw_genome = genome_len * 607.4 + 157.9\n\nn_genomes = gradient_DNA_conc / mw_genome * avogadro\n\nssu_copy_per_genome = 4\n\nn_genomes * ssu_copy_per_genome\n\ndef SSU_copies_in_ng_DNA(DNA_conc, genome_len, ssu_copy_per_genome):\n \n DNA_conc__g = DNA_conc * 1e-9 # ng --> g of DNA\n avogadros = 6.022e23 # molecules/mole\n\n mw_genome = genome_len * 607.4 + 157.9\n n_genomes = DNA_conc__g / mw_genome * avogadros\n ssu_copies = n_genomes * ssu_copy_per_genome\n \n return ssu_copies\n\n# run\nSSU_copies_in_ng_DNA(1, 4000000, 4)\n\ndef SSU_copies_MC(DNA_conc, genome_len_dist, ssu_copy_dist, n=100000):\n n_copy_dist = []\n for i in range(n):\n genome_len = genome_len_dist(size=1)[0]\n ssu_copy_per_genome = ssu_copy_dist(size=1)[0]\n n_copies = SSU_copies_in_ng_DNA(DNA_conc, genome_len, ssu_copy_per_genome)\n n_copy_dist.append(n_copies) \n return n_copy_dist\n\n# distribution functions\ngenome_len_dist = partial(ss.rayleigh.rvs, *genome_len_best_fit)\nssu_copy_dist = partial(ss.rayleigh.rvs, *ssu_ray_fit)\n\n# monte carlo estimation of ssu copies in a gradient\ngradient_dna_conc__ng = 5000\nn_copy_dist = SSU_copies_MC(gradient_dna_conc__ng, genome_len_dist, ssu_copy_dist, n=10000)\n\nfig = plt.figure()\nax = plt.subplot(111)\nax.hist(n_copy_dist, bins=50)\nfig.show()\n\nmedian_copy = int(np.median(n_copy_dist))\nstd_copy = int(np.std(n_copy_dist))\n\nprint 'Number of SSU copies in {} ng of DNA: {} +/- {}'.format(gradient_dna_conc__ng, median_copy, std_copy)\n\ndef median_confidence_interval(data, confidence=0.95):\n a = 1.0*np.array(data)\n n = len(a)\n m, se = np.median(a), ss.sem(a)\n h = se * ss.t._ppf((1+confidence)/2., n-1)\n return m, m-h, m+h\n\nmci = median_confidence_interval(n_copy_dist)\nmci = map(int, mci)\n\n# lci,hci = ss.norm.interval(0.05, loc=np.mean(n_copy_dist), scale=np.std(n_copy_dist))\n# copy_median = np.median(n_copy_dist)\n# mci = [copy_median, copy_median - lci, copy_median + hci]\n\nprint 'Number of SSU copies in {} ng of DNA: {:,d} (low:{:,d}, high:{:,d})'.format(gradient_dna_conc__ng, *mci)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
NathanYee/ThinkBayes2
code/chap02.ipynb
gpl-2.0
[ "Think Bayes: Chapter 2\nThis notebook presents example code and exercise solutions for Think Bayes.\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "from __future__ import print_function, division\n\n% matplotlib inline\n\nfrom thinkbayes2 import Hist, Pmf, Suite", "The Pmf class\nI'll start by making a Pmf that represents the outcome of a six-sided die. Initially there are 6 values with equal probability.", "pmf = Pmf()\nfor x in [1,2,3,4,5,6]:\n pmf[x] = 1\n \npmf.Print()", "To be true probabilities, they have to add up to 1. So we can normalize the Pmf:", "pmf.Normalize()", "The return value from Normalize is the sum of the probabilities before normalizing.", "pmf.Print()", "A faster way to make a Pmf is to provide a sequence of values. The constructor adds the values to the Pmf and then normalizes:", "pmf = Pmf([1,2,3,4,5,6])\npmf.Print()", "To extract a value from a Pmf, you can use Prob", "pmf.Prob(1)", "Or you can use the bracket operator. Either way, if you ask for the probability of something that's not in the Pmf, the result is 0.", "pmf[1]", "The cookie problem\nHere's a Pmf that represents the prior distribution.", "pmf = Pmf()\npmf['Bowl 1'] = 0.5\npmf['Bowl 2'] = 0.5\npmf.Print()", "And we can update it using Mult", "pmf.Mult('Bowl 1', 0.75)\npmf.Mult('Bowl 2', 0.5)\npmf.Print()", "Or here's the shorter way to construct the prior.", "pmf = Pmf(['Bowl 1', 'Bowl 2'])\npmf.Print()", "And we can use *= for the update.", "pmf['Bowl 1'] *= 0.75\npmf['Bowl 2'] *= 0.5\npmf.Print()", "Either way, we have to normalize the posterior distribution.", "pmf.Normalize()\npmf.Print()", "The Bayesian framework\nHere's the same computation encapsulated in a class.", "class Cookie(Pmf):\n \"\"\"A map from string bowl ID to probablity.\"\"\"\n\n def __init__(self, hypos):\n \"\"\"Initialize self.\n\n hypos: sequence of string bowl IDs\n \"\"\"\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, 1)\n self.Normalize()\n\n def Update(self, data):\n \"\"\"Updates the PMF with new data.\n\n data: string cookie type\n \"\"\"\n for hypo in self.Values():\n like = self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n self.Normalize()\n\n mixes = {\n 'Bowl 1':dict(vanilla=0.75, chocolate=0.25),\n 'Bowl 2':dict(vanilla=0.5, chocolate=0.5),\n }\n\n def Likelihood(self, data, hypo):\n \"\"\"The likelihood of the data under the hypothesis.\n\n data: string cookie type\n hypo: string bowl ID\n \"\"\"\n mix = self.mixes[hypo]\n like = mix[data]\n return like", "We can confirm that we get the same result.", "pmf = Cookie(['Bowl 1', 'Bowl 2'])\npmf.Update('vanilla')\npmf.Print()", "But this implementation is more general; it can handle any sequence of data.", "dataset = ['vanilla', 'chocolate', 'vanilla']\nfor data in dataset:\n pmf.Update(data)\n \npmf.Print()", "The Monty Hall problem\nThe Monty Hall problem might be the most contentious question in\nthe history of probability. The scenario is simple, but the correct\nanswer is so counterintuitive that many people just can't accept\nit, and many smart people have embarrassed themselves not just by\ngetting it wrong but by arguing the wrong side, aggressively,\nin public.\nMonty Hall was the original host of the game show Let's Make a\nDeal. The Monty Hall problem is based on one of the regular\ngames on the show. 
If you are on the show, here's what happens:\n\n\nMonty shows you three closed doors and tells you that there is a\n prize behind each door: one prize is a car, the other two are less\n valuable prizes like peanut butter and fake finger nails. The\n prizes are arranged at random.\n\n\nThe object of the game is to guess which door has the car. If\n you guess right, you get to keep the car.\n\n\nYou pick a door, which we will call Door A. We'll call the\n other doors B and C.\n\n\nBefore opening the door you chose, Monty increases the\n suspense by opening either Door B or C, whichever does not\n have the car. (If the car is actually behind Door A, Monty can\n safely open B or C, so he chooses one at random.)\n\n\nThen Monty offers you the option to stick with your original\n choice or switch to the one remaining unopened door.\n\n\nThe question is, should you \"stick\" or \"switch\" or does it\nmake no difference?\nMost people have the strong intuition that it makes no difference.\nThere are two doors left, they reason, so the chance that the car\nis behind Door A is 50%.\nBut that is wrong. In fact, the chance of winning if you stick\nwith Door A is only 1/3; if you switch, your chances are 2/3.\nHere's a class that solves the Monty Hall problem.", "class Monty(Pmf):\n \"\"\"Map from string location of car to probability\"\"\"\n\n def __init__(self, hypos):\n \"\"\"Initialize the distribution.\n\n hypos: sequence of hypotheses\n \"\"\"\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, 1)\n self.Normalize()\n\n def Update(self, data):\n \"\"\"Updates each hypothesis based on the data.\n\n data: any representation of the data\n \"\"\"\n for hypo in self.Values():\n like = self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n self.Normalize()\n\n def Likelihood(self, data, hypo):\n \"\"\"Compute the likelihood of the data under the hypothesis.\n\n hypo: string name of the door where the prize is\n data: string name of the door Monty opened\n \"\"\"\n if hypo == data:\n return 0\n elif hypo == 'A':\n return 0.5\n else:\n return 1", "And here's how we use it.", "pmf = Monty('ABC')\npmf.Update('B')\npmf.Print()", "The Suite class\nMost Bayesian updates look pretty much the same, especially the Update method. So we can encapsulate the framework in a class, Suite, and create new classes that extend it.\nChild classes of Suite inherit Update and provide Likelihood. So here's the short version of Monty", "class Monty(Suite):\n\n def Likelihood(self, data, hypo):\n if hypo == data:\n return 0\n elif hypo == 'A':\n return 0.5\n else:\n return 1", "And it works.", "pmf = Monty('ABC')\npmf.Update('B')\npmf.Print()", "The M&M problem\nM&Ms are small candy-coated chocolates that come in a variety of\ncolors. Mars, Inc., which makes M&Ms, changes the mixture of\ncolors from time to time.\nIn 1995, they introduced blue M&Ms. Before then, the color mix in\na bag of plain M&Ms was 30% Brown, 20% Yellow, 20% Red, 10%\nGreen, 10% Orange, 10% Tan. Afterward it was 24% Blue , 20%\nGreen, 16% Orange, 14% Yellow, 13% Red, 13% Brown.\nSuppose a friend of mine has two bags of M&Ms, and he tells me\nthat one is from 1994 and one from 1996. He won't tell me which is\nwhich, but he gives me one M&M from each bag. One is yellow and\none is green. 
What is the probability that the yellow one came\nfrom the 1994 bag?\nHere's a solution:", "class M_and_M(Suite):\n \"\"\"Map from hypothesis (A or B) to probability.\"\"\"\n\n mix94 = dict(brown=30,\n yellow=20,\n red=20,\n green=10,\n orange=10,\n tan=10,\n blue=0)\n\n mix96 = dict(blue=24,\n green=20,\n orange=16,\n yellow=14,\n red=13,\n brown=13,\n tan=0)\n\n hypoA = dict(bag1=mix94, bag2=mix96)\n hypoB = dict(bag1=mix96, bag2=mix94)\n\n hypotheses = dict(A=hypoA, B=hypoB)\n\n def Likelihood(self, data, hypo):\n \"\"\"Computes the likelihood of the data under the hypothesis.\n\n hypo: string hypothesis (A or B)\n data: tuple of string bag, string color\n \"\"\"\n bag, color = data\n mix = self.hypotheses[hypo][bag]\n like = mix[color]\n return like", "And here's an update:", "suite = M_and_M('AB')\nsuite.Update(('bag1', 'yellow'))\nsuite.Update(('bag2', 'green'))\nsuite.Print()", "Exercise: Suppose you draw another M&M from bag1 and it's blue. What can you conclude? Run the update to confirm your intuition.", "suite.Update(('bag1', 'blue'))\nsuite.Print()", "Exercise: Now suppose you draw an M&M from bag2 and it's blue. What does that mean? Run the update to see what happens.", "# Solution goes here", "Exercises\nExercise: This one is from one of my favorite books, David MacKay's \"Information Theory, Inference, and Learning Algorithms\":\n\nElvis Presley had a twin brother who died at birth. What is the probability that Elvis was an identical twin?\"\n\nTo answer this one, you need some background information: According to the Wikipedia article on twins: ``Twins are estimated to be approximately 1.9% of the world population, with monozygotic twins making up 0.2% of the total---and 8% of all twins.''", "# Solution goes here\n\n# Solution goes here", "Exercise: Let's consider a more general version of the Monty Hall problem where Monty is more unpredictable. As before, Monty never opens the door you chose (let's call it A) and never opens the door with the prize. So if you choose the door with the prize, Monty has to decide which door to open. Suppose he opens B with probability p and C with probability 1-p. If you choose A and Monty opens B, what is the probability that the car is behind A, in terms of p? What if Monty opens C?\nHint: you might want to use SymPy to do the algebra for you.", "from sympy import symbols\np = symbols('p')\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here", "Exercise: According to the CDC, ``Compared to nonsmokers, men who smoke are about 23 times more likely to develop lung cancer and women who smoke are about 13 times more likely.'' Also, among adults in the U.S. in 2014:\n\nNearly 19 of every 100 adult men (18.8%)\nNearly 15 of every 100 adult women (14.8%)\n\nIf you learn that a woman has been diagnosed with lung cancer, and you know nothing else about her, what is the probability that she is a smoker?", "# Solution goes here", "Exercise In Section 2.3 I said that the solution to the cookie problem generalizes to the case where we draw multiple cookies with replacement.\nBut in the more likely scenario where we eat the cookies we draw, the likelihood of each draw depends on the previous draws.\nModify the solution in this chapter to handle selection without replacement. Hint: add instance variables to Cookie to represent the hypothetical state of the bowls, and modify Likelihood accordingly. 
You might want to define a Bowl object.", "# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Fetisoff/Portfolio
1. Python (Intermediate) Exploring Gun Deaths in the US/Basics.ipynb
apache-2.0
[ "US Gun Deaths Data\nThe dataset came from FiveThirtyEight, and can be found here https://github.com/fivethirtyeight/guns-data. The dataset is stored in the guns.csv file. It contains information on gun deaths in the US from 2012 to 2014. Each row in the dataset represents a single fatality. The columns contain demographic and other information about the victim. Here are the first few rows of the dataset", "import csv\ndata = list(csv.reader(open('guns.csv', 'r')))\nprint(data[:5])\n\n#removing header row\nheaders = data[:1]\ndata = data[1:]\nprint(data[:5])\n\n#count in the dictionary of how many times each element occurs in the year column\n\nyears = [each[1] for each in data]\nyears\nyear_counts = {}\nfor each in years:\n if each in year_counts:\n year_counts[each] += 1\n else:\n year_counts[each] = 1\nprint(year_counts)\n\n \n\n#Let's see if gun deaths in the US change by month and year\nimport datetime\ndates = [datetime.datetime(year=int(each[1]), month=int(each[2]), day=1) for each in data] \ndate_counts = {}\nfor each in dates:\n if each in date_counts:\n date_counts[each] += 1\n else:\n date_counts[each] = 1\ndates[:5]\n\n\n\n", "The sex and race columns contain potentially interesting information on how gun deaths in the US vary by gender and race. Exploring both of these columns can be done with a similar dictionary counting technique to what we did earlier.", "sex_counts = {}\nrace_counts = {}\n\nfor each in data:\n sex = each[5]\n if sex in sex_counts:\n sex_counts[sex] += 1\n else:\n sex_counts[sex] = 1\n\nfor each in data:\n race = each[7]\n if race in race_counts:\n race_counts[race] += 1\n else:\n race_counts[race] = 1\nprint(race_counts)\nprint(sex_counts)\n\n\n\n", "However, our analysis only gives us the total number of gun deaths by race in the US. Unless we know the proportion of each race in the US, we won't be able to meaningfully compare those numbers.\nI want to get is a rate of gun deaths per 100000 people of each race", "f = open ('census.csv', 'r')\ncensus = list(csv.reader(f))\ncensus\n\nmapping = {\n 'Asian/Pacific Islander': int(census[1][14]) + int(census[1][15]),\n 'Black': int(census[1][12]),\n 'Native American/Native Alaskan': int(census[1][13]),\n 'Hispanic': int(census[1][11]),\n 'White': int(census[1][10])\n}\nrace_per_hundredk = {}\n\nfor key, value in race_counts.items():\n result = race_counts[key] / mapping[key] * 100000\n race_per_hundredk[key] = result\nrace_per_hundredk\n\n\n#We can filter our results, and restrict them to the Homicide intent\n\nintents = [each[3] for each in data]\nraces = [each[7] for each in data]\nhomicide_race_counts = {}\nfor i, each in enumerate(races):\n if intents[i] == 'Homicide':\n if each not in homicide_race_counts:\n homicide_race_counts[each] = 0\n else:\n homicide_race_counts[each] += 1\nhomicide_race_counts\n\n\n\n\nhomicide_race_per_hundredk = {}\n\nfor key, value in homicide_race_counts.items():\n result = homicide_race_counts[key] / mapping[key] * 100000\n homicide_race_per_hundredk[key] = result\nhomicide_race_per_hundredk\n\n", "Finding\nI have founded out, that some racial categories in USA have higher gun-related homicide rate than other races. 
For example, at least as evidenced by the statics, that people of Black rice commit gun-related homicide 10 times more people of White race or 4 times more people of Hispanic race.\nAre the any link between month and homicide rate in USA?\nLet figure out that!", "month_homicide_rate = {}\nmonths = [int(each[2]) for each in data]\nfor i, each in enumerate(months):\n if intents[i] == 'Homicide':\n if each not in month_homicide_rate:\n month_homicide_rate[each] = 0\n else:\n month_homicide_rate[each] += 1\nmonth_homicide_rate\n\n\n\n\n\n\n\n\ndef months_diff(input_dict):\n max_value = 0\n max_key = 0\n min_value = input_dict[1]\n min_key = 0\n\n for key, value in input_dict.items():\n if value > max_value:\n max_value = value\n max_key = key\n if value < min_value:\n min_value = value\n min_key = key\n gap = round((max_value / min_value), 2)\n \n print ('max month is',max_key,'has',max_value,'and min month is',min_key,'has',min_value,'. The gap between min and max months is',gap,'!')\n\nmonths_diff(month_homicide_rate)", "As we can see, there is a link beetween month of year and homicide rate. In June are commited gun-relative homicide in 1" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ndanielsen/dc_parking_violations_data
notebooks/Top 15 Violations by Revenue And Total for VA.ipynb
mit
[ "import pandas as pd\nfrom matplotlib.pyplot import pie, axis, show\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\n\nfine_df_file = '../data/interim/fine_enriched_parking_violations.tsv'\n\ndf = pd.read_csv(fine_df_file, sep='\\t', parse_dates=['ticket_issue_datetime'])\ndf['counter'] = 1", "VA Top 15 violations by total revenue (revenue and total)", "dc_df = df[(df.rp_plate_state.isin(['VA']))]\n\ndc_fines = dc_df.groupby(['violation_code']).fine.sum().reset_index('violation_code')\nfine_codes_15 = dc_fines.sort_values(by='fine', ascending=False)[:15]\ntop_codes = dc_df[dc_df.violation_code.isin(fine_codes_15.violation_code)]\n\ntop_violation_by_state = top_codes.groupby(['violation_description']).fine.sum()\nax = top_violation_by_state.plot.barh()\nax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))\n\nplt.draw()\n\ntop_violation_by_state = top_codes.groupby(['violation_description']).counter.sum()\nax = top_violation_by_state.plot.barh()\nax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))\n\nplt.draw()", "VA Top 15 violations by total tickets (revenue and total)", "dc_df = df[(df.rp_plate_state.isin(['VA']))]\n\ndc_fines = dc_df.groupby(['violation_code']).counter.sum().reset_index('violation_code')\nfine_codes_15 = dc_fines.sort_values(by='counter', ascending=False)[:15]\ntop_codes = dc_df[dc_df.violation_code.isin(fine_codes_15.violation_code)]\n\ntop_violation_by_state = top_codes.groupby(['violation_description']).fine.sum()\nax = top_violation_by_state.plot.barh()\nax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))\n\nplt.draw()\n\ntop_violation_by_state = top_codes.groupby(['violation_description']).counter.sum()\nax = top_violation_by_state.plot.barh()\nax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))\n\nplt.draw()" ]
[ "code", "markdown", "code", "markdown", "code" ]
elenduuche/deep-learning
image-classification/.ipynb_checkpoints/dlnd_image_classification-checkpoint.ipynb
mit
[ "Image Classification\nIn this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images.\nGet the Data\nRun the following cell to download the CIFAR-10 dataset for python.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nfrom urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport problem_unittests as tests\nimport tarfile\n\ncifar10_dataset_folder_path = 'cifar-10-batches-py'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile('cifar-10-python.tar.gz'):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:\n urlretrieve(\n 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n 'cifar-10-python.tar.gz',\n pbar.hook)\n\nif not isdir(cifar10_dataset_folder_path):\n with tarfile.open('cifar-10-python.tar.gz') as tar:\n tar.extractall()\n tar.close()\n\n\ntests.test_folder_path(cifar10_dataset_folder_path)", "Explore the Data\nThe dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following:\n* airplane\n* automobile\n* bird\n* cat\n* deer\n* dog\n* frog\n* horse\n* ship\n* truck\nUnderstanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for a image and label pair in the batch.\nAsk yourself \"What are all possible labels?\", \"What is the range of values for the image data?\", \"Are the labels in order or random?\". Answers to questions like these will help you preprocess the data and end up with better predictions.", "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\nimport numpy as np\n\n# Explore the dataset\nbatch_id = 1\nsample_id = 5\nhelper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)", "Implement Preprocess Functions\nNormalize\nIn the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x.", "def normalize(x):\n \"\"\"\n Normalize a list of sample image data in the range of 0 to 1\n : x: List of image data. 
The image shape is (32, 32, 3)\n : return: Numpy array of normalize data\n \"\"\"\n # TODO: Implement Function\n # Normalize RGB for each image data\n norm_image_data = np.ndarray(x.shape, np.float32)\n for i, image_data in enumerate(x):\n temp_image_data = np.ndarray(x.shape, np.float32)\n temp_image_data = image_data\n temp_image_data[:,:,0] = abs((temp_image_data[:,:,0] - 128)/128)\n temp_image_data[:,:,1] = abs((temp_image_data[:,:,1] - 128)/128)\n temp_image_data[:,:,2] = abs((temp_image_data[:,:,2] - 128)/128)\n norm_image_data[i] = temp_image_data\n #print('Sample normalized image data: {}'.format(norm_image_data[2].max()))\n return norm_image_data\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_normalize(normalize)", "One-hot encode\nJust like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function.\nHint: Don't reinvent the wheel.", "def one_hot_encode(x):\n \"\"\"\n One hot encode a list of sample labels. Return a one-hot encoded vector for each label.\n : x: List of sample Labels\n : return: Numpy array of one-hot encoded labels\n \"\"\"\n # TODO: Implement Function\n one_hot = np.zeros(shape=[len(x), 10])\n for i, label_id in enumerate(x):\n one_hot[i, label_id] = True\n return one_hot\n\none_hot_encoding_map = {0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer', 5:'dog', 6:'frog', 7:'horse',\\\n 8:'ship', 9:'truck'}\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_one_hot_encode(one_hot_encode)", "Randomize Data\nAs you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset.\nPreprocess all the data and save it\nRunning the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)", "Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport pickle\nimport problem_unittests as tests\nimport helper\n\n# Load the Preprocessed Validation data\nvalid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))", "Build the network\nFor the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.\nIf you're finding it hard to dedicate enough time for this course a week, we've provided a small shortcut to this part of the project. 
In the next couple of problems, you'll have the option to use TensorFlow Layers or TensorFlow Layers (contrib) to build each layer, except \"Convolutional & Max Pooling\" layer. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup.\nIf you would like to get the most of this course, try to solve all the problems without TF Layers. Let's begin!\nInput\nThe neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions\n* Implement neural_net_image_input\n * Return a TF Placeholder\n * Set the shape using image_shape with batch size set to None.\n * Name the TensorFlow placeholder \"x\" using the TensorFlow name parameter in the TF Placeholder.\n* Implement neural_net_label_input\n * Return a TF Placeholder\n * Set the shape using n_classes with batch size set to None.\n * Name the TensorFlow placeholder \"y\" using the TensorFlow name parameter in the TF Placeholder.\n* Implement neural_net_keep_prob_input\n * Return a TF Placeholder for dropout keep probability.\n * Name the TensorFlow placeholder \"keep_prob\" using the TensorFlow name parameter in the TF Placeholder.\nThese names will be used at the end of the project to load your saved model.\nNote: None for shapes in TensorFlow allow for a dynamic size.", "import tensorflow as tf\n\ndef neural_net_image_input(image_shape):\n \"\"\"\n Return a Tensor for a bach of image input\n : image_shape: Shape of the images\n : return: Tensor for image input.\n \"\"\"\n # TODO: Implement Function\n image_input = tf.placeholder(tf.float32, shape=(None, image_shape[0], image_shape[1], image_shape[2]), name='x')\n return image_input\n\n\ndef neural_net_label_input(n_classes):\n \"\"\"\n Return a Tensor for a batch of label input\n : n_classes: Number of classes\n : return: Tensor for label input.\n \"\"\"\n # TODO: Implement Function\n label_input = tf.placeholder(tf.float32, shape=(None, n_classes), name='y')\n return label_input\n\n\ndef neural_net_keep_prob_input():\n \"\"\"\n Return a Tensor for keep probability\n : return: Tensor for keep probability.\n \"\"\"\n # TODO: Implement Function\n keep_prob_input = tf.placeholder(tf.float32, name='keep_prob')\n return keep_prob_input\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntf.reset_default_graph()\ntests.test_nn_image_inputs(neural_net_image_input)\ntests.test_nn_label_inputs(neural_net_label_input)\ntests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)", "Convolution and Max Pooling Layer\nConvolution layers have a lot of success with images. For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling:\n* Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor.\n* Apply a convolution to x_tensor using weight and conv_strides.\n * We recommend you use same padding, but you're welcome to use any padding.\n* Add bias\n* Add a nonlinear activation to the convolution.\n* Apply Max Pooling using pool_ksize and pool_strides.\n * We recommend you use same padding, but you're welcome to use any padding.\nNote: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer. 
You're free to use any TensorFlow package for all the other layers.", "def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):\n \"\"\"\n Apply convolution then max pooling to x_tensor\n :param x_tensor: TensorFlow Tensor\n :param conv_num_outputs: Number of outputs for the convolutional layer\n :param conv_strides: Stride 2-D Tuple for convolution\n :param pool_ksize: kernal size 2-D Tuple for pool\n :param pool_strides: Stride 2-D Tuple for pool\n : return: A tensor that represents convolution and max pooling of x_tensor\n \"\"\"\n # TODO: Implement Function\n print('Conv_ksize: ', conv_ksize, ' Conv_strides: ', conv_strides, ' Conv output depth:', conv_num_outputs, \\\n x_tensor.get_shape().as_list(), ' Pool ksize: ', pool_ksize, ' Pool strides: ', pool_strides)\n #Convolution and max pool Parameters\n input_depth = x_tensor.get_shape().as_list()[3]\n output_depth = conv_num_outputs\n weight = tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], input_depth, output_depth], mean=0.0, stddev=0.1))\n biases = tf.Variable(tf.truncated_normal(output_depth))\n strides = [1, conv_strides[0], conv_strides[1], 1]\n pool_strides = [1, pool_strides[0], pool_strides[1], 1]\n \n #Convolution & Max pool\n conv2d_1 = tf.nn.conv2d(x_tensor, weight, strides, padding='SAME')\n conv2d_1 = tf.nn.bias_add(conv2d_1, biases)\n conv2d_1 = tf.nn.relu(conv2d_1)\n conv2d_1 = tf.nn.max_pool(conv2d_1, [1, pool_ksize[0], pool_ksize[1], 1], pool_strides, padding='SAME')\n return conv2d_1\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_con_pool(conv2d_maxpool)", "Flatten Layer\nImplement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). You can use TensorFlow Layers or TensorFlow Layers (contrib) for this layer.", "def flatten(x_tensor):\n \"\"\"\n Flatten x_tensor to (Batch Size, Flattened Image Size)\n : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.\n : return: A tensor of size (Batch Size, Flattened Image Size).\n \"\"\"\n # TODO: Implement Function\n #print(x_tensor.get_shape().as_list()[3])\n #print(x_tensor.get_shape().as_list())\n h = x_tensor.get_shape().as_list()[1]\n w = x_tensor.get_shape().as_list()[2]\n d = x_tensor.get_shape().as_list()[3]\n flattened_tensor = tf.reshape(x_tensor, [-1, h*w*d])\n #print(flattened_tensor.get_shape().as_list())\n return flattened_tensor\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_flatten(flatten)", "Fully-Connected Layer\nImplement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). 
You can use TensorFlow Layers or TensorFlow Layers (contrib) for this layer.", "def fully_conn(x_tensor, num_outputs):\n \"\"\"\n Apply a fully connected layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n # TODO: Implement Function\n weight_rows = x_tensor.get_shape().as_list()[1]\n weight = tf.Variable(tf.truncated_normal([weight_rows, num_outputs], mean=0.0, stddev=0.1))\n biases = tf.Variable(tf.truncated_normal([num_outputs]))\n fc1 = tf.add(tf.matmul(x_tensor, weight), biases)\n fc1 = tf.nn.relu(fc1)\n return fc1\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_fully_conn(fully_conn)", "Output Layer\nImplement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). You can use TensorFlow Layers or TensorFlow Layers (contrib) for this layer.\nNote: Activation, softmax, or cross entropy shouldn't be applied to this.", "def output(x_tensor, num_outputs):\n \"\"\"\n Apply a output layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n # TODO: Implement Function\n weight = tf.Variable(tf.truncated_normal([x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.1))\n biases = tf.Variable(tf.zeros([num_outputs]))\n out = tf.add(tf.matmul(x_tensor, weight), biases)\n return out\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_output(output)", "Create Convolutional Model\nImplement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. 
Use the layers you created above to create this model:\n\nApply 1, 2, or 3 Convolution and Max Pool layers\nApply a Flatten Layer\nApply 1, 2, or 3 Fully Connected Layers\nApply an Output Layer\nReturn the output\nApply TensorFlow's Dropout to one or more layers in the model using keep_prob.", "def conv_net(x, keep_prob):\n \"\"\"\n Create a convolutional neural network model\n : x: Placeholder tensor that holds image data.\n : keep_prob: Placeholder tensor that hold dropout keep probability.\n : return: Tensor that represents logits\n \"\"\"\n # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers\n # Play around with different number of outputs, kernel size and stride\n # Function Definition from Above:\n # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)\n conv2d_1 = conv2d_maxpool(x, 10, (5, 5), (1, 1), (2, 2), (2, 2))\n conv2d_2 = conv2d_maxpool(conv2d_1, 32, (5, 5), (1, 1), (2, 2), (2, 2))\n conv2d_3 = conv2d_maxpool(conv2d_2, 64, (5, 5), (1, 1), (2, 2), (2, 2))\n \n\n # TODO: Apply a Flatten Layer\n # Function Definition from Above:\n # flatten(x_tensor)\n flattened_tensor = flatten(conv2d_3)\n \n\n # TODO: Apply 1, 2, or 3 Fully Connected Layers\n # Play around with different number of outputs\n # Function Definition from Above:\n # fully_conn(x_tensor, num_outputs)\n fc1 = fully_conn(flattened_tensor, 64)\n fc1 = tf.nn.dropout(fc1, keep_prob)\n fc2 = fully_conn(fc1, 32)\n fc2 = tf.nn.dropout(fc2, keep_prob)\n \n # TODO: Apply an Output Layer\n # Set this to the number of classes\n # Function Definition from Above:\n # output(x_tensor, num_outputs)\n logits = output(fc2, 10)\n \n # TODO: return output\n return logits\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\n##############################\n## Build the Neural Network ##\n##############################\n\n# Remove previous weights, bias, inputs, etc..\ntf.reset_default_graph()\n\n# Inputs\nx = neural_net_image_input((32, 32, 3))\ny = neural_net_label_input(10)\nkeep_prob = neural_net_keep_prob_input()\n\n# Model\nlogits = conv_net(x, keep_prob)\n\n# Name logits Tensor, so that is can be loaded from disk after training\nlogits = tf.identity(logits, name='logits')\n\n# Loss and Optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\noptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n# Accuracy\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\ntests.test_conv_net(conv_net)", "Train the Neural Network\nSingle Optimization\nImplement the function train_neural_network to do a single optimization. The optimization should use optimizer to optimize in session with a feed_dict of the following:\n* x for image input\n* y for labels\n* keep_prob for keep probability for dropout\nThis function will be called for each batch, so tf.global_variables_initializer() has already been called.\nNote: Nothing needs to be returned. 
This function is only optimizing the neural network.", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n \"\"\"\n Optimize the session on a batch of images and labels\n : session: Current TensorFlow session\n : optimizer: TensorFlow optimizer function\n : keep_probability: keep probability\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n \"\"\"\n # TODO: Implement Function\n session.run(optimizer, feed_dict={ x: feature_batch, y: label_batch, keep_prob: keep_probability})\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_train_nn(train_neural_network)", "Show Stats\nImplement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. Use a keep probability of 1.0 to calculate the loss and validation accuracy.", "def print_stats(session, feature_batch, label_batch, cost, accuracy):\n \"\"\"\n Print information about loss and validation accuracy\n : session: Current TensorFlow session\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n : cost: TensorFlow cost function\n : accuracy: TensorFlow accuracy function\n \"\"\"\n # TODO: Implement Function\n loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})\n valid_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.0})\n print('Cost: ', loss)\n print('Accuracy: ', valid_accuracy)", "Hyperparameters\nTune the following parameters:\n* Set epochs to the number of iterations until the network stops learning or start overfitting\n* Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory:\n * 64\n * 128\n * 256\n * ...\n* Set keep_probability to the probability of keeping a node using dropout", "# TODO: Tune Parameters\nepochs = 20\nbatch_size = 64\nkeep_probability = 0.5", "Train on a Single CIFAR-10 Batch\nInstead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. 
Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nprint('Checking the Training on a Single Batch...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n batch_i = 1\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)", "Fully Train the Model\nNow that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_model_path = './image_classification'\n\nprint('Training...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n # Loop over all batches\n n_batches = 5\n for batch_i in range(1, n_batches + 1):\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)\n \n # Save Model\n saver = tf.train.Saver()\n save_path = saver.save(sess, save_model_path)", "Checkpoint\nThe model has been saved to disk.\nTest Model\nTest your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. 
If you don't, keep tweaking the model architecture and parameters.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport tensorflow as tf\nimport pickle\nimport helper\nimport random\n\n# Set batch size if not already set\ntry:\n if batch_size:\n pass\nexcept NameError:\n batch_size = 64\n\nsave_model_path = './image_classification'\nn_samples = 4\ntop_n_predictions = 3\n\ndef test_model():\n \"\"\"\n Test the saved model against the test dataset\n \"\"\"\n\n test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))\n loaded_graph = tf.Graph()\n\n with tf.Session(graph=loaded_graph) as sess:\n # Load model\n loader = tf.train.import_meta_graph(save_model_path + '.meta')\n loader.restore(sess, save_model_path)\n\n # Get Tensors from loaded model\n loaded_x = loaded_graph.get_tensor_by_name('x:0')\n loaded_y = loaded_graph.get_tensor_by_name('y:0')\n loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n loaded_logits = loaded_graph.get_tensor_by_name('logits:0')\n loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')\n \n # Get accuracy in batches for memory limitations\n test_batch_acc_total = 0\n test_batch_count = 0\n \n for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):\n test_batch_acc_total += sess.run(\n loaded_acc,\n feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})\n test_batch_count += 1\n\n print('Testing Accuracy: {}\\n'.format(test_batch_acc_total/test_batch_count))\n\n # Print Random Samples\n random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))\n random_test_predictions = sess.run(\n tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),\n feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})\n helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)\n\n\ntest_model()", "Why 50-70% Accuracy?\nYou might be wondering why you can't get an accuracy any higher. First things first, 50% isn't bad for a simple CNN. Pure guessing would get you 10% accuracy. However, you might notice people are getting scores well above 70%. That's because we haven't taught you all there is to know about neural networks. We still need to cover a few more techniques.\nSubmitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_image_classification.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
regardscitoyens/consultation_an
exploitation/analyse_quanti_theme2.ipynb
agpl-3.0
[ "%matplotlib inline\n\nimport json\nimport pandas as pd", "Reading the data", "def loadContributions(file, withsexe=False):\n contributions = pd.read_json(path_or_buf=file, orient=\"columns\")\n rows = [];\n rindex = [];\n for i in range(0, contributions.shape[0]):\n row = {};\n row['id'] = contributions['id'][i]\n rindex.append(contributions['id'][i])\n if (withsexe):\n if (contributions['sexe'][i] == 'Homme'):\n row['sexe'] = 0\n else:\n row['sexe'] = 1\n for question in contributions['questions'][i]:\n if (question.get('Reponse')) and (question['texte'][0:5] != 'Savez') and (question['titreQuestion'][-2:] != '10'):\n row[question['titreQuestion']+' : '+question['texte']] = 1\n for criteres in question.get('Reponse'):\n # print(criteres['critere'].keys())\n row[question['titreQuestion']+'. (Réponse) '+question['texte']+' -> '+str(criteres['critere'].get('texte'))] = 1\n rows.append(row)\n df = pd.DataFrame(data=rows)\n df.fillna(0, inplace=True)\n return df\n\ndf = loadContributions('../data/EGALITE2.brut.json', True)\ndf.fillna(0, inplace=True)\ndf.index = df['id']\n#df.to_csv('consultation_an.csv', format='%d')\n#df.columns = ['Q_' + str(col+1) for col in range(len(df.columns) - 2)] + ['id' , 'sexe']\ndf.head()", "Build clustering model\nHere we build a kmeans model , and select the \"optimal\" of clusters.\nHere we see that the optimal number of clusters is 2.", "from sklearn.cluster import KMeans\nfrom sklearn import metrics\nimport numpy as np\nX = df.drop('id', axis=1).values\n\ndef train_kmeans(nb_clusters, X):\n kmeans = KMeans(n_clusters=nb_clusters, random_state=0).fit(X)\n return kmeans\n#print(kmeans.predict(X))\n#kmeans.cluster_centers_\n\n\ndef select_nb_clusters():\n perfs = {};\n for nbclust in range(2,10):\n kmeans_model = train_kmeans(nbclust, X);\n labels = kmeans_model.labels_\n # from http://scikit-learn.org/stable/modules/clustering.html#calinski-harabaz-index\n # we are in an unsupervised model. cannot get better!\n # perfs[nbclust] = metrics.calinski_harabaz_score(X, labels);\n perfs[nbclust] = metrics.silhouette_score(X, labels);\n print(perfs);\n return perfs;\n\n\ndf['clusterindex'] = train_kmeans(4, X).predict(X)\n#df \n\nperfs = select_nb_clusters();\n# result :\n# {2: 341.07570462155348, 3: 227.39963334619881, 4: 186.90438345452918, 5: 151.03979976346525, 6: 129.11214073405731, 7: 112.37235520885432, 8: 102.35994869157568, 9: 93.848315820675438}\n\noptimal_nb_clusters = max(perfs, key=perfs.get);\n\nprint(\"optimal_nb_clusters\" , optimal_nb_clusters);", "Build the optimal model and apply it", "km_model = train_kmeans(optimal_nb_clusters, X);\ndf['clusterindex'] = km_model.predict(X)\nlGroupBy = df.groupby(['clusterindex']).mean();\n\ncluster_profile_counts = df.groupby(['clusterindex']).count();\ncluster_profile_means = df.groupby(['clusterindex']).mean();\nglobal_counts = df.count()\nglobal_means = df.mean()\n\n\n\n\ncluster_profile_counts.head(10)\n\n\ndf_profiles = pd.DataFrame();\nnbclusters = cluster_profile_means.shape[0]\ndf_profiles['clusterindex'] = range(nbclusters)\nfor col in cluster_profile_means.columns:\n if(col != \"clusterindex\"):\n df_profiles[col] = np.zeros(nbclusters)\n for cluster in range(nbclusters):\n df_profiles[col][cluster] = cluster_profile_means[col][cluster]\n# row.append(df[col].mean());\ndf_profiles.head()\n\n#print(df_profiles.columns) \n\nintereseting_columns = {};\nfor col in df_profiles.columns:\n if(col != \"clusterindex\"):\n global_mean = df[col].mean()\n diff_means_global = abs(df_profiles[col] - global_mean). 
max();\n # print(col , diff_means_global)\n if(diff_means_global > 0.05):\n intereseting_columns[col] = True\n \n#print(intereseting_columns)\n\n\n%matplotlib inline\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt", "Cluster Profiles\nHere, the optimal model has two clusters: cluster 0 with 399 cases and cluster 1 with 537 cases. \nThis model is based on binary inputs. Given this, the best description of the clusters is the distribution of zeros and ones of each input (question).\nThe figure below gives the cluster profiles of this model, cluster 0 on the left and cluster 1 on the right. The questions involved are different (highest bars).", "interesting = list(intereseting_columns.keys())\ndf_profiles_sorted = df_profiles[interesting].sort_index(axis=1)\ndf_profiles_sorted.plot.bar(figsize =(1, 1))\ndf_profiles_sorted.plot.bar(figsize =(16, 8), legend=False)\n\n\ndf_profiles_sorted.T\n\n#df_profiles.sort_index(axis=1).T", "Analysis\nTheme 2: audiovisual media and the Internet\nAt first glance, there are two populations:\n - 414 people who know that Internet service providers and hosting services must put reporting mechanisms in place\n - 522 people who do not know it\nThe people who know are better informed when they answer question 3\nIgnoring these questions about the participants' knowledge, there are 325 people who made written contributions (versus 611 who did not)\nThis last group is more critical of the ability of television channels to fight inequalities between women and men and of the Internet reporting mechanism (+10 points for 'Not at all respected' on Q4 and Q7). Surprisingly, they selected the answer about stereotypes (Q2) less often, but were more sensitive to the broadcasting of works created by women (Q5)\nFinally, one more split of the population appears when this difference is ignored:\n - 447 people who think that the Internet reporting provision is not respected at all. They are globally more severe about the usefulness of the mechanism (Q8: 65% not very or not at all suitable) and about the provisions concerning the media (Q4)\n - 489 people who think that the Internet reporting provision is only poorly respected. They judge the other provisions rather negatively but are less severe" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.24/_downloads/ca1574468d033ed7a4e04f129164b25b/20_cluster_1samp_spatiotemporal.ipynb
bsd-3-clause
[ "%matplotlib inline", "Permutation t-test on source data with spatio-temporal clustering\nThis example tests if the evoked response is significantly different between\ntwo conditions across subjects. Here just for demonstration purposes\nwe simulate data from multiple subjects using one subject's data.\nThe multiple comparisons problem is addressed with a cluster-level\npermutation test across space and time.", "# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Eric Larson <larson.eric.d@gmail.com>\n# License: BSD-3-Clause\n\nimport os.path as op\n\nimport numpy as np\nfrom numpy.random import randn\nfrom scipy import stats as stats\n\nimport mne\nfrom mne.epochs import equalize_epoch_counts\nfrom mne.stats import (spatio_temporal_cluster_1samp_test,\n summarize_clusters_stc)\nfrom mne.minimum_norm import apply_inverse, read_inverse_operator\nfrom mne.datasets import sample\n\nprint(__doc__)", "Set parameters", "data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nsubjects_dir = data_path + '/subjects'\nsrc_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'\n\ntmin = -0.2\ntmax = 0.3 # Use a lower tmax to reduce multiple comparisons\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)", "Read epochs for all channels, removing a bad one", "raw.info['bads'] += ['MEG 2443']\npicks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')\nevent_id = 1 # L auditory\nreject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)\nepochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject, preload=True)\n\nevent_id = 3 # L visual\nepochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject, preload=True)\n\n# Equalize trial counts to eliminate bias (which would otherwise be\n# introduced by the abs() performed below)\nequalize_epoch_counts([epochs1, epochs2])", "Transform to source space", "fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\nmethod = \"dSPM\" # use dSPM method (could also be MNE, sLORETA, or eLORETA)\ninverse_operator = read_inverse_operator(fname_inv)\nsample_vertices = [s['vertno'] for s in inverse_operator['src']]\n\n# Let's average and compute inverse, resampling to speed things up\nevoked1 = epochs1.average()\nevoked1.resample(50, npad='auto')\ncondition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)\nevoked2 = epochs2.average()\nevoked2.resample(50, npad='auto')\ncondition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)\n\n# Let's only deal with t > 0, cropping to reduce multiple comparisons\ncondition1.crop(0, None)\ncondition2.crop(0, None)\ntmin = condition1.tmin\ntstep = condition1.tstep * 1000 # convert to milliseconds", "Transform to common cortical space\nNormally you would read in estimates across several subjects and morph\nthem to the same cortical space (e.g. fsaverage). 
For example purposes,\nwe will simulate this by just having each \"subject\" have the same\nresponse (just noisy in source space) here.\n<div class=\"alert alert-info\"><h4>Note</h4><p>Note that for 7 subjects with a two-sided statistical test, the minimum\n significance under a permutation test is only p = 1/(2 ** 6) = 0.015,\n which is large.</p></div>", "n_vertices_sample, n_times = condition1.data.shape\nn_subjects = 6\nprint('Simulating data for %d subjects.' % n_subjects)\n\n# Let's make sure our results replicate, so set the seed.\nnp.random.seed(0)\nX = randn(n_vertices_sample, n_times, n_subjects, 2) * 10\nX[:, :, :, 0] += condition1.data[:, :, np.newaxis]\nX[:, :, :, 1] += condition2.data[:, :, np.newaxis]", "It's a good idea to spatially smooth the data, and for visualization\npurposes, let's morph these to fsaverage, which is a grade 5 source space\nwith vertices 0:10242 for each hemisphere. Usually you'd have to morph\neach subject's data separately (and you might want to use morph_data\ninstead), but here since all estimates are on 'sample' we can use one\nmorph matrix for all the heavy lifting.", "# Read the source space we are morphing to\nsrc = mne.read_source_spaces(src_fname)\nfsave_vertices = [s['vertno'] for s in src]\nmorph_mat = mne.compute_source_morph(\n src=inverse_operator['src'], subject_to='fsaverage',\n spacing=fsave_vertices, subjects_dir=subjects_dir).morph_mat\n\nn_vertices_fsave = morph_mat.shape[0]\n\n# We have to change the shape for the dot() to work properly\nX = X.reshape(n_vertices_sample, n_times * n_subjects * 2)\nprint('Morphing data.')\nX = morph_mat.dot(X) # morph_mat is a sparse matrix\nX = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)", "Finally, we want to compare the overall activity levels in each condition,\nthe diff is taken along the last axis (condition). The negative sign makes\nit so condition1 > condition2 shows up as \"red blobs\" (instead of blue).", "X = np.abs(X) # only magnitude\nX = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast", "Compute statistic\nTo use an algorithm optimized for spatio-temporal clustering, we\njust pass the spatial adjacency matrix (instead of spatio-temporal)", "print('Computing adjacency.')\nadjacency = mne.spatial_src_adjacency(src)\n\n# Note that X needs to be a multi-dimensional array of shape\n# samples (subjects) x time x space, so we permute dimensions\nX = np.transpose(X, [2, 1, 0])\n\n# Now let's actually do the clustering. This can take a long time...\n# Here we set the threshold quite high to reduce computation.\np_threshold = 0.001\nt_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)\nprint('Clustering.')\nT_obs, clusters, cluster_p_values, H0 = clu = \\\n spatio_temporal_cluster_1samp_test(X, adjacency=adjacency, n_jobs=1,\n threshold=t_threshold, buffer_size=None,\n verbose=True)\n# Now select the clusters that are sig. 
at p < 0.05 (note that this value\n# is multiple-comparisons corrected).\ngood_cluster_inds = np.where(cluster_p_values < 0.05)[0]", "Visualize the clusters", "print('Visualizing clusters.')\n\n# Now let's build a convenient representation of each cluster, where each\n# cluster becomes a \"time point\" in the SourceEstimate\nstc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,\n vertices=fsave_vertices,\n subject='fsaverage')\n\n# Let's actually plot the first \"time point\" in the SourceEstimate, which\n# shows all the clusters, weighted by duration.\nsubjects_dir = op.join(data_path, 'subjects')\n# blue blobs are for condition A < condition B, red for A > B\nbrain = stc_all_cluster_vis.plot(\n hemi='both', views='lateral', subjects_dir=subjects_dir,\n time_label='temporal extent (ms)', size=(800, 800),\n smoothing_steps=5, clim=dict(kind='value', pos_lims=[0, 1, 40]))\n# brain.save_image('clusters.png')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
e-sr/SDWirkungNi
DSP/auswertungLS.ipynb
cc0-1.0
[ "Auswertung der Lichtschrankesignal (LS)\nDate: october 2015\nAuthor: ESR\n\nZiel dieser analyse ist folgende Grössen abzuschätzen:\n\nmittelere Geschwindigkeit der Zug\nAnderung in der Geschwindigkeit\nDuchfahrtszeit jedes Drehgestell\n\n\nNotwendige Modulen", "%reset -f\n%matplotlib notebook\n%load_ext autoreload\n%autoreload 1\n%aimport functions\n# %qtconsole\nimport numpy as np\nimport acoustics\nfrom functions import *\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nmpl.rcParams['lines.linewidth']=0.5", "Daten und Signale Importieren", "%%capture c\nimport json\npassby = json.load(open('Tabellen\\passby.json','r+'))\nfill_passby_with_signals(passby)", "Vorbereitung\nAuswahl Vorbeifahrt und Abschnitt\n\nAuswahl der Vorbeifahrt. Insgesamt haben wir die folgende passby IDs:", "print('passby IDs:', list(passby.keys()))", "Auswahl einer Abschnitt mit Lichtschranke:\nQ1, Q4", "E = passby['14']['Q4']\n#\nprint('Signal ID(with corresponding .mat file):', E['ID'])\nLSignals = {'LS':E['signals']['LS']}", "Detektion der Durchfahrtszeiten (tPeaks) jedes Drehgestell\nWenn die LS vom Rad abgedunket wird entsteht im Signal ein Peak. Damit lassen sich die Durchfahrtszeiten jedes drehgestell abschätzen. Die Funktion detect_weel_times implementiert die Berechnung.", "tPeaks = detect_weel_times(LSignals['LS'], decimation = 8 )", "das Resultat ist in den nächsten Bild zu sehen", "f,ax = plt.subplots()\nLSignals['LS'].plot(ax=ax)\nfor tp in tPeaks:\n ax.axvline(tp,color='red',alpha=0.5)\nax.set_xbound(tPeaks.min()-0.1, tPeaks.max()+0.1)", "Mittelere und Änderung der Vorbeifahrtsgeschwindigkeit\nDie Abschätzung erfolgt in zwei schritte und ist im train_speed funktion implementiert:\n\naus tPeaks lässt sich mithilfe der Abstand der Axen im Drehgestell die Geschwindigkeit jeder Drehgestell abschätzen.\nDann kann man mittels eine regression (robuste regression um ausreisser wenig zu gewichten) die mittelere geschwindigkeit und die Änderung der Vorbeifahrtsgeschwindigkeit abgeschätzt werden\n\nEine Abbildung der Resultate is unten zu sehen", "_,_,_ = train_speed(tPeaks, axleDistance=2, plot=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
turbomanage/training-data-analyst
courses/machine_learning/deepdive2/image_classification/solutions/2_mnist_models.ipynb
apache-2.0
[ "MNIST Image Classification with TensorFlow on Cloud ML Engine\nThis notebook demonstrates how to implement different image models on MNIST using the tf.keras API.\nLearning Objectives\n\nUnderstand how to build a Dense Neural Network (DNN) for image classification\nUnderstand how to use dropout (DNN) for image classification\nUnderstand how to use Convolutional Neural Networks (CNN)\nKnow how to deploy and use an image classifcation model using Google Cloud's AI Platform\n\nFirst things first. Configure the parameters below to match your own Google Cloud project details.", "from datetime import datetime\nimport os\n\nPROJECT = \"your-project-id-here\" # REPLACE WITH YOUR PROJECT ID\nBUCKET = \"your-bucket-id-here\" # REPLACE WITH YOUR BUCKET NAME\nREGION = \"us-central1\" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1\n\n# Do not change these\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"BUCKET\"] = BUCKET\nos.environ[\"REGION\"] = REGION\nos.environ[\"IMAGE_URI\"] = os.path.join(\"gcr.io\", PROJECT, \"mnist_models\")", "Building a dynamic model\nIn the previous notebook, <a href=\"mnist_linear.ipynb\">mnist_linear.ipynb</a>, we ran our code directly from the notebook. In order to run it on the AI Platform, it needs to be packaged as a python module.\nThe boilerplate structure for this module has already been set up in the folder mnist_models. The module lives in the sub-folder, trainer, and is designated as a python package with the empty __init__.py (mnist_models/trainer/__init__.py) file. It still needs the model and a trainer to run it, so let's make them.\nLet's start with the trainer file first. This file parses command line arguments to feed into the model.", "%%writefile mnist_models/trainer/task.py\nimport argparse\nimport json\nimport os\nimport sys\n\nfrom . import model\n\n\ndef _parse_arguments(argv):\n \"\"\"Parses command-line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_type',\n help='Which model type to use',\n type=str, default='linear')\n parser.add_argument(\n '--epochs',\n help='The number of epochs to train',\n type=int, default=10)\n parser.add_argument(\n '--steps_per_epoch',\n help='The number of steps per epoch to train',\n type=int, default=100)\n parser.add_argument(\n '--job-dir',\n help='Directory where to save the given model',\n type=str, default='mnist_models/')\n return parser.parse_known_args(argv)\n\n\ndef main():\n \"\"\"Parses command line arguments and kicks off model training.\"\"\"\n args = _parse_arguments(sys.argv[1:])[0]\n\n # Configure path for hyperparameter tuning.\n trial_id = json.loads(\n os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '')\n output_path = args.job_dir if not trial_id else args.job_dir + '/'\n\n model_layers = model.get_layers(args.model_type)\n image_model = model.build_model(model_layers, args.job_dir)\n model_history = model.train_and_evaluate(\n image_model, args.epochs, args.steps_per_epoch, args.job_dir)\n\n\nif __name__ == '__main__':\n main()\n", "Next, let's group non-model functions into a util file to keep the model file simple. 
We'll copy over the scale and load_dataset functions from the previous lab.", "%%writefile mnist_models/trainer/util.py\nimport tensorflow as tf\n\n\ndef scale(image, label):\n \"\"\"Scales images from a 0-255 int range to a 0-1 float range\"\"\"\n image = tf.cast(image, tf.float32)\n image /= 255\n image = tf.expand_dims(image, -1)\n return image, label\n\n\ndef load_dataset(\n data, training=True, buffer_size=5000, batch_size=100, nclasses=10):\n \"\"\"Loads MNIST dataset into a tf.data.Dataset\"\"\"\n (x_train, y_train), (x_test, y_test) = data\n x = x_train if training else x_test\n y = y_train if training else y_test\n # One-hot encode the classes\n y = tf.keras.utils.to_categorical(y, nclasses)\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n dataset = dataset.map(scale).batch(batch_size)\n if training:\n dataset = dataset.shuffle(buffer_size).repeat()\n return dataset\n", "Finally, let's code the models! The tf.keras API accepts an array of layers into a model object, so we can create a dictionary of layers based on the different model types we want to use. The below file has two functions: get_layers and create_and_train_model. We will build the structure of our model in get_layers. Last but not least, we'll copy over the training code from the previous lab into train_and_evaluate.\nTODO 1: Define the Keras layers for a DNN model \nTODO 2: Define the Keras layers for a dropout model\nTODO 3: Define the Keras layers for a CNN model \nHint: These models progressively build on each other. Look at the imported tensorflow.keras.layers modules and the default values for the variables defined in get_layers for guidance.", "%%writefile mnist_models/trainer/model.py\nimport os\nimport shutil\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom tensorflow.keras.layers import (\n Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax)\n\nfrom . 
import util\n\n\n# Image Variables\nWIDTH = 28\nHEIGHT = 28\n\n\ndef get_layers(\n model_type,\n nclasses=10,\n hidden_layer_1_neurons=400,\n hidden_layer_2_neurons=100,\n dropout_rate=0.25,\n num_filters_1=64,\n kernel_size_1=3,\n pooling_size_1=2,\n num_filters_2=32,\n kernel_size_2=3,\n pooling_size_2=2):\n \"\"\"Constructs layers for a keras model based on a dict of model types.\"\"\"\n model_layers = {\n 'linear': [\n Flatten(),\n Dense(nclasses),\n Softmax()\n ],\n 'dnn': [\n Flatten(),\n Dense(hidden_layer_1_neurons, activation='relu'),\n Dense(hidden_layer_2_neurons, activation='relu'),\n Dense(nclasses),\n Softmax()\n ],\n 'dnn_dropout': [\n Flatten(),\n Dense(hidden_layer_1_neurons, activation='relu'),\n Dense(hidden_layer_2_neurons, activation='relu'),\n Dropout(dropout_rate),\n Dense(nclasses),\n Softmax()\n ],\n 'cnn': [\n Conv2D(num_filters_1, kernel_size=kernel_size_1,\n activation='relu', input_shape=(WIDTH, HEIGHT, 1)),\n MaxPooling2D(pooling_size_1),\n Conv2D(num_filters_2, kernel_size=kernel_size_2,\n activation='relu'),\n MaxPooling2D(pooling_size_2),\n Flatten(),\n Dense(hidden_layer_1_neurons, activation='relu'),\n Dense(hidden_layer_2_neurons, activation='relu'),\n Dropout(dropout_rate),\n Dense(nclasses),\n Softmax()\n ]\n }\n return model_layers[model_type]\n\n\ndef build_model(layers, output_dir):\n \"\"\"Compiles keras model for image classification.\"\"\"\n model = Sequential(layers)\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\n\ndef train_and_evaluate(model, num_epochs, steps_per_epoch, output_dir):\n \"\"\"Compiles keras model and loads data into it for training.\"\"\"\n mnist = tf.keras.datasets.mnist.load_data()\n train_data = util.load_dataset(mnist)\n validation_data = util.load_dataset(mnist, training=False)\n\n callbacks = []\n if output_dir:\n tensorboard_callback = TensorBoard(log_dir=output_dir)\n callbacks = [tensorboard_callback]\n\n history = model.fit(\n train_data,\n validation_data=validation_data,\n epochs=num_epochs,\n steps_per_epoch=steps_per_epoch,\n verbose=2,\n callbacks=callbacks)\n\n if output_dir:\n export_path = os.path.join(output_dir, 'keras_export')\n model.save(export_path, save_format='tf')\n\n return history\n", "Local Training\nWith everything set up, let's run locally to test the code. Some of the previous tests have been copied over into a testing script mnist_models/trainer/test.py to make sure the model still passes our previous checks. On line 13, you can specify which model types you would like to check. line 14 and line 15 has the number of epochs and steps per epoch respectively.\nMoment of truth! Run the code below to check your models against the unit tests. If you see \"OK\" at the end when it's finished running, congrats! You've passed the tests!", "!python3 -m mnist_models.trainer.test", "Now that we know that our models are working as expected, let's run it on the Google Cloud AI Platform. We can run it as a python module locally first using the command line.\nThe below cell transfers some of our variables to the command line as well as create a job directory including a timestamp. This is where our model and tensorboard data will be stored.", "current_time = datetime.now().strftime(\"%y%m%d_%H%M%S\")\nmodel_type = 'cnn'\n\nos.environ[\"MODEL_TYPE\"] = model_type\nos.environ[\"JOB_DIR\"] = \"mnist_models/models/{}_{}/\".format(\n model_type, current_time)", "The cell below runs the local version of the code. 
The epochs and steps_per_epoch flags can be changed to run for longer or shorter, as defined in our mnist_models/trainer/task.py file.", "%%bash\npython3 -m mnist_models.trainer.task \\\n --job-dir=$JOB_DIR \\\n --epochs=5 \\\n --steps_per_epoch=50 \\\n --model_type=$MODEL_TYPE", "Training on the cloud\nSince we're using an unreleased version of TensorFlow on AI Platform, we can instead use a Deep Learning Container in order to take advantage of libraries and applications not normally packaged with AI Platform. Below is a simple Dockerfile which copies our code to be used in a TF2 environment.", "%%writefile mnist_models/Dockerfile\nFROM gcr.io/deeplearning-platform-release/tf2-cpu\nCOPY mnist_models/trainer /mnist_models/trainer\nENTRYPOINT [\"python3\", \"-m\", \"mnist_models.trainer.task\"]", "The below command builds the image and ships it off to Google Cloud so it can be used for AI Platform. When built, it will show up here with the name mnist_models. (Click here to enable Cloud Build)", "!docker build -f mnist_models/Dockerfile -t $IMAGE_URI ./\n\n!docker push $IMAGE_URI", "Finally, we can kick off the AI Platform training job. We can pass in our docker image using the master-image-uri flag.", "current_time = datetime.now().strftime(\"%y%m%d_%H%M%S\")\nmodel_type = 'cnn'\n\nos.environ[\"MODEL_TYPE\"] = model_type\nos.environ[\"JOB_DIR\"] = \"gs://{}/mnist_{}_{}/\".format(\n BUCKET, model_type, current_time)\nos.environ[\"JOB_NAME\"] = \"mnist_{}_{}\".format(\n model_type, current_time)\n\n%%bash\necho $JOB_DIR $REGION $JOB_NAME\ngcloud ai-platform jobs submit training $JOB_NAME \\\n --staging-bucket=gs://$BUCKET \\\n --region=$REGION \\\n --master-image-uri=$IMAGE_URI \\\n --scale-tier=BASIC_GPU \\\n --job-dir=$JOB_DIR \\\n -- \\\n --model_type=$MODEL_TYPE", "Can't wait to see the results? Run the code below and copy the output into the Google Cloud Shell to follow along with TensorBoard. Look at the web preview on port 6006.", "!echo \"tensorboard --logdir $JOB_DIR\"", "Deploying and predicting with model\nOnce you have a model you're proud of, let's deploy it! All we need to do is give AI Platform the location of the model. Below uses the keras export path of the previous job, but ${JOB_DIR}keras_export/ can always be changed to a different path.\nEven though we're using a 1.14 runtime, it's compatible with TF2 exported models. Phew!\nUncomment the delete commands below if you are getting an \"already exists error\" and want to deploy a new model.", "%%bash\nMODEL_NAME=\"mnist\"\nMODEL_VERSION=${MODEL_TYPE}\nMODEL_LOCATION=${JOB_DIR}keras_export/\necho \"Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... 
this will take a few minutes\"\n#yes | gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n#yes | gcloud ai-platform models delete ${MODEL_NAME}\ngcloud ai-platform models create ${MODEL_NAME} --regions $REGION\ngcloud ai-platform versions create ${MODEL_VERSION} \\\n --model ${MODEL_NAME} \\\n --origin ${MODEL_LOCATION} \\\n --framework tensorflow \\\n --runtime-version=1.14", "To predict with the model, let's take one of the example images.\nTODO 4: Write a .json file with image data to send to an AI Platform deployed model", "import json, codecs\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom mnist_models.trainer import util\n\nHEIGHT = 28\nWIDTH = 28\nIMGNO = 12\n\nmnist = tf.keras.datasets.mnist.load_data()\n(x_train, y_train), (x_test, y_test) = mnist\ntest_image = x_test[IMGNO]\n\njsondata = test_image.reshape(HEIGHT, WIDTH, 1).tolist()\njson.dump(jsondata, codecs.open(\"test.json\", \"w\", encoding = \"utf-8\"))\nplt.imshow(test_image.reshape(HEIGHT, WIDTH));", "Finally, we can send it to the prediction service. The output will have a 1 in the index of the corresponding digit it is predicting. Congrats! You've completed the lab!", "%%bash\ngcloud ai-platform predict \\\n --model=mnist \\\n --version=${MODEL_TYPE} \\\n --json-instances=./test.json", "Copyright 2019 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Nikolay-Lysenko/presentations
endogeneity/treatment_effect_with_selection_on_unobservables.ipynb
mit
[ "Introduction\nProblem Description\nData-driven approaches are now used in many fields from business to science. Since data storage and computational power has become cheap, machine learning has gained popularity. However, the majority of tools that can extract dependencies from data, are designed for prediction problem. In this notebook, a problem of decision support simulation is considered and it is shown that even good predictive models can lead to wrong conclusions. This occurs under some conditions summarized by an umbrella term called endogeneity. Its particular cases are as follows:\n* An important variable is omitted;\n* Variables that are used as features are measured with biases;\n* There is simultaneous or reverse causality between a target variable and some features.\nHere, important variable omission is a root of a trouble.\nSuppose that situation is as follows. There is a freshly-hired manager that can assign treatment to items in order to increase target metric. Treatment is binary, i.e. for each item it is assigned or it is absent. Because treatment costs something, its assignment should be optimized — only some items should be treated. A historical dataset of items performance is given, but the manager does not know that previously treatment was assigned predominantely based on values of just one parameter. Moreover, this parameter is not included in the dataset. By the way, the manager wants to create a system that predicts an item's target metric in case of treatment and in case of absence of treatment. If this system is deployed, the manager can compare these two cases and decide whether effect of treatment worths its costs.\nIf machine learning approach results in good prediction scores, chances are that the manager does not suspect that important variable is omitted (at least until some expenses are generated by wrong decisions). Hence, domain knowledge and data understanding are still required for modelling based on data. This is of particular importance when datasets contain values that are produced by someone's decisions, because there is no guarantee that future decisions will not change dramatically. On the flip side, if all factors that affect decisions are included in a dataset, i.e., there is selection on observables for treatment assignment, a powerful enough model is able to estimate treatment effect correctly (but accuracy of predictions still does not ensure causal relationships detection).\nReferences\nTo read more about causality in data analysis, it is possible to look at these papers:\n\n\nAngrist J, Pischke J-S. Mostly Harmless Econometrics. Princeton University Press, 2009.\n\n\nVarian H. Big Data: New Tricks for Econometrics. 
Journal of Economic Perspectives, 28(2): 3–28, 2013\n\n\nPreparations\nGeneral", "from itertools import combinations\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split, KFold, GridSearchCV\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LinearRegression\n\n# Startup settings can not suppress a warning from `XGBRegressor` and so this is needed.\nimport warnings\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from xgboost import XGBRegressor\n\nnp.random.seed(seed=361)", "Synthetic Dataset Generation\nLet us generate an unobserved parameter and an indicator of treatment such that they are highly correlated.", "unobserved = np.hstack((np.ones(10000), np.zeros(10000)))\ntreatment = np.hstack((np.ones(9000), np.zeros(10000), np.ones(1000)))\n\nnp.corrcoef(unobserved, treatment)", "Now create historical dataset that is used for learning predictive model.", "def synthesize_dataset(unobserved, treatment,\n given_exogenous=None, n_exogenous_to_draw=2,\n weights_matrix=np.array([[5, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 2, 1],\n [0, 0, 1, 3]])):\n \"\"\"\n A helper function for repetitive\n pieces of code.\n \n Creates a dataset, where target depends on\n `unobserved`, but `unobserved` is not\n included as a feature. Independent features\n can be passed as `given_exogenous` as well as\n be drawn from Gaussian distribution.\n \n Target is generated as linear combination of\n features and their interactions in the\n following manner. Order features as below:\n unobserved variable, treatment indicator,\n given exogenous features, drawn exogenous\n features. Then the (i, i)-th element of\n `weights_matrix` defines coefficient of\n the i-th feature, whereas the (i, j)-th\n element of `weights_matrix` (where i != j)\n defines coefficient of interaction between\n the i-th and j-th features.\n\n @type unobserved: numpy.ndarray\n @type treatment: numpy.ndarray\n @type given_exogenous: numpy.ndarray\n @type n_exogenous_to_draw: int\n @type weights_matrix: numpy.ndarray\n @rtype: tuple(numpy.ndarray)\n \"\"\"\n\n if unobserved.shape != treatment.shape:\n raise ValueError(\"`unobserved` and `treatment` are not aligned.\")\n if (given_exogenous is not None and\n unobserved.shape[0] != given_exogenous.shape[0]):\n raise ValueError(\"`unobserved` and `given_exogenous` are not \" +\n \"aligned. 
Try to transpose `given_exogenous`.\")\n if weights_matrix.shape[0] != weights_matrix.shape[1]:\n raise ValueError(\"Matrix of weights is not square.\")\n if not np.array_equal(weights_matrix, weights_matrix.T):\n raise ValueError(\"Matrix of weigths is not symmetric.\")\n len_of_given = given_exogenous.shape[1] if given_exogenous is not None else 0\n if 2 + len_of_given + n_exogenous_to_draw != weights_matrix.shape[0]:\n raise ValueError(\"Number of weights is not equal to that of features.\")\n\n drawn_features = []\n for i in range(n_exogenous_to_draw):\n current_feature = np.random.normal(size=unobserved.shape[0])\n drawn_features.append(current_feature)\n if given_exogenous is None:\n features = np.vstack([unobserved, treatment] + drawn_features).T\n else:\n features = np.vstack([unobserved, treatment, given_exogenous.T] +\n drawn_features).T\n target = np.dot(features, weights_matrix.diagonal())\n indices = list(range(weights_matrix.shape[0]))\n interactions = [weights_matrix[i, j] * features[:, i] * features[:, j]\n for i, j in combinations(indices, 2)]\n target = np.sum(np.vstack([target] + interactions), axis=0)\n return features[:, 1:], target\n\nlearning_X, learning_y = synthesize_dataset(unobserved, treatment)", "Now create two datasets for simulation where the only difference between them is that in the first one treatment is absent and in the second one treatment is assigned to all items.", "unobserved = np.hstack((np.ones(2500), np.zeros(2500)))\n\nno_treatment = np.zeros(5000)\nfull_treatment = np.ones(5000)\n\nno_treatment_X, no_treatment_y = synthesize_dataset(unobserved, no_treatment)\nfull_treatment_X, full_treatment_y = synthesize_dataset(unobserved, full_treatment,\n no_treatment_X[:, 1:], 0)", "Look at the data that are used for simulation.", "no_treatment_X[:5, :]\n\nfull_treatment_X[:5, :]\n\nno_treatment_y[:5]\n\nfull_treatment_y[:5]", "Good Model...", "X_train, X_test, y_train, y_test = train_test_split(learning_X, learning_y,\n random_state=361)\nX_train.shape, X_test.shape, y_train.shape, y_test.shape\n\ndef tune_inform(X_train, y_train, rgr, grid_params, kf, scoring):\n \"\"\"\n Just a helper function that combines\n all routines related to grid search.\n \n @type X_train: numpy.ndarray\n @type y_train: numpy.ndarray\n @type rgr: any sklearn regressor\n @type grid_params: dict\n @type kf: any sklearn folds\n @type scoring: str\n @rtype: sklearn regressor\n \"\"\"\n grid_search_cv = GridSearchCV(rgr, grid_params, cv=kf,\n scoring=scoring)\n grid_search_cv.fit(X_train, y_train)\n print(\"Best CV mean score: {}\".format(grid_search_cv.best_score_))\n means = grid_search_cv.cv_results_['mean_test_score']\n stds = grid_search_cv.cv_results_['std_test_score']\n print(\"Detailed results:\")\n for mean, std, params in zip(means, stds,\n grid_search_cv.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, 2 * std, params))\n return grid_search_cv.best_estimator_\n\nrgr = LinearRegression()\ngrid_params = {'fit_intercept': [True, False]}\nkf = KFold(n_splits=5, shuffle=True, random_state=361)", "Let us use coefficient of determination as a scorer rather than MSE. Actually, they are linearly dependent: $R^2 = 1 - \\frac{MSE}{\\mathrm{Var}(y)}$, but coefficient of determination is easier to interpret.", "rgr = tune_inform(X_train, y_train, rgr, grid_params, kf, 'r2')\n\ny_hat = rgr.predict(X_test)\nr2_score(y_test, y_hat)", "Although true relationship is non-linear, predictive power of linear regression is good. 
This is indicated by close to 1 coefficient of determination. Since the winner is model with intercept, its score can be interpreted as follows — the model explains almost all variance of the target around its mean (note that such interpretation can not be used for a model without intercept).", "rgr = XGBRegressor()\ngrid_params = {'n_estimators': [50, 100, 200, 300],\n 'max_depth': [3, 5],\n 'subsample': [0.8, 1]}\nkf = KFold(n_splits=5, shuffle=True, random_state=361)\n\nrgr = tune_inform(X_train, y_train, rgr, grid_params, kf, 'r2')", "It looks like almost all combinations of hyperparameters result in error that is close to irreducible error caused by mismatches between the indicator of treatment and the omitted variable.", "y_hat = rgr.predict(X_test)\nr2_score(y_test, y_hat)", "The score is even closer to 1 than in case of linear model. Decent result deceptively motivates to think that all important variables are included in the model.\n...and Poor Simulation", "no_treatment_y_hat = rgr.predict(no_treatment_X)\nr2_score(no_treatment_y, no_treatment_y_hat)\n\nfull_treatment_y_hat = rgr.predict(full_treatment_X)\nr2_score(full_treatment_y, full_treatment_y_hat)", "And now scores are not perfect, are they?", "fig = plt.figure(figsize=(14, 7))\n\nax_one = fig.add_subplot(121)\nax_one.scatter(no_treatment_y_hat, no_treatment_y)\nax_one.set_title(\"Simulation of absence of treatment\")\nax_one.set_xlabel(\"Predicted values\")\nax_one.set_ylabel(\"True values\")\nax_one.grid()\n\nax_two = fig.add_subplot(122, sharey=ax_one)\nax_two.scatter(full_treatment_y_hat, full_treatment_y)\nax_two.set_title(\"Simulation of treatment\")\nax_two.set_xlabel(\"Predicted values\")\nax_two.set_ylabel(\"True values\")\n_ = ax_two.grid()", "It can be seen that effect of treatment is overestimated. In case of absence of treatment, for items with unobserved feature equal to 1, predictions are significantly less than true values. To be more precise, the differences are close to coefficient near unobserved feature in weights_matrix passed to the dataset creation. Similarly, in case of full treatment, for items with unobserved feature equal to 0, predictions are higher than true values and the differences are close to the abovementioned coefficient too.\nFinally, let us simulate a wrong decision that the manager can make. Suppose that treatment costs one dollar per item and every unit increase in the target variable leads to creation of value that is equal to one dollar too.", "estimated_effects = full_treatment_y_hat - no_treatment_y_hat\ntrue_effects = full_treatment_y - no_treatment_y\n\nnp.min(estimated_effects)", "The model recommends to treat all items. What happens if all of them are treated?", "cost_of_one_treatment = 1\n\nestimated_net_improvement = (np.sum(estimated_effects) -\n cost_of_one_treatment * estimated_effects.shape[0])\nestimated_net_improvement\n\ntrue_net_improvement = (np.sum(true_effects) -\n cost_of_one_treatment * true_effects.shape[0])\ntrue_net_improvement", "Suddenly, the manager will have small loss instead of solid profit.\nConclusion\nIt has been shown that formal metrics used in model evaluation may not reflect all sides of a problem. Sometimes, learning sample is biased and is not similar to samples that require predictions. This often occurs when machine learning affects decisions and new decisions differs from those that have been made for objects in the learning sample. 
\nThe described case might look too artificial, but below are two real-world examples of similar issues:\n\n\nThe goal is to train a neural network to detect a certain phrase in recorded speech. Assume that, unfortunately, all occurrences of the phrase in the learning sample are recorded by the same microphone in the same room and this flaw is not known. As a result, the variable that indicates occurrence of the phrase and the omitted variable that indicates the microphone used are confounded. The neural network does not understand which target to learn and so produces good results during cross-validation and on the hold-out test set, but poor results in a production environment.\n\n\nIn labor econometrics, the problem of estimating the effect of higher education on wages is well studied. A subtle issue here is that people with higher abilities are more willing to earn degrees and also have better chances of higher salaries. Abilities are an unobservable variable and, in naive models, their effect is attributed mainly to higher education, because there is a strong correlation between these variables. Hence, naive modelling leads to overestimation of the effect of higher education. \n\n\nSections of the notebook that illustrate ways to mitigate the consequences of omitting important unobserved variables may be released after some time.",
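"As a closing check that ties back to the conclusion, the estimated and true treatment effects can be split by the value of the omitted variable. This is only a small illustrative sketch; it assumes the arrays unobserved, estimated_effects and true_effects defined in the cells above are still in scope.", 
"# Compare mean estimated and true effects within each level of the omitted variable.\n# In both groups the estimated effect exceeds the true one by roughly the coefficient\n# of the omitted feature in weights_matrix.\nfor flag in (0, 1):\n    mask = unobserved == flag\n    print('unobserved = {}: mean estimated effect = {:.2f}, mean true effect = {:.2f}'.format(\n        flag, estimated_effects[mask].mean(), true_effects[mask].mean()))" ]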
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
briennakh/BIOF509
Wk05/Wk05-OOP-Public-interface.ipynb
mit
[ "Week 5 - Crafting the public interface.\nLearning Objectives\n\nExplain what a public interface is\nDiscuss the advantages of defining a public interface\nCompare different public interfaces\nDesign a simple public interface\n\nInheritance\nLast week we looked at inheritance, building a general class that we could then extend with additional functionality for special situations. \nEach of the classes we create inheriting from our general class can be thought of as having a 'is-a' relationship with the general class. For example, looking at our Item example from last week Equipment is a Item, Consumable is a Item.", "class Item(object):\n \n \n def __init__(self, name, description, location):\n self.name = name\n self.description = description\n self.location = location\n \n \n def update_location(self, new_location):\n pass\n\nclass Equipment(Item):\n pass\n\nclass Consumable(Item):\n \n \n def __init__(self, name, description, location, initial_quantity, current_quantity, storage_temp, flammability):\n self.name = name\n self.description = description\n self.location = location\n self.initial_quantity = initial_quantity\n self.current_quantity = current_quantity\n self.flammability = flammability\n \n \n def update_quantity_remaining(self, amount):\n pass", "Composition\nIn week 3 we took example projects and broke them down into a collection of different classes. Many of you chose the cookbook example for the assignment and questioned whether things like ingredients should be attributes on the recipe class or classes in their own right. Often the answer is both. These are the interactions that change a collection of different classes into a functioning program. This is called composition. The Recipe object is a composite object, it has ingredients, it has instructions, etc.\nThis week we will look at how we can design our classes to be easy to use, for both programmer-class and class-class interactions.", "class Ingredient(object):\n \"\"\"The ingredient object that contains nutritional information\"\"\"\n \n def __init__(self, name, carbs, protein, fat):\n self.name = name\n self.carbs = carbs\n self.protein = protein\n self.fat = fat\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the ingredient\"\"\"\n return (self.carbs, self.protein, self.fat)\n \n\nclass Recipe(object):\n \"\"\"The Recipe object containing the ingredients\"\"\"\n \n def __init__(self, name, ingredients):\n self.name = name\n self.ingredients = ingredients\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the recipe\"\"\"\n nutrition = [0, 0, 0]\n for amount, ingredient in self.ingredients:\n nutrition[0] += amount * ingredient.carbs\n nutrition[1] += amount * ingredient.protein\n nutrition[2] += amount * ingredient.fat\n return nutrition\n \n\nbread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)), \n (30, Ingredient('Oil', 0, 0, 1)), \n (36, Ingredient('Sugar', 1, 0, 0)), \n (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),\n (560, Ingredient('Water', 0, 0, 0))])\nprint(bread.ingredients)\nprint(bread.get_nutrition())", "This has the basic functionality implemented but there are some improvements we can make. \nBefore we look at making changes we can seek inspiration. Requests and Pandas are two packages well regarded for having well implemented interfaces.\nRequests: HTTP for Humans\nRequests is a package used for making HTTP requests. 
There are options in the python standard library for making http requests but they can seem difficult to use.", "import requests\n\nr = requests.get('https://api.github.com/repos/streety/biof509/events')\nprint(r.status_code)\nprint(r.headers['content-type'])\n\nprint(r.text[:1000])\n\nprint(r.json()[0]['payload']['commits'][0]['message'])\n\ntype(r)", "The API documentation for requests\nThe Response class\nSome useful features:\n\nproperty\n\nPandas\npandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and \ndata analysis tools for the Python programming language.", "import pandas as pd\n\ndata = pd.DataFrame([[0,1,2,3], [4,5,6,7], [8,9,10,11]], index=['a', 'b', 'c'], columns=['col1', 'col2', 'col3', 'col4'])\ndata\n\nprint(data.shape)\n\nprint(data['col1'])\nprint(data.col1)\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\ndata.plot()\n\ndata.to_csv('Wk05-temp.csv')\n\ndata2 = pd.read_csv('Wk05-temp.csv', index_col=0)\nprint(data2)", "The API documentation for the DataFrame object.\nThe actual code.\nSome useful features:\n* classmethod\n* property\n* __getitem__\n* Public and private attributes/methods\n* __getattr__\nCookbook\nWe can now return to our cookbook example.\nDisplaying the ingredients needs to be improved.", "class Ingredient(object):\n \"\"\"The ingredient object that contains nutritional information\"\"\"\n \n def __init__(self, name, carbs, protein, fat):\n self.name = name\n self.carbs = carbs\n self.protein = protein\n self.fat = fat\n \n \n def __repr__(self):\n return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the ingredient\"\"\"\n return (self.carbs, self.protein, self.fat)\n \n\nclass Recipe(object):\n \"\"\"The Recipe object containing the ingredients\"\"\"\n \n def __init__(self, name, ingredients):\n self.name = name\n self.ingredients = ingredients\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the recipe\"\"\"\n nutrition = [0, 0, 0]\n for amount, ingredient in self.ingredients:\n nutrition[0] += amount * ingredient.carbs\n nutrition[1] += amount * ingredient.protein\n nutrition[2] += amount * ingredient.fat\n return nutrition\n \n\nbread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)), \n (30, Ingredient('Oil', 0, 0, 1)), \n (36, Ingredient('Sugar', 1, 0, 0)), \n (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),\n (560, Ingredient('Water', 0, 0, 0))])\nprint(bread.ingredients)\nprint(bread.get_nutrition())", "Viewing the ingredients now looks much better. 
Let's now look at the get_nutrition method.\nThere are still a number of areas that could be improved\n\nWhen we call get_nutrition it is not clear what the different values returned actually are\nWe don't use the get_nutrition method when calculating the nutrition values in the Recipe class\nThere is no way to add additional types of nutrient\nIngredient and Recipe return different types from get_nutrition, tuple and list respectively\nRecipe could not be used as an ingredient for another Recipe", "class Ingredient(object):\n \"\"\"The ingredient object that contains nutritional information\"\"\"\n \n def __init__(self, name, carbs, protein, fat):\n self.name = name\n self.carbs = carbs\n self.protein = protein\n self.fat = fat\n \n \n def __repr__(self):\n return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the ingredient\"\"\"\n return (self.carbs, self.protein, self.fat)\n \n\nclass Recipe(object):\n \"\"\"The Recipe object containing the ingredients\"\"\"\n \n def __init__(self, name, ingredients):\n self.name = name\n self.ingredients = ingredients\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the recipe\"\"\"\n nutrition = [0, 0, 0]\n for amount, ingredient in self.ingredients:\n nutrition[0] += amount * ingredient.carbs\n nutrition[1] += amount * ingredient.protein\n nutrition[2] += amount * ingredient.fat\n return nutrition\n \n\nbread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)), \n (30, Ingredient('Oil', 0, 0, 1)), \n (36, Ingredient('Sugar', 1, 0, 0)), \n (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),\n (560, Ingredient('Water', 0, 0, 0))])\nprint(bread.ingredients)\nprint(bread.get_nutrition())", "WSGI\nThe value of building and documenting a interface to our code is not unique to object oriented programming.\nNext week we will look at creating websites as an alternative to command line programs and GUIs. Python has a rich ecosystem of web servers and frameworks for creating web applications. Importantly, the vast majority use a common interface called WSGI. \nWSGI is based on a simple exchange. The example below use the wsgiref package for the web server with the application implemented without using external packages. 
Next week, we will look at some of the more commonly used web servers and use a web framework to develop a more substantial web project.", "!cat Wk05-wsgi.py", "Assignments\nModify the Ingredient and Recipe classes so that the following code works.", "class Ingredient(object):\n \"\"\"The ingredient object that contains nutritional information\"\"\"\n \n def __init__(self, name, carbs, protein, fat):\n self.name = name\n self.carbs = carbs\n self.protein = protein\n self.fat = fat\n \n \n def __repr__(self):\n return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the ingredient\"\"\"\n return (self.carbs, self.protein, self.fat)\n \n\nclass Recipe(object):\n \"\"\"The Recipe object containing the ingredients\"\"\"\n \n def __init__(self, name, ingredients):\n self.name = name\n self.ingredients = ingredients\n \n \n def get_nutrition(self):\n \"\"\"Returns the nutritional information for the recipe\"\"\"\n nutrition = [0, 0, 0]\n for amount, ingredient in self.ingredients:\n nutrition[0] += amount * ingredient.carbs\n nutrition[1] += amount * ingredient.protein\n nutrition[2] += amount * ingredient.fat\n return nutrition\n\nbread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)), \n (30, Ingredient('Oil', 0, 0, 1)), \n (36, Ingredient('Sugar', 1, 0, 0)), \n (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),\n (560, Ingredient('Water', 0, 0, 0))])\nprint(bread.ingredients)\n# Should be roughly [(820, Ingredient(Flour, 0.77, 0.1, 0.01)), (30, Ingredient(Oil, 0, 0, 1)), \n# (36, Ingredient(Sugar, 1, 0, 0)), (7, Ingredient(Yeast, 0.3125, 0.5, 0.0625)), (560, Ingredient(Water, 0, 0, 0))]\n\nprint(bread.nutrition)\n#Should be roughly {'carbs': 669.5875, 'protein': 85.5, 'fat': 38.6375} the order is not important\n\n#Points to note:\n# - The different call to Ingredient, you can use isinstance or type to change the \n# behaviour depending on the arguments supplied\n# - Cholesterol as an extra nutrient, your implementation should accept any nutrient\n# - Use of Recipe (bread) as an ingredient\nbasic_french_toast = Recipe('Basic French Toast', [(300, Ingredient('Egg', {'carbs': 0.0077, 'protein': 0.1258, \n 'fat': 0.0994, 'cholesterol': 0.00423})), \n (0.25, bread)])\nprint(basic_french_toast.ingredients)\n# Should be roughly:\n# [(300, Ingredient(Egg, 0.0077, 0.1258, 0.0994)), (0.25, Recipe(Bread, [(820, Ingredient(Flour, 0.77, 0.1, 0.01)), \n# (30, Ingredient(Oil, 0, 0, 1)), (36, Ingredient(Sugar, 1, 0, 0)), (7, Ingredient(Yeast, 0.3125, 0.5, 0.0625)), \n# (560, Ingredient(Water, 0, 0, 0))]))]\n# Note the formatting for the Recipe object, a __repr__ method will be needed\n\nprint(basic_french_toast.nutrition)\n# Should be roughly {'protein': 59.115, 'carbs': 169.706875, 'cholesterol': 1.2690000000000001, 'fat': 39.479375000000005}\n# The order is not important" ]
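"One possible direction for the assignment, not a full solution: the property decorator listed among the useful features above lets nutrition be exposed as a computed attribute instead of a method. The sketch below is a minimal illustration that assumes nutrients are passed in as a dictionary; extending it to recipes, scaling by amounts, and the extra cholesterol field is left as part of the exercise.", 
"class SketchIngredient(object):\n    # Minimal sketch: store arbitrary nutrients in a dict and expose them via a read-only property.\n\n    def __init__(self, name, nutrition):\n        self.name = name\n        self._nutrition = dict(nutrition)\n\n    @property\n    def nutrition(self):\n        return dict(self._nutrition)\n\n\nflour = SketchIngredient('Flour', {'carbs': 0.77, 'protein': 0.10, 'fat': 0.01})\nprint(flour.nutrition)"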
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/ja/guide/estimator.ipynb
apache-2.0
[ "Copyright 2019 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Estimator\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td> <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/estimator\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org で表示</a> </td>\n <td> <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/estimator.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a> </td>\n <td> <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/estimator.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a> </td>\n <td> <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/estimator.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a> </td>\n</table>\n\n\n警告: 新しいコードには Estimators は推奨されません。Estimators は v1.Session スタイルのコードを実行しますが、これは正しく記述するのはより難しく、特に TF 2 コードと組み合わせると予期しない動作をする可能性があります。Estimators は、互換性保証の対象となりますが、セキュリティの脆弱性以外の修正は行われません。詳細については、移行ガイドを参照してください。\n\nこのドキュメントでは、tf.estimator という高位 TensorFlow API を紹介します。Estimator は以下のアクションをカプセル化します。\n\nトレーニング\n評価\n予測\n配信向けエクスポート\n\nTensorFlow は、事前に作成された複数の Estimator を実装します。カスタムの Estimator は依然としてサポートされていますが、主に下位互換性の対策としてサポートされているため、新しいコードでは、カスタム Estimator を使用してはいけません。事前に作成された Estimator とカスタム Estimator はすべて、tf.estimator.Estimator クラスに基づくクラスです。\n簡単な例については、Estimator チュートリアルを試してください。API デザインの概要については、ホワイトペーパーをご覧ください。\nセットアップ", "!pip install -U tensorflow_datasets\n\nimport tempfile\nimport os\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds", "メリット\ntf.keras.Model と同様に、estimator はモデルレベルの抽象です。tf.estimator は、tf.keras 向けに現在開発段階にある以下の機能を提供しています。\n\nパラメーターサーバーベースのトレーニング\nTFX の完全統合\n\nEstimator の機能\nEstimator には以下のメリットがあります。\n\nEstimator ベースのモデルは、モデルを変更することなくローカルホストまたは分散マルチサーバー環境で実行できます。さらに、モデルをコーディングし直すことなく、CPU、GPU、または TPU で実行できます。\nEstimator では、次を実行する方法とタイミングを制御する安全な分散型トレーニングループを使用できます。\nデータの読み込み\n例外の処理\nチェックポイントファイルの作成と障害からの復旧\nTensorBoard 用のサマリーの保存\n\n\n\nEstimator を使ってアプリケーションを記述する場合、データ入力パイプラインとモデルを分離する必要があります。分離することで、異なるデータセットを伴う実験を単純化することができます。\n事前作成済み Estimator を使用する\n既成の Estimator を使うと、基本の TensorFlow API より非常に高い概念レベルで作業することができます。Estimator がすべての「配管作業」を処理してくれるため、計算グラフやセッションの作成などに気を回す必要がありません。さらに、事前作成済みの Estimator では、コード変更を最小限に抑えて多様なモデルアーキテクチャを使った実験を行えます。たとえば tf.estimator.DNNClassifier は、密度の高いフィードフォワードのニューラルネットワークに基づく分類モデルをトレーニングする事前作成済みの Estimator クラスです。\n事前作成済み Estimator に依存する TensorFlow プログラムは、通常、次の 4 つのステップで構成されています。\n1. 
入力関数を作成する\nたとえば、トレーニングセットをインポートする関数とテストセットをインポートする関数を作成する場合、Estimator は入力が次の 2 つのオブジェクトのペアとしてフォーマットされていることを期待します。\n\n特徴名のキーと対応する特徴データを含むテンソル(または SparseTensors)の値で構成されるディクショナリ\n1 つ以上のラベルを含むテンソル\n\ninput_fn は上記のフォーマットのペアを生成する tf.data.Dataset を返します。\nたとえば、次のコードは Titanic データセットの train.csv ファイルから tf.data.Dataset を構築します。", "def train_input_fn():\n titanic_file = tf.keras.utils.get_file(\"train.csv\", \"https://storage.googleapis.com/tf-datasets/titanic/train.csv\")\n titanic = tf.data.experimental.make_csv_dataset(\n titanic_file, batch_size=32,\n label_name=\"survived\")\n titanic_batches = (\n titanic.cache().repeat().shuffle(500)\n .prefetch(tf.data.AUTOTUNE))\n return titanic_batches", "input_fn は、tf.Graph で実行し、グラフテンソルを含む (features_dics, labels) ペアを直接返すこともできますが、定数を返すといった単純なケースではない場合に、エラーが発生しやすくなります。\n2. 特徴量カラムを定義する\ntf.feature_column は、特徴量名、その型、およびすべての入力前処理を特定します。\nたとえば、次のスニペットは 3 つの特徴量カラムを作成します。\n\n最初の特徴量カラムは、浮動小数点数の入力として直接 age 特徴量を使用します。\n2 つ目の特徴量カラムは、カテゴリカル入力として class 特徴量を使用します。\n3 つ目の特徴量カラムは、カテゴリカル入力として embark_town を使用しますが、オプションを列挙する必要がないように、またオプション数を設定するために、hashing trick を使用します。\n\n詳細については、特徴量カラムのチュートリアルをご覧ください。", "age = tf.feature_column.numeric_column('age')\ncls = tf.feature_column.categorical_column_with_vocabulary_list('class', ['First', 'Second', 'Third']) \nembark = tf.feature_column.categorical_column_with_hash_bucket('embark_town', 32)", "3. 関連する事前作成済み Estimator をインスタンス化する\nLinearClassifier という事前作成済み Estimator のインスタンス化の例を次に示します。", "model_dir = tempfile.mkdtemp()\nmodel = tf.estimator.LinearClassifier(\n model_dir=model_dir,\n feature_columns=[embark, cls, age],\n n_classes=2\n)", "詳細については、線形分類器のチュートリアルをご覧ください。\n4. トレーニング、評価、または推論メソッドを呼び出す\nすべての Estimator には、train、evaluate、および predict メソッドがあります。", "model = model.train(input_fn=train_input_fn, steps=100)\n\nresult = model.evaluate(train_input_fn, steps=10)\n\nfor key, value in result.items():\n print(key, \":\", value)\n\nfor pred in model.predict(train_input_fn):\n for key, value in pred.items():\n print(key, \":\", value)\n break", "事前作成済み Estimator のメリット\n事前作成済み Estimator は、次のようなベストプラクティスをエンコードするため、さまざまなメリットがあります。\n\nさまざまな部分の計算グラフをどこで実行するかを決定し、単一のマシンまたはクラスタに戦略を実装するためのベストプラクティス。\nイベント(要約)の書き込みと普遍的に役立つ要約のベストプラクティス。\n\n事前作成済み Estimator を使用しない場合は、上記の特徴量を独自に実装する必要があります。\nカスタム Estimator\n事前作成済みかカスタムかに関係なく、すべての Estimator の中核は、モデル関数の model_fn にあります。これは、トレーニング、評価、および予測に使用するグラフを構築するメソッドです。事前作成済み Estimator を使用する場合は、モデル関数はすでに実装されていますが、カスタム Estimator を使用する場合は、モデル関数を自分で記述する必要があります。\n\n注意: カスタム model_fn は 1.x スタイルのグラフモードでそのまま実行します。つまり、Eager execution はなく、依存関係の自動制御もないため、tf.estimator からカスタム model_fn に移行する必要があります。代替の API は tf.keras と tf.distribute です。トレーニングの一部に Estimator を使用する必要がある場合は、tf.keras.estimator.model_to_estimator コンバータを使用して keras.Model から Estimator を作成する必要があります。\n\nKeras モデルから Estimator を作成する\ntf.keras.estimator.model_to_estimator を使用して、既存の Keras モデルを Estimator に変換できます。モデルコードを最新の状態に変更したくても、トレーニングパイプラインに Estimator が必要な場合に役立ちます。\nKeras MobileNet V2 モデルをインスタンス化し、トレーニングに使用する optimizer、loss、および metrics とともにモデルをコンパイルします。", "keras_mobilenet_v2 = tf.keras.applications.MobileNetV2(\n input_shape=(160, 160, 3), include_top=False)\nkeras_mobilenet_v2.trainable = False\n\nestimator_model = tf.keras.Sequential([\n keras_mobilenet_v2,\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense(1)\n])\n\n# Compile the model\nestimator_model.compile(\n optimizer='adam',\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])", "コンパイルされた Keras モデルから Estimator を作成します。Keras モデルの初期化状態が、作成した Estimator に維持されます。", 
"est_mobilenet_v2 = tf.keras.estimator.model_to_estimator(keras_model=estimator_model)", "派生した Estimator をほかの Estimator と同じように扱います。", "IMG_SIZE = 160 # All images will be resized to 160x160\n\ndef preprocess(image, label):\n image = tf.cast(image, tf.float32)\n image = (image/127.5) - 1\n image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))\n return image, label\n\ndef train_input_fn(batch_size):\n data = tfds.load('cats_vs_dogs', as_supervised=True)\n train_data = data['train']\n train_data = train_data.map(preprocess).shuffle(500).batch(batch_size)\n return train_data", "トレーニングするには、Estimator の train 関数を呼び出します。", "est_mobilenet_v2.train(input_fn=lambda: train_input_fn(32), steps=50)", "同様に、評価するには、Estimator の evaluate 関数を呼び出します。", "est_mobilenet_v2.evaluate(input_fn=lambda: train_input_fn(32), steps=10)", "詳細については、tf.keras.estimator.model_to_estimator のドキュメントを参照してください。\nEstimator でオブジェクトベースのチェックポイントを保存する\nEstimator はデフォルトで、チェックポイントガイドで説明したオブジェクトグラフではなく、変数名でチェックポイントを保存します。tf.train.Checkpoint は名前ベースのチェックポイントを読み取りますが、モデルの一部を Estimator の model_fn の外側に移動すると変数名が変わることがあります。上位互換性においては、オブジェクトベースのチェックポイントを保存すると、Estimator の内側でモデルをトレーニングし、外側でそれを使用することが容易になります。", "import tensorflow.compat.v1 as tf_compat\n\ndef toy_dataset():\n inputs = tf.range(10.)[:, None]\n labels = inputs * 5. + tf.range(5.)[None, :]\n return tf.data.Dataset.from_tensor_slices(\n dict(x=inputs, y=labels)).repeat().batch(2)\n\nclass Net(tf.keras.Model):\n \"\"\"A simple linear model.\"\"\"\n\n def __init__(self):\n super(Net, self).__init__()\n self.l1 = tf.keras.layers.Dense(5)\n\n def call(self, x):\n return self.l1(x)\n\ndef model_fn(features, labels, mode):\n net = Net()\n opt = tf.keras.optimizers.Adam(0.1)\n ckpt = tf.train.Checkpoint(step=tf_compat.train.get_global_step(),\n optimizer=opt, net=net)\n with tf.GradientTape() as tape:\n output = net(features['x'])\n loss = tf.reduce_mean(tf.abs(output - features['y']))\n variables = net.trainable_variables\n gradients = tape.gradient(loss, variables)\n return tf.estimator.EstimatorSpec(\n mode,\n loss=loss,\n train_op=tf.group(opt.apply_gradients(zip(gradients, variables)),\n ckpt.step.assign_add(1)),\n # Tell the Estimator to save \"ckpt\" in an object-based format.\n scaffold=tf_compat.train.Scaffold(saver=ckpt))\n\ntf.keras.backend.clear_session()\nest = tf.estimator.Estimator(model_fn, './tf_estimator_example/')\nest.train(toy_dataset, steps=10)", "その後、tf.train.Checkpoint は Estimator のチェックポイントをその model_dir から読み込むことができます。", "opt = tf.keras.optimizers.Adam(0.1)\nnet = Net()\nckpt = tf.train.Checkpoint(\n step=tf.Variable(1, dtype=tf.int64), optimizer=opt, net=net)\nckpt.restore(tf.train.latest_checkpoint('./tf_estimator_example/'))\nckpt.step.numpy() # From est.train(..., steps=10)", "Estimator の SavedModel\nEstimator は、tf.Estimator.export_saved_model によって SavedModel をエクスポートします。", "input_column = tf.feature_column.numeric_column(\"x\")\n\nestimator = tf.estimator.LinearClassifier(feature_columns=[input_column])\n\ndef input_fn():\n return tf.data.Dataset.from_tensor_slices(\n ({\"x\": [1., 2., 3., 4.]}, [1, 1, 0, 0])).repeat(200).shuffle(64).batch(16)\nestimator.train(input_fn)", "Estimator を保存するには、serving_input_receiver を作成する必要があります。この関数は、SavedModel が受け取る生データを解析する tf.Graph の一部を構築します。\ntf.estimator.export モジュールには、これらの receivers を構築するための関数が含まれています。\n次のコードは、feature_columns に基づき、tf-serving と合わせて使用されることの多いシリアル化された tf.Example プロトコルバッファを受け入れるレシーバーを構築します。", "tmpdir = tempfile.mkdtemp()\n\nserving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(\n 
tf.feature_column.make_parse_example_spec([input_column]))\n\nestimator_base_path = os.path.join(tmpdir, 'from_estimator')\nestimator_path = estimator.export_saved_model(estimator_base_path, serving_input_fn)", "また、Python からモデルを読み込んで実行することも可能です。", "imported = tf.saved_model.load(estimator_path)\n\ndef predict(x):\n example = tf.train.Example()\n example.features.feature[\"x\"].float_list.value.extend([x])\n return imported.signatures[\"predict\"](\n examples=tf.constant([example.SerializeToString()]))\n\nprint(predict(1.5))\nprint(predict(3.5))", "tf.estimator.export.build_raw_serving_input_receiver_fn を使用すると、tf.train.Example の代わりに生のテンソルを取る入力関数を作成することができます。\nEstimator を使った tf.distribute.Strategy の使用(制限サポート)\ntf.estimator は、もともと非同期パラメーターサーバー手法をサポートしていた分散型トレーニング TensorFlow API です。tf.estimator は現在では tf.distribute.Strategy をサポートするようになっています。tf.estimator を使用している場合は、コードを少し変更するだけで、分散型トレーニングに変更することができます。これにより、Estimator ユーザーは複数の GPU と複数のワーカーだけでなく、TPU でも同期分散型トレーニングを実行できるようになりましたが、Estimator でのこのサポートには制限があります。詳細については、以下に示す「現在、何がサポートされていますか」セクションをご覧ください。\nEstimator での tf.distribute.Strategy の使用は、Keras の事例とわずかに異なります。strategy.scope を使用する代わりに、ストラテジーオブジェクトを Estimator の RunConfig に渡します。\n詳細については、分散型トレーニングガイドをご覧ください。\n次は、事前に作成された Estimator LinearRegressor と MirroredStrategy を使ってこの動作を示すコードスニペットです。", "mirrored_strategy = tf.distribute.MirroredStrategy()\nconfig = tf.estimator.RunConfig(\n train_distribute=mirrored_strategy, eval_distribute=mirrored_strategy)\nregressor = tf.estimator.LinearRegressor(\n feature_columns=[tf.feature_column.numeric_column('feats')],\n optimizer='SGD',\n config=config)", "ここでは、事前に作成された Estimator が使用されていますが、同じコードはカスタム Estimator でも動作します。train_distribute はトレーニングの分散方法を判定し、eval_distribute は評価の分散方法を判定します。この点も、トレーニングと評価に同じストラテジーを使用する Keras と異なるところです。\n入力関数を使用して、この Estimator をトレーニングし、評価することができます。", "def input_fn():\n dataset = tf.data.Dataset.from_tensors(({\"feats\":[1.]}, [1.]))\n return dataset.repeat(1000).batch(10)\nregressor.train(input_fn=input_fn, steps=10)\nregressor.evaluate(input_fn=input_fn, steps=10)", "Estimator と Keras のもう 1 つの違いとして強調すべき点は入力の処理方法です。Keras では、データセットの各バッチは複数のレプリカに自動的に分断されますが、Estimator の場合は、バッチの自動分断やワーカーをまたいで自動的にシャーディングすることもありません。ワーカーやデバイスでのデータの分散方法はユーザーが完全に制御するものであるため、input_fn を提供してデータの分散方法を指定する必要があります。\ninput_fn はワーカー当たり一度呼び出されるため、ワーカー当たり 1 つのデータセットが与えられます。次に、そのデータセットの 1 つのバッチがそのワーカーの 1 つのレプリカに供給され、したがって、1 つのワーカーの N 個のレプリカに対して N 個のバッチが消費されることになります。言い換えると、input_fn が返すデータセットは、サイズ PER_REPLICA_BATCH_SIZE のバッチを提供するということです。ステップのグローバルバッチサイズは、PER_REPLICA_BATCH_SIZE * strategy.num_replicas_in_sync として取得することができます。\nマルチワーカートレーニングを行う場合は、データをワーカー間で分割するか、それぞれにランダムシードを使用してシャッフルする必要があります。これを行う方法の例は、「Estimator を使ったマルチワーカートレーニング」を参照してください。\nまた、同様に、マルチワーカーとパラメーターサーバーストラテジーを使用することができます。コードは変わりませんが、tf.estimator.train_and_evaluate を使用し、クラスタで実行している各バイナリの TF_CONFIG 環境変数を設定する必要があります。\n<a name=\"estimator_support\"></a>\n現在、何がサポートされていますか?\nTPUStrategy を除くすべてのストラテジーを使った Estimator でのトレーニングのサポートには制限があります。基本的なトレーニングと評価は機能しますが、v1.train.Scaffold などの多数の高度な機能はまだ機能しません。また、この統合には多数のバグも存在する可能性があります。現時点では、Keras とカスタムトレーニングループのサポートに注力しているため、このサポートを積極的に改善する予定はありません。可能な限り、それらの API で tf.distribute を使用するようにしてください。\nトレーニング API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy\n:-- | :-- | :-- | :-- | :-- | :--\nEstimator API | 制限サポート | 未サポート | 制限サポート | 制限サポート | 制限サポート\n例とチュートリアル\n次は、Estimator によるさまざまなストラテジーの使用方法を示す、エンドツーエンドの例です。\n\nEstimator を使ったマルチワーカートレーニングのチュートリアルには、MNIST データセットで MultiWorkerMirroredStrategy 
を使って複数のワーカーをトレーニングする方法が説明されています。\nKubernetes テンプレートを使った tensorflow/ecosystem で分散ストラテジーによってマルチワーカートレーニングを実行するエンドツーエンドの例。Keras モデルから始め、tf.keras.estimator.model_to_estimator API を使って Estimator に変換します。\nResNet50 の公式モデル。MirroredStrategy または MultiWorkerMirroredStrategy を使ってトレーニングできます。" ]
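"As a small addition to the multi-worker discussion above, the sketch below shows tf.estimator.train_and_evaluate, which drives the same train/eval loop that the distributed examples rely on. It is only an illustration and assumes the regressor and input_fn objects defined earlier in this guide are still available; in a real multi-worker job the TF_CONFIG environment variable would also need to be set on each worker.", 
"train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=100)\neval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=10)\ntf.estimator.train_and_evaluate(regressor, train_spec, eval_spec)"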
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.16/_downloads/plot_brainstorm_phantom_elekta.ipynb
bsd-3-clause
[ "%matplotlib inline", "Brainstorm Elekta phantom dataset tutorial\nHere we compute the evoked from raw for the Brainstorm Elekta phantom\ntutorial dataset. For comparison, see [1]_ and:\nhttp://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta\n\nReferences\n.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716", "# Authors: Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nfrom mayavi import mlab\nprint(__doc__)", "The data were collected with an Elekta Neuromag VectorView system at 1000 Hz\nand low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data\nare read to construct instances of :class:mne.io.Raw.", "data_path = bst_phantom_elekta.data_path()\n\nraw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw = read_raw_fif(raw_fname)", "Data channel array consisted of 204 MEG planor gradiometers,\n102 axial magnetometers, and 3 stimulus channels. Let's get the events\nfor the phantom, where each dipole (1-32) gets its own event:", "events = find_events(raw, 'STI201')\nraw.plot(events=events)\nraw.info['bads'] = ['MEG2421']", "The data have strong line frequency (60 Hz and harmonics) and cHPI coil\nnoise (five peaks around 300 Hz). Here we plot only out to 60 seconds\nto save memory:", "raw.plot_psd(tmax=60., average=False)", "Let's use Maxwell filtering to clean the data a bit.\nIdeally we would have the fine calibration and cross-talk information\nfor the site of interest, but we don't, so we just do:", "raw.fix_mag_coil_types()\nraw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))", "We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.", "raw.filter(None, 40., fir_design='firwin')\nraw.plot(events=events)", "Now we epoch our data, average it, and look at the first dipole response.\nThe first peak appears around 3 ms. Because we low-passed at 40 Hz,\nwe can also decimate our data to save memory.", "tmin, tmax = -0.1, 0.1\nevent_id = list(range(1, 33))\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),\n decim=3, preload=True)\nepochs['1'].average().plot(time_unit='s')", "Let's use a sphere head geometry model and let's see the coordinate\nalignement and the sphere location. The phantom is properly modeled by\na single-shell sphere with origin (0., 0., 0.).", "sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\n\nmne.viz.plot_alignment(raw.info, subject='sample',\n meg='helmet', bem=sphere, dig=True,\n surfaces=['brain'])", "Let's do some dipole fits. 
We first compute the noise covariance,\nthen do the fits for each event_id taking the time instant that maximizes\nthe global field power.", "cov = mne.compute_covariance(epochs, tmax=0)\ndata = []\nfor ii in event_id:\n evoked = epochs[str(ii)].average()\n idx_peak = np.argmax(evoked.copy().pick_types(meg='grad').data.std(axis=0))\n t_peak = evoked.times[idx_peak]\n evoked.crop(t_peak, t_peak)\n data.append(evoked.data[:, 0])\nevoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)\ndel epochs, raw\ndip = fit_dipole(evoked, cov, sphere, n_jobs=1)[0]", "Now we can compare to the actual locations, taking the difference in mm:", "actual_pos, actual_ori = mne.dipole.get_phantom_dipoles()\nactual_amp = 100. # nAm\n\nfig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))\n\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nprint('mean(position error) = %s' % (np.mean(diffs),))\nax1.bar(event_id, diffs)\nax1.set_xlabel('Dipole index')\nax1.set_ylabel('Loc. error (mm)')\n\nangles = np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1)))\nprint('mean(angle error) = %s' % (np.mean(angles),))\nax2.bar(event_id, angles)\nax2.set_xlabel('Dipole index')\nax2.set_ylabel('Angle error (rad)')\n\namps = actual_amp - dip.amplitude / 1e-9\nprint('mean(abs amplitude error) = %s' % (np.mean(np.abs(amps)),))\nax3.bar(event_id, amps)\nax3.set_xlabel('Dipole index')\nax3.set_ylabel('Amplitude error (nAm)')\n\nfig.tight_layout()\nplt.show()", "Let's plot the positions and the orientations of the actual and the estimated\ndipoles", "def plot_pos_ori(pos, ori, color=(0., 0., 0.)):\n mlab.points3d(pos[:, 0], pos[:, 1], pos[:, 2], scale_factor=0.005,\n color=color)\n mlab.quiver3d(pos[:, 0], pos[:, 1], pos[:, 2],\n ori[:, 0], ori[:, 1], ori[:, 2],\n scale_factor=0.03,\n color=color)\n\nmne.viz.plot_alignment(evoked.info, bem=sphere, surfaces=[])\n\n# Plot the position and the orientation of the actual dipole\nplot_pos_ori(actual_pos, actual_ori, color=(1., 0., 0.))\n# Plot the position and the orientation of the estimated dipole\nplot_pos_ori(dip.pos, dip.ori, color=(0., 0., 1.))" ]
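"As an optional last step, the fit quality can be summarised in a single line. This is a small sketch that assumes dip, actual_pos, actual_ori and event_id from the cells above are still in scope; dip.gof is the goodness of fit that MNE stores for each fitted dipole.", 
"# Report the dipole with the largest localisation error together with its angle error and goodness of fit\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nangles = np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1)))\nworst = np.argmax(diffs)\nprint('worst dipole: %d, loc. error %.1f mm, angle error %.3f rad, gof %.1f'\n      % (event_id[worst], diffs[worst], angles[worst], dip.gof[worst]))"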
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
brettavedisian/Liquid-Crystals-Summer-2015
Smectic/SimplePol.ipynb
mit
[ "%matplotlib inline\nfrom sympy import *\nfrom sympy.utilities.lambdify import implemented_function\nfrom sympy.abc import x, y, z\nimport numpy as np\nimport matplotlib.pyplot as plt\ninit_printing(use_unicode=True)\n\nr, u, v, c, r_c, u_c, v_c, E, p, r_p, u_p, v_p, e, a, b, q, b_0, b_1, b_2, b_3, q_0, q_1, q_2, q_3, q_4, q_5 = symbols('r u v c r_c u_c v_c E p r_p u_p v_p e a b q b_0 b_1 b_2 b_3 q_0 q_1 q_2 q_3 q_4 q_5')\n\ngamma = symbols('gamma',positive=True)", "$f_{1}(c,p) = \\dfrac{1}{2}r_{c}c^{2}+\\dfrac{1}{4}u_{c}c^{4}+\\dfrac{1}{6}v_{c}c^{6}+\\dfrac{1}{2}r_{p}p^{2}-\\gamma cp-Ep$", "f1 = (1/2)*r_c*c**2+(1/4)*u_c*c**4+(1/6)*v_c*c**6-E*p+(1/2)*r_p*p**2-gamma*c*p", "$\\dfrac{\\partial f_{1}(c,p)}{\\partial p} = 0 = $", "pmin = solve(f1.diff(c),p)[0]\npmin\n\nE_cp = solve(f1.diff(p),E)[0]\nE_cp\n\nexpand(E_cp.subs(p,pmin))" ]
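"To see what the derived field-versus-order-parameter relation looks like numerically, the symbolic expression can be turned into a callable and plotted. The cell below is only a sketch: the values substituted for r_c, u_c, v_c, r_p and gamma are arbitrary placeholders chosen to make the curve visible, not constants for any particular material.", 
"# Lambdify E(c) for example parameter values and plot it\nE_of_c = expand(E_cp.subs(p, pmin))\nE_num = lambdify(c, E_of_c.subs({r_c: -1, u_c: 0, v_c: 1, r_p: 1, gamma: 0.5}), 'numpy')\ncs = np.linspace(-1.5, 1.5, 300)\nplt.plot(cs, E_num(cs))\nplt.xlabel('c')\nplt.ylabel('E')\nplt.show()"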
[ "code", "markdown", "code", "markdown", "code" ]
dereneaton/ipyrad
newdocs/API-analysis/cookbook-construct-ipcoal.ipynb
gpl-3.0
[ "<h1><span style=\"color:gray\">ipyrad-analysis toolkit:</span> construct </h1>\n\nThe program construct is a STRUCTURE-like tool that incorporates expectations of isolation by distance. It is available as an R package. This notebook demonstrates how to convert data to the proper format for use in construct using simulated data as an example. For details on running construct see their documentation. \nRequired software", "# conda install ipyrad ipcoal -c conda-forge -c bioconda\n\nimport ipyrad.analysis as ipa\nimport toytree\nimport ipcoal\n\nprint('ipyrad', ipa.__version__)\nprint('toytree', toytree.__version__)\nprint('ipcoal', ipcoal.__version__)", "Simulate example data", "# network model\ntree = toytree.rtree.unittree(7, treeheight=3e6, seed=123)\ntree.draw(ts='o', admixture_edges=(3, 2));\n\n# simulation model with admixture and missing data\nmodel = ipcoal.Model(tree, Ne=1e4, nsamples=4, admixture_edges=(3, 2, 0.5, 0.1))\nmodel.sim_snps(250)\nmodel.write_snps_to_hdf5(name=\"test-construct\", outdir=\"/tmp\", diploid=True)", "Input data file", "# the path to your HDF5 formatted snps file\nSNPS = \"/tmp/test-construct.snps.hdf5\"", "Population assignments", "IMAP = {\n \"r0\": [\"r0-0\", \"r0-1\"],\n \"r1\": [\"r1-0\", \"r1-1\"],\n \"r2\": [\"r2-0\", \"r2-1\"],\n \"r3\": [\"r3-0\", \"r3-1\"],\n \"r4\": [\"r4-0\", \"r4-1\"],\n \"r5\": [\"r5-0\", \"r5-1\"],\n \"r6\": [\"r6-0\", \"r6-1\"],\n}", "Filter missing data and convert to genotype frequencies", "# apply filtering to the SNPs file\ntool = ipa.snps_extracter(data=SNPS, imap=IMAP, minmap={i:2 for i in IMAP})\ntool.parse_genos_from_hdf5();\n\n# convert SNP data to genotype frequencies\ndf = tool.get_population_geno_frequency()\ndf.head()", "Write data to file", "# write to a file\ndf.to_csv(\"/tmp/freqs.csv\")" ]
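"A quick sanity check of the exported table can help before moving over to R. The cell below is just a sketch that reads /tmp/freqs.csv back with pandas; one axis should correspond to the populations in IMAP and the other to the retained SNPs. Note that construct typically also expects geographic coordinates for the sampled populations, which are not produced by this conversion step.", 
"import pandas as pd\n\n# reload the exported allele frequency table and inspect its dimensions\ncheck = pd.read_csv('/tmp/freqs.csv', index_col=0)\nprint(check.shape)\nprint(check.index.tolist()[:7])"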
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gammapy/PyGamma15
tutorials/analysis-stats/Tutorial.ipynb
bsd-3-clause
[ "Tutorial about statistical methods\nThe following contains a sequence of simple exercises, designed to get familiar with using Minuit for maximum likelihood fits and emcee to determine parameters by MCMC. Commands are generally commented, i.e. in order to activate them, simply uncomment them. A few functions are still to be defined... which is part of the exercise. Have fun!", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Generate a dataset to be fitted", "np.random.seed(42)\ny = np.random.random(10000)\nx = 1./np.sqrt(y)\nplt.hist(x, bins=100, range=(1,10), histtype='stepfilled',color='blue')\nplt.yscale('log')", "Maximum likelihood fit of a simple power law\nFirst define the negative-log likelihood function for a density proportional to x**(-a) the range 1 < x < infinity", "def nllp(a)\n# here define the function \n return 1.", "Then minimize it using iminuit", "import iminuit\n\n# minp = iminuit.Minuit(nllp,a= ?,error_a=?, errordef=?)\n\n# minp.migrad()", "Error analysis\nFirst determine the parabolic errors using hesse() and then do a parameter scan using minos() to determine the 68% confidence level errors.", "# minp.hesse()\n\n# minp.minos()\n# minp.draw_profile('a')", "Use of an un-normalised PDF\nThe above example shall be modified such that the normalisation of the likelihood function, which so far was determined analytically, now is determined numerically in the fit. This is the more realistic case, since in many case no (simple) analytical normalisation exists. As a first step, this requires to load the integration package.", "from scipy.integrate import quad\n\ndef pdfpn(x, a):\n return x**(-a)\ndef pdfpn_norm(a):\n# here insert the calculation of the normalisation as a function of a \n return 1.\ndef nllpn(a):\n# calculate and return the proper negative-log likelihood function\n return 1.", "Then do the same minimization steps as before.", "# minpn = iminuit.Minuit(nllpn, a=?, error_a=?, errordef=?)\n\n# minpn.migrad()", "Extend the fit model by an exponential cutoff\nThe exponential cutoff is implemented by exp(-bbx), i.e. exponential growth is not allowed for real valued parameters b. The implications of this ansatz shall be discussed when looking at the solution. After that, the example can be modified to use exp(-b*x). \nHere the likelihood function has no (simple) analytical normalisation anymore, i.e. we directly do the numerical approach.", "def pdfcn(x, a, b):\n return x**(-a)*np.exp(-b*b*x)\ndef pdfcn_norm(a, b):\n# determine the normalization \n return 1.\ndef nllcn(a, b):\n# calculate an return the negative-log likelihood function\n return 1.", "As before, use Minuit for minimisation and error analysis, but now in two dimensions. Study parabolic errors and minos errors, the latter both for the single variables and for both together.", "# mincn = iminuit.Minuit(nllcn, a=?, b=?, error_a=?, error_b=?, errordef=?)\n\n# mincn.migrad()\n\n# mincn.hesse()\n\n# mincn.minos()\n\n# mincn.draw_profile('a')\n\n# mincn.draw_profile('b')\n\n# mincn.draw_contour('a','b')", "Do the same analysis by MCMC", "import emcee", "emcee requires as input the log-likelihood of the posterior in the parameters a and b. In the following it is composed of the log-of the prior and the log-likelihood of the data. Initially use a simple uniform prior in a and b with the constraint b>0. 
Afterwards one can play with the prior to see how strongly it affects the result.", "# Define the posterior.\n# for clarity the prior and likelihood are separated\n# emcee requires log-posterior\n\ndef log_prior(theta):\n a, b = theta\n if b < 0:\n return -np.inf # log(0)\n else:\n return 0.\n\ndef log_likelihood(theta, x):\n a, b = theta\n return np.sum(-a*np.log(x) - b*b*x)\n\ndef log_posterior(theta, x):\n a , b = theta\n# construct and the log of the posterior \n return 1.", "Here we'll set up the computation. emcee combines multiple \"walkers\", each of which is its own MCMC chain. The number of trace results will be nwalkers * nsteps", "ndim = 2 # number of parameters in the model\nnwalkers = 50 # number of MCMC walkers\nnburn = 100 # \"burn-in\" period to let chains stabilize\nnsteps = 1000 # number of MCMC steps to take\n\n# random starting point\nnp.random.seed(0)\nstarting_guesses = np.random.random((nwalkers, ndim))\n", "run the MCMC (and time it using IPython's %time magic", "#sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x])\n#%time sampler.run_mcmc(starting_guesses, nsteps)\n#print(\"done\")", "sampler.chain is of shape (nwalkers, nsteps, ndim). Before analysis throw-out the burn-in points and reshape.", "#emcee_trace = sampler.chain[:, nburn:, :].reshape(-1, ndim).T\n#len(emcee_trace[0])", "Analyse the results. Plot the projected (marginalized) posteriors for the parameters a and b and also the joinyt density as sampled by the MCMC.", "# plt.hist(emcee_trace[0], 100, range=(?,?) , histtype='stepfilled', color='cyan')\n\n# plt.hist(emcee_trace[1], 100, range=(?,?) , histtype='stepfilled', color='cyan')\n\n# plt.plot(emcee_trace[0],emcee_trace[1],',k')", "As a final step, generate 2-dim bayesian confidence level contours containing 68.3% and 95.5% probability content. For that define a convenient plot functions and use them. Overlay the contours with the scatter plot.", "def compute_sigma_level(trace1, trace2, nbins=20):\n \"\"\"From a set of traces, bin by number of standard deviations\"\"\"\n L, xbins, ybins = np.histogram2d(trace1, trace2, nbins)\n L[L == 0] = 1E-16\n logL = np.log(L)\n\n shape = L.shape\n L = L.ravel()\n\n # obtain the indices to sort and unsort the flattened array\n i_sort = np.argsort(L)[::-1]\n i_unsort = np.argsort(i_sort)\n\n L_cumsum = L[i_sort].cumsum()\n L_cumsum /= L_cumsum[-1]\n \n xbins = 0.5 * (xbins[1:] + xbins[:-1])\n ybins = 0.5 * (ybins[1:] + ybins[:-1])\n\n return xbins, ybins, L_cumsum[i_unsort].reshape(shape)\n\n\n#xbins, ybins, sigma = compute_sigma_level(emcee_trace[0], emcee_trace[1])\n#plt.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955])\n#plt.plot(emcee_trace[0], emcee_trace[1], ',k', alpha=0.1)" ]
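"For reference, here is one possible way to fill in the very first exercise, in case you want to compare against your own attempt. It is a sketch, not the only valid answer: for a density proportional to x**(-a) on 1 < x < infinity the analytic normalisation is (a - 1), valid for a > 1, and errordef=0.5 is the usual setting for a negative log-likelihood in iminuit. The global data array x from the beginning of the notebook is assumed to be in scope.", 
"def nllp_solution(a):\n    # negative log likelihood of the normalised power law (a - 1) * x**(-a), a > 1\n    return -np.sum(np.log(a - 1.) - a * np.log(x))\n\n# minp = iminuit.Minuit(nllp_solution, a=2., error_a=0.1, errordef=0.5)\n# minp.migrad()  # the fitted exponent should come out close to a = 3 for this dataset"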
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
famunity/deep-learning-term1
3LinearAlgebra/project3/linear_regression_project.ipynb
mit
[ "# 任意选一个你喜欢的整数,这能帮你得到稳定的结果\nseed = 1234", "欢迎来到线性回归项目\n若项目中的题目有困难没完成也没关系,我们鼓励你带着问题提交项目,评审人会给予你诸多帮助。\n所有选做题都可以不做,不影响项目通过。如果你做了,那么项目评审会帮你批改,也会因为选做部分做错而判定为不通过。\n其中非代码题可以提交手写后扫描的 pdf 文件,或使用 Latex 在文档中直接回答。\n1 矩阵运算\n1.1 创建一个 4*4 的单位矩阵", "# 这个项目设计来帮你熟悉 python list 和线性代数\n# 你不能调用任何NumPy以及相关的科学计算库来完成作业\n\n\n# 本项目要求矩阵统一使用二维列表表示,如下:\nA = [[1,2,3], \n [2,3,3], \n [1,2,5]]\n\nB = [[1,2,3,5], \n [2,3,3,5], \n [1,2,5,1]]\n\n# 向量也用二维列表表示\nC = [[1],\n [2],\n [3]]\n\n#TODO 创建一个 4*4 单位矩阵\nI = [[1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [0,0,0,1]]", "1.2 返回矩阵的行数和列数", "# 运行以下代码测试你的 shape 函数\n%run -i -e test.py LinearRegressionTestCase.test_shape\n\n# TODO 返回矩阵的行数和列数\ndef shape(M):\n return len(M),len(M[0])", "1.3 每个元素四舍五入到特定小数数位", "# TODO 每个元素四舍五入到特定小数数位\n# 直接修改参数矩阵,无返回值\ndef matxRound(M, decPts=4):\n row, col = shape(M)\n for i in range(row):\n for j in range(col):\n M[i][j]=round(M[i][j],decPts)\n pass\n\n# 运行以下代码测试你的 matxRound 函数\n%run -i -e test.py LinearRegressionTestCase.test_matxRound", "1.4 计算矩阵的转置", "# TODO 计算矩阵的转置\ndef transpose(M):\n row, col = shape(M)\n MT = []\n for i in range(col):\n MT.append([x[i] for x in M])\n return MT\n\n# 运行以下代码测试你的 transpose 函数\n%run -i -e test.py LinearRegressionTestCase.test_transpose", "1.5 计算矩阵乘法 AB", "# TODO 计算矩阵乘法 AB,如果无法相乘则raise ValueError\ndef matxMultiply(A, B):\n rowA, colA = shape(A)\n rowB, colB = shape(B)\n if not colA == rowB:\n raise ValueError\n # result would be rowA x colB\n result = [[0] * colB for row in range(rowA)]\n BT = transpose(B)\n for i in range(rowA):\n rowa = A[i]\n for j in range(colB):\n colb = BT[j]\n element = sum([rowa[x]*colb[x] for x in range(colA)])\n result[i][j] = element\n return result\n\n# 运行以下代码测试你的 matxMultiply 函数\n%run -i -e test.py LinearRegressionTestCase.test_matxMultiply", "2 Gaussign Jordan 消元法\n2.1 构造增广矩阵\n$ A = \\begin{bmatrix}\n a_{11} & a_{12} & ... & a_{1n}\\\n a_{21} & a_{22} & ... & a_{2n}\\\n a_{31} & a_{22} & ... & a_{3n}\\\n ... & ... & ... & ...\\\n a_{n1} & a_{n2} & ... & a_{nn}\\\n\\end{bmatrix} , b = \\begin{bmatrix}\n b_{1} \\\n b_{2} \\\n b_{3} \\\n ... \\\n b_{n} \\\n\\end{bmatrix}$\n返回 $ Ab = \\begin{bmatrix}\n a_{11} & a_{12} & ... & a_{1n} & b_{1}\\\n a_{21} & a_{22} & ... & a_{2n} & b_{2}\\\n a_{31} & a_{22} & ... & a_{3n} & b_{3}\\\n ... & ... & ... & ...& ...\\\n a_{n1} & a_{n2} & ... 
& a_{nn} & b_{n} \\end{bmatrix}$", "# TODO 构造增广矩阵,假设A,b行数相同\ndef augmentMatrix(A, b):\n # result would be rowA x (colA+colb)\n rowA, colA = shape(A)\n result = [[0] * (colA+1) for row in range(rowA)]\n for i in range(rowA):\n for j in range(colA):\n result[i][j] = A[i][j]\n result[i][colA] = b[i][0]\n return result\n\n# 运行以下代码测试你的 augmentMatrix 函数\n%run -i -e test.py LinearRegressionTestCase.test_augmentMatrix", "2.2 初等行变换\n\n交换两行\n把某行乘以一个非零常数\n把某行加上另一行的若干倍:", "# TODO r1 <---> r2\n# 直接修改参数矩阵,无返回值\ndef swapRows(M, r1, r2):\n colM = shape(M)[1]\n for i in range(colM):\n tmp = M[r1][i]\n M[r1][i] = M[r2][i]\n M[r2][i] = tmp\n pass\n\n# 运行以下代码测试你的 swapRows 函数\n%run -i -e test.py LinearRegressionTestCase.test_swapRows\n\n# TODO r1 <--- r1 * scale\n# scale为0是非法输入,要求 raise ValueError\n# 直接修改参数矩阵,无返回值\ndef scaleRow(M, r, scale):\n if scale == 0:\n raise ValueError\n colM = shape(M)[1]\n for i in range(colM):\n M[r][i] *= scale\n pass\n\n# 运行以下代码测试你的 scaleRow 函数\n%run -i -e test.py LinearRegressionTestCase.test_scaleRow\n\n# TODO r1 <--- r1 + r2*scale\n# 直接修改参数矩阵,无返回值\ndef addScaledRow(M, r1, r2, scale):\n colM = shape(M)[1]\n for i in range(colM):\n M[r1][i] += M[r2][i]*scale\n pass\n\n# 运行以下代码测试你的 addScaledRow 函数\n%run -i -e test.py LinearRegressionTestCase.test_addScaledRow", "2.3 Gaussian Jordan 消元法求解 Ax = b\n2.3.1 算法\n步骤1 检查A,b是否行数相同\n步骤2 构造增广矩阵Ab\n步骤3 逐列转换Ab为化简行阶梯形矩阵 中文维基链接\n对于Ab的每一列(最后一列除外)\n 当前列为列c\n 寻找列c中 对角线以及对角线以下所有元素(行 c~N)的绝对值的最大值\n 如果绝对值最大值为0\n 那么A为奇异矩阵,返回None (你可以在选做问题2.4中证明为什么这里A一定是奇异矩阵)\n 否则\n 使用第一个行变换,将绝对值最大值所在行交换到对角线元素所在行(行c) \n 使用第二个行变换,将列c的对角线元素缩放为1\n 多次使用第三个行变换,将列c的其他元素消为0\n\n步骤4 返回Ab的最后一列\n注: 我们并没有按照常规方法先把矩阵转化为行阶梯形矩阵,再转换为化简行阶梯形矩阵,而是一步到位。如果你熟悉常规方法的话,可以思考一下两者的等价性。\n2.3.2 算法推演\n为了充分了解Gaussian Jordan消元法的计算流程,请根据Gaussian Jordan消元法,分别手动推演矩阵A为可逆矩阵,矩阵A为奇异矩阵两种情况。\n推演示例\n$Ab = \\begin{bmatrix}\n -7 & 5 & -1 & 1\\\n 1 & -3 & -8 & 1\\\n -10 & -2 & 9 & 1\\end{bmatrix}$\n$ --> $\n$\\begin{bmatrix}\n 1 & \\frac{1}{5} & -\\frac{9}{10} & -\\frac{1}{10}\\\n 0 & -\\frac{16}{5} & -\\frac{71}{10} & \\frac{11}{10}\\\n 0 & \\frac{32}{5} & -\\frac{73}{10} & \\frac{3}{10}\\end{bmatrix}$\n$ --> $\n$\\begin{bmatrix}\n 1 & 0 & -\\frac{43}{64} & -\\frac{7}{64}\\\n 0 & 1 & -\\frac{73}{64} & \\frac{3}{64}\\\n 0 & 0 & -\\frac{43}{4} & \\frac{5}{4}\\end{bmatrix}$\n$ --> $\n$\\begin{bmatrix}\n 1 & 0 & 0 & -\\frac{3}{16}\\\n 0 & 1 & 0 & -\\frac{59}{688}\\\n 0 & 0 & 1 & -\\frac{5}{43}\\end{bmatrix}$\n推演有以下要求:\n\n展示每一列的消元结果, 比如3*3的矩阵, 需要写三步\n用分数来表示\n分数不能再约分\n我们已经给出了latex的语法,你只要把零改成你要的数字(或分数)即可\n检查你的答案, 可以用这个, 或者后面通过单元测试后的gj_Solve\n\n你可以用python的 fractions 模块辅助你的约分\n以下开始你的尝试吧!", "# 不要修改这里!\nfrom helper import *\nA = generateMatrix(3,seed,singular=False)\nb = np.ones(shape=(3,1),dtype=int) # it doesn't matter\nAb = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了\nprintInMatrixFormat(Ab,padding=3,truncating=0)", "请按照算法的步骤3,逐步推演可逆矩阵的变换。\n在下面列出每一次循环体执行之后的增广矩阵。\n要求:\n1. 做分数运算\n2. 使用\\frac{n}{m}来渲染分数,如下:\n - $\\frac{n}{m}$\n - $-\\frac{a}{b}$\n$ Ab = \\begin{bmatrix}\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\end{bmatrix}$\n$ --> \\begin{bmatrix}\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\end{bmatrix}$\n$ --> \\begin{bmatrix}\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\end{bmatrix}$\n$...$", "# 不要修改这里!\nA = generateMatrix(3,seed,singular=True)\nb = np.ones(shape=(3,1),dtype=int)\nAb = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了\nprintInMatrixFormat(Ab,padding=3,truncating=0)", "请按照算法的步骤3,逐步推演奇异矩阵的变换。\n在下面列出每一次循环体执行之后的增广矩阵。\n要求:\n1. 做分数运算\n2. 
使用\\frac{n}{m}来渲染分数,如下:\n - $\\frac{n}{m}$\n - $-\\frac{a}{b}$\n$ Ab = \\begin{bmatrix}\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\end{bmatrix}$\n$ --> \\begin{bmatrix}\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\end{bmatrix}$\n$ --> \\begin{bmatrix}\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\\n 0 & 0 & 0 & 0 \\end{bmatrix}$\n$...$\n2.3.3 实现 Gaussian Jordan 消元法", "# TODO 实现 Gaussain Jordan 方法求解 Ax = b\n\n\"\"\" Gaussian Jordan 方法求解 Ax = b.\n 参数\n A: 方阵 \n b: 列向量\n decPts: 四舍五入位数,默认为4\n epsilon: 判读是否为0的阈值,默认 1.0e-16\n \n 返回列向量 x 使得 Ax = b \n 返回None,如果 A,b 高度不同\n 返回None,如果 A 为奇异矩阵\n\"\"\"\nfrom fractions import Fraction\ndef gj_Solve(A, b, decPts=4, epsilon = 1.0e-16):\n def max_idx(list):\n if max(list)<=epsilon:\n raise ValueError\n return 0 if len(list)<=0 else list.index(max(list))\n\n if not shape(A)[0] == shape(b)[0]:\n return None\n \n Ab = augmentMatrix(A, b)\n for i in range(shape(A)[1]):\n col_i = [abs(Ab[row_num][i]) for row_num in range(i, shape(Ab)[0])]\n try:\n idx = max_idx(col_i) + i\n swapRows(Ab, i, idx)\n scaleRow(Ab, i, 1.0/Ab[i][i])\n for j in range(shape(Ab)[0]):\n if j != i:\n addScaledRow(Ab, j, i, Fraction(-Ab[j][i]))\n except ValueError:\n return None\n result = [[0] * 1 for row in range(shape(Ab)[0])]\n \n for i in range(shape(Ab)[0]):\n result[i][0]=Ab[i][-1]\n \n return result\n\n# 运行以下代码测试你的 gj_Solve 函数\n%run -i -e test.py LinearRegressionTestCase.test_gj_Solve", "(选做) 2.4 算法正确判断了奇异矩阵:\n在算法的步骤3 中,如果发现某一列对角线和对角线以下所有元素都为0,那么则断定这个矩阵为奇异矩阵。\n我们用正式的语言描述这个命题,并证明为真。\n证明下面的命题:\n如果方阵 A 可以被分为4个部分: \n$ A = \\begin{bmatrix}\n I & X \\\n Z & Y \\\n\\end{bmatrix} , \\text{其中 I 为单位矩阵,Z 为全0矩阵,Y 的第一列全0}$,\n那么A为奇异矩阵。\n提示:从多种角度都可以完成证明\n- 考虑矩阵 Y 和 矩阵 A 的秩\n- 考虑矩阵 Y 和 矩阵 A 的行列式\n- 考虑矩阵 A 的某一列是其他列的线性组合\nTODO 证明:\n3 线性回归\n3.1 随机生成样本点", "# 不要修改这里!\n# 运行一次就够了!\nfrom helper import *\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n\nX,Y = generatePoints(seed,num=100)\n\n## 可视化\nplt.xlim((-5,5))\nplt.xlabel('x',fontsize=18)\nplt.ylabel('y',fontsize=18)\nplt.scatter(X,Y,c='b')\nplt.show()", "3.2 拟合一条直线\n3.2.1 猜测一条直线", "#TODO 请选择最适合的直线 y = mx + b\nm1 = 3.2\nb1 = 7.2\n\n# 不要修改这里!\nplt.xlim((-5,5))\nx_vals = plt.axes().get_xlim()\ny_vals = [m1*x+b1 for x in x_vals]\nplt.plot(x_vals, y_vals, '-', color='r')\n\nplt.xlabel('x',fontsize=18)\nplt.ylabel('y',fontsize=18)\nplt.scatter(X,Y,c='b')\n\nplt.show()", "3.2.2 计算平均平方误差 (MSE)\n我们要编程计算所选直线的平均平方误差(MSE), 即数据集中每个点到直线的Y方向距离的平方的平均数,表达式如下:\n$$\nMSE = \\frac{1}{n}\\sum_{i=1}^{n}{(y_i - mx_i - b)^2}\n$$", "# TODO 实现以下函数并输出所选直线的MSE\n\ndef calculateMSE(X,Y,m,b):\n list_ = ([(val[1]-val[0]*m-b)**2 for val in zip(X,Y)])\n return sum(list_)/len(list_)\n\nprint(calculateMSE(X,Y,m1,b1))", "3.2.3 调整参数 $m, b$ 来获得最小的平方平均误差\n你可以调整3.2.1中的参数 $m1,b1$ 让蓝点均匀覆盖在红线周围,然后微调 $m1, b1$ 让MSE最小。\n3.3 (选做) 找到参数 $m, b$ 使得平方平均误差最小\n这一部分需要简单的微积分知识( $ (x^2)' = 2x $ )。因为这是一个线性代数项目,所以设为选做。\n刚刚我们手动调节参数,尝试找到最小的平方平均误差。下面我们要精确得求解 $m, b$ 使得平方平均误差最小。\n定义目标函数 $E$ 为\n$$\nE = \\frac{1}{2}\\sum_{i=1}^{n}{(y_i - mx_i - b)^2}\n$$\n因为 $E = \\frac{n}{2}MSE$, 所以 $E$ 取到最小值时,$MSE$ 也取到最小值。要找到 $E$ 的最小值,即要找到 $m, b$ 使得 $E$ 相对于 $m$, $E$ 相对于 $b$ 的偏导数等于0. 
\n因此我们要解下面的方程组。\n$$\n\\begin{cases}\n\\displaystyle\n\\frac{\\partial E}{\\partial m} =0 \\\n\\\n\\displaystyle\n\\frac{\\partial E}{\\partial b} =0 \\\n\\end{cases}\n$$\n3.3.1 计算目标函数相对于参数的导数\n首先我们计算两个式子左边的值\n证明/计算:\n$$\n\\frac{\\partial E}{\\partial m} = \\sum_{i=1}^{n}{-x_i(y_i - mx_i - b)}\n$$\n$$\n\\frac{\\partial E}{\\partial b} = \\sum_{i=1}^{n}{-(y_i - mx_i - b)}\n$$\nTODO 证明:\n3.3.2 实例推演\n现在我们有了一个二元二次方程组\n$$\n\\begin{cases}\n\\displaystyle\n\\sum_{i=1}^{n}{-x_i(y_i - mx_i - b)} =0 \\\n\\\n\\displaystyle\n\\sum_{i=1}^{n}{-(y_i - mx_i - b)} =0 \\\n\\end{cases}\n$$\n为了加强理解,我们用一个实际例子演练。\n我们要用三个点 $(1,1), (2,2), (3,2)$ 来拟合一条直线 y = m*x + b, 请写出\n\n目标函数 $E$, \n二元二次方程组,\n并求解最优参数 $m, b$\n\nTODO 写出目标函数,方程组和最优参数\n3.3.3 将方程组写成矩阵形式\n我们的二元二次方程组可以用更简洁的矩阵形式表达,将方程组写成矩阵形式更有利于我们使用 Gaussian Jordan 消元法求解。\n请证明 \n$$\n\\begin{bmatrix}\n \\frac{\\partial E}{\\partial m} \\\n \\frac{\\partial E}{\\partial b} \n\\end{bmatrix} = X^TXh - X^TY\n$$\n其中向量 $Y$, 矩阵 $X$ 和 向量 $h$ 分别为 :\n$$\nY = \\begin{bmatrix}\n y_1 \\\n y_2 \\\n ... \\\n y_n\n\\end{bmatrix}\n,\nX = \\begin{bmatrix}\n x_1 & 1 \\\n x_2 & 1\\\n ... & ...\\\n x_n & 1 \\\n\\end{bmatrix},\nh = \\begin{bmatrix}\n m \\\n b \\\n\\end{bmatrix}\n$$\nTODO 证明:\n至此我们知道,通过求解方程 $X^TXh = X^TY$ 来找到最优参数。这个方程十分重要,他有一个名字叫做 Normal Equation,也有直观的几何意义。你可以在 子空间投影 和 投影矩阵与最小二乘 看到更多关于这个方程的内容。\n3.4 求解 $X^TXh = X^TY$\n在3.3 中,我们知道线性回归问题等价于求解 $X^TXh = X^TY$ (如果你选择不做3.3,就勇敢的相信吧,哈哈)", "# TODO 实现线性回归\n'''\n参数:X, Y 存储着一一对应的横坐标与纵坐标的两个一维数组\n返回:m,b 浮点数\n'''\ndef linearRegression(X,Y):\n MX = [[val,1] for val in X]\n MXT = transpose(MX)\n result_left = matxMultiply(MXT,MX)\n MY = [[val] for val in Y]\n result_right = matxMultiply(MXT,MY)\n [[m],[b]]=gj_Solve(result_left,result_right)\n return (m,b)\n\nm2,b2 = linearRegression(X,Y)\nassert isinstance(m2,float),\"m is not a float\"\nassert isinstance(b2,float),\"b is not a float\"\nprint(m2,b2)", "你求得的回归结果是什么?\n请使用运行以下代码将它画出来。", "# 请不要修改下面的代码\nx1,x2 = -5,5\ny1,y2 = x1*m2+b2, x2*m2+b2\n\nplt.xlim((-5,5))\nplt.xlabel('x',fontsize=18)\nplt.ylabel('y',fontsize=18)\nplt.scatter(X,Y,c='b')\nplt.plot((x1,x2),(y1,y2),'r')\nplt.title('y = {m:.4f}x + {b:.4f}'.format(m=m2,b=b2))\nplt.show()", "你求得的回归结果对当前数据集的MSE是多少?", "print(calculateMSE(X,Y,m2,b2))" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
pybel/pybel
notebooks/Compiling a BEL Document.ipynb
mit
[ "Loading BEL Documents\nWe'll always start by importing pybel.", "import os\nfrom urllib.request import urlretrieve\n\nimport pybel\nimport logging\n\n\nlogging.getLogger('pybel').setLevel(logging.DEBUG)\nlogging.basicConfig(level=logging.DEBUG)\nlogging.getLogger('urllib3').setLevel(logging.WARNING)\n\nprint(pybel.get_version())\n\nDESKTOP_PATH = os.path.join(os.path.expanduser('~'), 'Desktop')\nmanager = pybel.Manager(f'sqlite:///{DESKTOP_PATH}/pybel_example_database.db')", "First, we'll download and parse a BEL document from the Human Brain Pharmacome project describing the 2018 paper from Boland et al., \"Promoting the clearance of neurotoxic proteins in neurodegenerative disorders of ageing\".", "url = 'https://raw.githubusercontent.com/pharmacome/conib/master/hbp_knowledge/tau/boland2018.bel'", "A BEL document can be downloaded and parsed from a URL using pybel.from_bel_script_url. Keep in mind, the first time we load a given BEL document, various BEL resources that are referenced in the document must be cached. Be patient - this can take up to ten minutes.", "boland_2018_graph = pybel.from_bel_script_url(url, manager=manager)\n\npybel.to_database(boland_2018_graph, manager=manager)", "The graph is loaded into an instance of the pybel.BELGraph class. We can use the pybel.BELGraph.summarize() to print a brief summary of the graph.", "boland_2018_graph.summarize()", "Next, we'll open and parse a BEL document from the Human Brain Pharmacome project describing the 2018 paper from Cabellero et al., \"Interplay of pathogenic forms of human tau with different autophagic pathways\". This example uses urlretrieve() to download the file locally to demonstrate how to load from a local file path.", "url = 'https://raw.githubusercontent.com/pharmacome/conib/master/hbp_knowledge/tau/caballero2018.bel'\npath = os.path.join(DESKTOP_PATH, 'caballero2018.bel')\n\nif not os.path.exists(path):\n urlretrieve(url, path)", "A BEL document can also be parsed from a path to a file using pybel.from_bel_script. Like before, we will summarize the graph after parsing it.", "cabellero_2018_graph = pybel.from_bel_script(path, manager=manager)\n\ncabellero_2018_graph.summarize()\n\npybel.to_database(cabellero_2018_graph, manager=manager)", "We can combine two or more graphs in a list using pybel.union.", "combined_graph = pybel.union([boland_2018_graph, cabellero_2018_graph])\n\ncombined_graph.summarize()", "Note that there are some overlapping nodes, but no overlapping edges." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
anhaidgroup/py_entitymatching
notebooks/guides/step_wise_em_guides/Performing Blocking Using Blackbox Blocker.ipynb
bsd-3-clause
[ "Introduction\nThis IPython notebook illustrates how to perform blocking using rule-based blocker.\nFirst, we need to import py_entitymatching package and other libraries as follows:", "# Import py_entitymatching package\nimport py_entitymatching as em\nimport os\nimport pandas as pd", "Then, read the (sample) input tables for blocking purposes.", "# Get the datasets directory\ndatasets_dir = em.get_install_path() + os.sep + 'datasets'\n\n# Get the paths of the input tables\npath_A = datasets_dir + os.sep + 'person_table_A.csv'\npath_B = datasets_dir + os.sep + 'person_table_B.csv'\n\n# Read the CSV files and set 'ID' as the key attribute\nA = em.read_csv_metadata(path_A, key='ID')\nB = em.read_csv_metadata(path_B, key='ID')", "Different Ways to Block Using Blackbox Based Blocker\nThere are three different ways to do overlap blocking:\n\nBlock two tables to produce a candidate set of tuple pairs.\nBlock a candidate set of tuple pairs to typically produce a reduced candidate set of tuple pairs.\nBlock two tuples to check if a tuple pair would get blocked.\n\nBlock Tables to Produce a Candidate Set of Tuple Pairs\nFirst, define a blackbox function", "def address_address_function(x, y):\n # x, y will be of type pandas series\n \n # get name attribute\n x_address = x['address']\n y_address = y['address']\n # get the city\n x_split, y_split = x_address.split(','), y_address.split(',')\n x_city = x_split[len(x_split) - 1]\n y_city = y_split[len(y_split) - 1]\n # check if the cities match\n if x_city != y_city:\n return True\n else:\n return False\n\n# Instantiate blackbox blocker\nbb = em.BlackBoxBlocker()\n# Set the black box function\nbb.set_black_box_function(address_address_function)\n\nC = bb.block_tables(A, B, l_output_attrs=['name', 'address'], r_output_attrs=['name', 'address'])\n\nC", "Block Candidate Set\nFirst, define a blackbox function", "def name_name_function(x, y):\n # x, y will be of type pandas series\n \n # get name attribute\n x_name = x['name']\n y_name = y['name']\n # get last names\n x_name = x_name.split(' ')[1]\n y_name = y_name.split(' ')[1]\n # check if last names match\n if x_name != y_name:\n return True\n else:\n return False\n\n# Instantiate blackbox blocker\nbb = em.BlackBoxBlocker()\n# Set the black box function\nbb.set_black_box_function(name_name_function)\n\nD = bb.block_candset(C)\n\nD", "Block Two tuples To Check If a Tuple Pair Would Get Blocked\nFirst, define the black box function first", "def address_address_function(x, y):\n # x, y will be of type pandas series\n \n # get name attribute\n x_address = x['address']\n y_address = y['address']\n # get the city\n x_split, y_split = x_address.split(','), y_address.split(',')\n x_city = x_split[len(x_split) - 1]\n y_city = y_split[len(y_split) - 1]\n # check if the cities match\n if x_city != y_city:\n return True\n else:\n return False\n\n# Instantiate blackabox blocker\nbb = em.BlackBoxBlocker()\n# Set the blackbox function \nbb.set_black_box_function(address_address_function)\n\nA.ix[[0]]\n\nB.ix[[0]]\n\nstatus = bb.block_tuples(A.ix[0], B.ix[0])\n\nprint(status)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
myuuuuun/NumericalCalculation
chapter2/Chapter2.ipynb
mit
[ "第2章 関数近似(補間)\n教科書第2章に載っているアルゴリズムを実装していきます。\n各種ライブラリのインポート・後で使う汎用関数を定義", "#!/usr/bin/python\n#-*- encoding: utf-8 -*-\n\"\"\"\nCopyright (c) 2015 @myuuuuun\nhttps://github.com/myuuuuun/NumericalCalculation\n\nThis software is released under the MIT License.\n\"\"\"\n%matplotlib inline\nfrom __future__ import division, print_function\nimport math\nimport numpy as np\nimport functools\nimport sys\nimport types\nimport matplotlib.pyplot as plt\nEPSIRON = 1.0e-8\n\n\n\"\"\"\n係数行列[a_0, a_1, ..., a_n] から、n次多項式 a_0 + a_1 * x + ... + a_n * x^n\nを生成して返す(関数を返す)\n\"\"\"\ndef make_polynomial(a_matrix):\n def __func__(x):\n f = 0\n for n, a_i in enumerate(a_matrix):\n f += a_i * pow(x, n)\n\n return f\n\n return __func__\n\n\n\"\"\"\nグラフを描画し、その上に元々与えられていた点列を重ねてプロットする\n\nINPUT:\npoints: 与えられた点列のリスト[[x_0, f_0], [x_1, f_1], ..., [x_n, f_n]]\nx_list: 近似曲線を描写するxの範囲・密度\nf_list: 上のxに対応するfの値\n\"\"\"\ndef points_on_func(points, x_list, f_list, **kwargs):\n title = kwargs.get('title', \"Given Points and Interpolation Curve\")\n xlim = kwargs.get('xlim', False)\n ylim = kwargs.get('ylim', False)\n \n fig, ax = plt.subplots()\n plt.title(title)\n\n plt.plot(x_list, f_list, color='b', linewidth=1, label=\"Interpolation Curve\")\n\n points_x = [point[0] for point in points]\n points_y = [point[1] for point in points]\n plt.plot(points_x, points_y, 'o', color='r', label=\"Given Points\")\n \n plt.xlabel(\"x\")\n plt.ylabel(\"f\")\n if xlim:\n ax.set_xlim(xlim)\n \n if ylim:\n ax.set_ylim(ylim)\n plt.legend()\n plt.show()", "式(2.5)の実装\nn+1個の点列を入力し、逆行列を解いて、補間多項式を求め、n次補間多項式の係数行列[a_0, a_1, ..., a_n]を返す\nINPUT\npoints: n+1個の点列[[x_0, f_0], [x_1, f_1], ..., [x_n, f_n]]\nOUTPUT\nn次補間多項式の係数行列[a_0, a_1, ..., a_n]を返す", "def lagrange(points):\n # 次元数\n dim = len(points) - 1\n\n # matrix Xをもとめる(ヴァンデルモンドの行列式)\n x_matrix = np.array([[pow(point[0], j) for j in range(dim + 1)] for point in points])\n\n # matrix Fをもとめる\n f_matrix = np.array([point[1] for point in points])\n \n # 線形方程式 X * A = F を解く\n a_matrix = np.linalg.solve(x_matrix, f_matrix)\n\n return a_matrix\n\n# lagrange()で求めた補間多項式と、元の点列をプロットしてみる\n# 与えられた点列のリスト\npoints = [[1, 1], [2, 2], [3, 1], [4, 1], [5, 3]]\n\n# ラグランジュの補間多項式の係数行列を求める\na_matrix = lagrange(points)\n\n# 係数行列を多項式に変換\nfunc_lagrange = make_polynomial(a_matrix)\n\n# 0から8まで、0.1刻みでxとfの値のセットを求める\nx_list = np.arange(0, 8, 0.1)\nf_list = func_lagrange(x_list)\n\n# プロットする\npoints_on_func(points, x_list, f_list)", "式(2.7)の実装\n補間多項式を変形した式から、逆行列の計算をすることなく、ラグランジュの補間多項式を求める\nただし、今回は補間多項式の係数行列を返すのではなく、具体的なxの値のリストに対して、補間値のリストを生成して返す\nINPUT\npoints: 与えられた点列を入力\nx_list: 補間値を求めたいxのリストを入力\nOUTPUT\nf_list: x_listの各要素に対する補間値のリスト", "def lagrange2(points, x_list=np.arange(-5, 5, 0.1)):\n dim = len(points) - 1\n\n f_list = []\n for x in x_list:\n L = 0\n for i in range(dim + 1):\n Li = 1\n for j in range(dim + 1):\n if j != i:\n Li *= (x - points[j][0]) / (points[i][0] - points[j][0])\n\n Li *= points[i][1]\n L += Li\n\n f_list.append(L)\n\n return f_list\n\npoints = [[1, 1], [2, 2], [3, 1], [4, 1], [5, 3]]\na_matrix = lagrange2(points, np.arange(0, 8, 0.1))\npoints_on_func(points, np.arange(0, 8, 0.1), a_matrix)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
csaladenes/csaladenes.github.io
present/mcc2/PythonDataScienceHandbook/05.13-Kernel-Density-Estimation.ipynb
mit
[ "<!--BOOK_INFORMATION-->\n<img align=\"left\" style=\"padding-right:10px;\" src=\"figures/PDSH-cover-small.png\">\nThis notebook contains an excerpt from the Python Data Science Handbook by Jake VanderPlas; the content is available on GitHub.\nThe text is released under the CC-BY-NC-ND license, and code is released under the MIT license. If you find this content useful, please consider supporting the work by buying the book!\n<!--NAVIGATION-->\n< In Depth: Gaussian Mixture Models | Contents | Application: A Face Detection Pipeline >\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.13-Kernel-Density-Estimation.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\nIn-Depth: Kernel Density Estimation\nIn the previous section we covered Gaussian mixture models (GMM), which are a kind of hybrid between a clustering estimator and a density estimator.\nRecall that a density estimator is an algorithm which takes a $D$-dimensional dataset and produces an estimate of the $D$-dimensional probability distribution which that data is drawn from.\nThe GMM algorithm accomplishes this by representing the density as a weighted sum of Gaussian distributions.\nKernel density estimation (KDE) is in some senses an algorithm which takes the mixture-of-Gaussians idea to its logical extreme: it uses a mixture consisting of one Gaussian component per point, resulting in an essentially non-parametric estimator of density.\nIn this section, we will explore the motivation and uses of KDE.\nWe begin with the standard imports:", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport numpy as np", "Motivating KDE: Histograms\nAs already discussed, a density estimator is an algorithm which seeks to model the probability distribution that generated a dataset.\nFor one dimensional data, you are probably already familiar with one simple density estimator: the histogram.\nA histogram divides the data into discrete bins, counts the number of points that fall in each bin, and then visualizes the results in an intuitive manner.\nFor example, let's create some data that is drawn from two normal distributions:", "def make_data(N, f=0.3, rseed=1):\n rand = np.random.RandomState(rseed)\n x = rand.randn(N)\n x[int(f * N):] += 5\n return x\n\nx = make_data(1000)", "We have previously seen that the standard count-based histogram can be created with the plt.hist() function.\nBy specifying the normed parameter of the histogram, we end up with a normalized histogram where the height of the bins does not reflect counts, but instead reflects probability density:", "hist = plt.hist(x, bins=30, density=True)", "Notice that for equal binning, this normalization simply changes the scale on the y-axis, leaving the relative heights essentially the same as in a histogram built from counts.\nThis normalization is chosen so that the total area under the histogram is equal to 1, as we can confirm by looking at the output of the histogram function:", "density, bins, patches = hist\nwidths = bins[1:] - bins[:-1]\n(density * widths).sum()", "One of the issues with using a histogram as a density estimator is that the choice of bin size and location can lead to representations that have qualitatively different features.\nFor example, if we look at a version of this data with only 20 points, the choice of how to draw the bins can lead to 
an entirely different interpretation of the data!\nConsider this example:", "x = make_data(20)\nbins = np.linspace(-5, 10, 10)\n\nfig, ax = plt.subplots(1, 2, figsize=(12, 4),\n sharex=True, sharey=True,\n subplot_kw={'xlim':(-4, 9),\n 'ylim':(-0.02, 0.3)})\nfig.subplots_adjust(wspace=0.05)\nfor i, offset in enumerate([0.0, 0.6]):\n ax[i].hist(x, bins=bins + offset, density=True)\n ax[i].plot(x, np.full_like(x, -0.01), '|k',\n markeredgewidth=1)", "On the left, the histogram makes clear that this is a bimodal distribution.\nOn the right, we see a unimodal distribution with a long tail.\nWithout seeing the preceding code, you would probably not guess that these two histograms were built from the same data: with that in mind, how can you trust the intuition that histograms confer?\nAnd how might we improve on this?\nStepping back, we can think of a histogram as a stack of blocks, where we stack one block within each bin on top of each point in the dataset.\nLet's view this directly:", "fig, ax = plt.subplots()\nbins = np.arange(-3, 8)\nax.plot(x, np.full_like(x, -0.1), '|k',\n markeredgewidth=1)\nfor count, edge in zip(*np.histogram(x, bins)):\n for i in range(count):\n ax.add_patch(plt.Rectangle((edge, i), 1, 1,\n alpha=0.5))\nax.set_xlim(-4, 8)\nax.set_ylim(-0.2, 8)", "The problem with our two binnings stems from the fact that the height of the block stack often reflects not on the actual density of points nearby, but on coincidences of how the bins align with the data points.\nThis mis-alignment between points and their blocks is a potential cause of the poor histogram results seen here.\nBut what if, instead of stacking the blocks aligned with the bins, we were to stack the blocks aligned with the points they represent?\nIf we do this, the blocks won't be aligned, but we can add their contributions at each location along the x-axis to find the result.\nLet's try this:", "x_d = np.linspace(-4, 8, 2000)\ndensity = sum((abs(xi - x_d) < 0.5) for xi in x)\n\nplt.fill_between(x_d, density, alpha=0.5)\nplt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)\n\nplt.axis([-4, 8, -0.2, 8]);", "The result looks a bit messy, but is a much more robust reflection of the actual data characteristics than is the standard histogram.\nStill, the rough edges are not aesthetically pleasing, nor are they reflective of any true properties of the data.\nIn order to smooth them out, we might decide to replace the blocks at each location with a smooth function, like a Gaussian.\nLet's use a standard normal curve at each point instead of a block:", "from scipy.stats import norm\nx_d = np.linspace(-4, 8, 1000)\ndensity = sum(norm(xi).pdf(x_d) for xi in x)\n\nplt.fill_between(x_d, density, alpha=0.5)\nplt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)\n\nplt.axis([-4, 8, -0.2, 5]);", "This smoothed-out plot, with a Gaussian distribution contributed at the location of each input point, gives a much more accurate idea of the shape of the data distribution, and one which has much less variance (i.e., changes much less in response to differences in sampling).\nThese last two plots are examples of kernel density estimation in one dimension: the first uses a so-called \"tophat\" kernel and the second uses a Gaussian kernel.\nWe'll now look at kernel density estimation in more detail.\nKernel Density Estimation in Practice\nThe free parameters of kernel density estimation are the kernel, which specifies the shape of the distribution placed at each point, and the kernel bandwidth, which controls the size of the 
kernel at each point.\nIn practice, there are many kernels you might use for a kernel density estimation: in particular, the Scikit-Learn KDE implementation supports one of six kernels, which you can read about in Scikit-Learn's Density Estimation documentation.\nWhile there are several versions of kernel density estimation implemented in Python (notably in the SciPy and StatsModels packages), I prefer to use Scikit-Learn's version because of its efficiency and flexibility.\nIt is implemented in the sklearn.neighbors.KernelDensity estimator, which handles KDE in multiple dimensions with one of six kernels and one of a couple dozen distance metrics.\nBecause KDE can be fairly computationally intensive, the Scikit-Learn estimator uses a tree-based algorithm under the hood and can trade off computation time for accuracy using the atol (absolute tolerance) and rtol (relative tolerance) parameters.\nThe kernel bandwidth, which is a free parameter, can be determined using Scikit-Learn's standard cross validation tools as we will soon see.\nLet's first show a simple example of replicating the above plot using the Scikit-Learn KernelDensity estimator:", "from sklearn.neighbors import KernelDensity\n\n# instantiate and fit the KDE model\nkde = KernelDensity(bandwidth=1.0, kernel='gaussian')\nkde.fit(x[:, None])\n\n# score_samples returns the log of the probability density\nlogprob = kde.score_samples(x_d[:, None])\n\nplt.fill_between(x_d, np.exp(logprob), alpha=0.5)\nplt.plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)\nplt.ylim(-0.02, 0.22)", "The result here is normalized such that the area under the curve is equal to 1.\nSelecting the bandwidth via cross-validation\nThe choice of bandwidth within KDE is extremely important to finding a suitable density estimate, and is the knob that controls the bias–variance trade-off in the estimate of density: too narrow a bandwidth leads to a high-variance estimate (i.e., over-fitting), where the presence or absence of a single point makes a large difference. 
Too wide a bandwidth leads to a high-bias estimate (i.e., under-fitting) where the structure in the data is washed out by the wide kernel.\nThere is a long history in statistics of methods to quickly estimate the best bandwidth based on rather stringent assumptions about the data: if you look up the KDE implementations in the SciPy and StatsModels packages, for example, you will see implementations based on some of these rules.\nIn machine learning contexts, we've seen that such hyperparameter tuning often is done empirically via a cross-validation approach.\nWith this in mind, the KernelDensity estimator in Scikit-Learn is designed such that it can be used directly within the Scikit-Learn's standard grid search tools.\nHere we will use GridSearchCV to optimize the bandwidth for the preceding dataset.\nBecause we are looking at such a small dataset, we will use leave-one-out cross-validation, which minimizes the reduction in training set size for each cross-validation trial:", "from sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import LeaveOneOut\n\nbandwidths = 10 ** np.linspace(-1, 1, 100)\ngrid = GridSearchCV(KernelDensity(kernel='gaussian'),\n {'bandwidth': bandwidths},\n cv=LeaveOneOut())\ngrid.fit(x[:, None]);", "Now we can find the choice of bandwidth which maximizes the score (which in this case defaults to the log-likelihood):", "grid.best_params_", "The optimal bandwidth happens to be very close to what we used in the example plot earlier, where the bandwidth was 1.0 (i.e., the default width of scipy.stats.norm).\nExample: KDE on a Sphere\nPerhaps the most common use of KDE is in graphically representing distributions of points.\nFor example, in the Seaborn visualization library (see Visualization With Seaborn), KDE is built in and automatically used to help visualize points in one and two dimensions.\nHere we will look at a slightly more sophisticated use of KDE for visualization of distributions.\nWe will make use of some geographic data that can be loaded with Scikit-Learn: the geographic distributions of recorded observations of two South American mammals, Bradypus variegatus (the Brown-throated Sloth) and Microryzomys minutus (the Forest Small Rice Rat).\nWith Scikit-Learn, we can fetch this data as follows:", "from sklearn.datasets import fetch_species_distributions\n\n# this step might fail based on permssions and network access\n# if in Docker, specify --network=host\n# if in docker-compose specify version 3.4 and build -> network: host\ndata = fetch_species_distributions()\n\n# Get matrices/arrays of species IDs and locations\nlatlon = np.vstack([data.train['dd lat'],\n data.train['dd long']]).T\nspecies = np.array([d.decode('ascii').startswith('micro')\n for d in data.train['species']], dtype='int')", "With this data loaded, we can use the Basemap toolkit (mentioned previously in Geographic Data with Basemap) to plot the observed locations of these two species on the map of South America.", "# !conda install -c conda-forge basemap-data-hires -y\n\n# RESTART KERNEL\n\n#Hack to fix missing PROJ4 env var\nimport os\nimport conda\n\nconda_file_dir = conda.__file__\nconda_dir = conda_file_dir.split('lib')[0]\nproj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')\nos.environ[\"PROJ_LIB\"] = proj_lib\n\nfrom mpl_toolkits.basemap import Basemap\nfrom sklearn.datasets.species_distributions import construct_grids\n\nxgrid, ygrid = construct_grids(data)\n\n# plot coastlines with basemap\nm = Basemap(projection='cyl', resolution='c',\n 
llcrnrlat=ygrid.min(), urcrnrlat=ygrid.max(),\n llcrnrlon=xgrid.min(), urcrnrlon=xgrid.max())\nm.drawmapboundary(fill_color='#DDEEFF')\nm.fillcontinents(color='#FFEEDD')\nm.drawcoastlines(color='gray', zorder=2)\nm.drawcountries(color='gray', zorder=2)\n\n# plot locations\nm.scatter(latlon[:, 1], latlon[:, 0], zorder=3,\n c=species, cmap='rainbow', latlon=True);", "Unfortunately, this doesn't give a very good idea of the density of the species, because points in the species range may overlap one another.\nYou may not realize it by looking at this plot, but there are over 1,600 points shown here!\nLet's use kernel density estimation to show this distribution in a more interpretable way: as a smooth indication of density on the map.\nBecause the coordinate system here lies on a spherical surface rather than a flat plane, we will use the haversine distance metric, which will correctly represent distances on a curved surface.\nThere is a bit of boilerplate code here (one of the disadvantages of the Basemap toolkit) but the meaning of each code block should be clear:", "# Set up the data grid for the contour plot\nX, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])\nland_reference = data.coverages[6][::5, ::5]\nland_mask = (land_reference > -9999).ravel()\nxy = np.vstack([Y.ravel(), X.ravel()]).T\nxy = np.radians(xy[land_mask])\n\n# Create two side-by-side plots\nfig, ax = plt.subplots(1, 2)\nfig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)\nspecies_names = ['Bradypus Variegatus', 'Microryzomys Minutus']\ncmaps = ['Purples', 'Reds']\n\nfor i, axi in enumerate(ax):\n axi.set_title(species_names[i])\n \n # plot coastlines with basemap\n m = Basemap(projection='cyl', llcrnrlat=Y.min(),\n urcrnrlat=Y.max(), llcrnrlon=X.min(),\n urcrnrlon=X.max(), resolution='c', ax=axi)\n m.drawmapboundary(fill_color='#DDEEFF')\n m.drawcoastlines()\n m.drawcountries()\n \n # construct a spherical kernel density estimate of the distribution\n kde = KernelDensity(bandwidth=0.03, metric='haversine')\n kde.fit(np.radians(latlon[species == i]))\n\n # evaluate only on the land: -9999 indicates ocean\n Z = np.full(land_mask.shape[0], -9999.0)\n Z[land_mask] = np.exp(kde.score_samples(xy))\n Z = Z.reshape(X.shape)\n\n # plot contours of the density\n levels = np.linspace(0, Z.max(), 25)\n axi.contourf(X, Y, Z, levels=levels, cmap=cmaps[i])", "Compared to the simple scatter plot we initially used, this visualization paints a much clearer picture of the geographical distribution of observations of these two species.\nExample: Not-So-Naive Bayes\nThis example looks at Bayesian generative classification with KDE, and demonstrates how to use the Scikit-Learn architecture to create a custom estimator.\nIn In Depth: Naive Bayes Classification, we took a look at naive Bayesian classification, in which we created a simple generative model for each class, and used these models to build a fast classifier.\nFor Gaussian naive Bayes, the generative model is a simple axis-aligned Gaussian.\nWith a density estimation algorithm like KDE, we can remove the \"naive\" element and perform the same classification with a more sophisticated generative model for each class.\nIt's still Bayesian classification, but it's no longer naive.\nThe general approach for generative classification is this:\n\n\nSplit the training data by label.\n\n\nFor each set, fit a KDE to obtain a generative model of the data.\n This allows you for any observation $x$ and label $y$ to compute a likelihood $P(x~|~y)$.\n\n\nFrom the number of examples of each class in 
the training set, compute the class prior, $P(y)$.\n\n\nFor an unknown point $x$, the posterior probability for each class is $P(y~|~x) \\propto P(x~|~y)P(y)$.\n The class which maximizes this posterior is the label assigned to the point.\n\n\nThe algorithm is straightforward and intuitive to understand; the more difficult piece is couching it within the Scikit-Learn framework in order to make use of the grid search and cross-validation architecture.\nThis is the code that implements the algorithm within the Scikit-Learn framework; we will step through it following the code block:", "from sklearn.base import BaseEstimator, ClassifierMixin\n\n\nclass KDEClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Bayesian generative classification based on KDE\n \n Parameters\n ----------\n bandwidth : float\n the kernel bandwidth within each class\n kernel : str\n the kernel name, passed to KernelDensity\n \"\"\"\n def __init__(self, bandwidth=1.0, kernel='gaussian'):\n self.bandwidth = bandwidth\n self.kernel = kernel\n \n def fit(self, X, y):\n self.classes_ = np.sort(np.unique(y))\n training_sets = [X[y == yi] for yi in self.classes_]\n self.models_ = [KernelDensity(bandwidth=self.bandwidth,\n kernel=self.kernel).fit(Xi)\n for Xi in training_sets]\n self.logpriors_ = [np.log(Xi.shape[0] / X.shape[0])\n for Xi in training_sets]\n return self\n \n def predict_proba(self, X):\n logprobs = np.array([model.score_samples(X)\n for model in self.models_]).T\n result = np.exp(logprobs + self.logpriors_)\n return result / result.sum(1, keepdims=True)\n \n def predict(self, X):\n return self.classes_[np.argmax(self.predict_proba(X), 1)]", "The anatomy of a custom estimator\nLet's step through this code and discuss the essential features:\n```python\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nclass KDEClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Bayesian generative classification based on KDE\nParameters\n----------\nbandwidth : float\n the kernel bandwidth within each class\nkernel : str\n the kernel name, passed to KernelDensity\n\"\"\"\n\n```\nEach estimator in Scikit-Learn is a class, and it is most convenient for this class to inherit from the BaseEstimator class as well as the appropriate mixin, which provides standard functionality.\nFor example, among other things, here the BaseEstimator contains the logic necessary to clone/copy an estimator for use in a cross-validation procedure, and ClassifierMixin defines a default score() method used by such routines.\nWe also provide a doc string, which will be captured by IPython's help functionality (see Help and Documentation in IPython).\nNext comes the class initialization method:\npython\n def __init__(self, bandwidth=1.0, kernel='gaussian'):\n self.bandwidth = bandwidth\n self.kernel = kernel\nThis is the actual code that is executed when the object is instantiated with KDEClassifier().\nIn Scikit-Learn, it is important that initialization contains no operations other than assigning the passed values by name to self.\nThis is due to the logic contained in BaseEstimator required for cloning and modifying estimators for cross-validation, grid search, and other functions.\nSimilarly, all arguments to __init__ should be explicit: i.e. 
*args or **kwargs should be avoided, as they will not be correctly handled within cross-validation routines.\nNext comes the fit() method, where we handle training data:\npython \n def fit(self, X, y):\n self.classes_ = np.sort(np.unique(y))\n training_sets = [X[y == yi] for yi in self.classes_]\n self.models_ = [KernelDensity(bandwidth=self.bandwidth,\n kernel=self.kernel).fit(Xi)\n for Xi in training_sets]\n self.logpriors_ = [np.log(Xi.shape[0] / X.shape[0])\n for Xi in training_sets]\n return self\nHere we find the unique classes in the training data, train a KernelDensity model for each class, and compute the class priors based on the number of input samples.\nFinally, fit() should always return self so that we can chain commands. For example:\npython\nlabel = model.fit(X, y).predict(X)\nNotice that each persistent result of the fit is stored with a trailing underscore (e.g., self.logpriors_).\nThis is a convention used in Scikit-Learn so that you can quickly scan the members of an estimator (using IPython's tab completion) and see exactly which members are fit to training data.\nFinally, we have the logic for predicting labels on new data:\n```python\n def predict_proba(self, X):\n logprobs = np.vstack([model.score_samples(X)\n for model in self.models_]).T\n result = np.exp(logprobs + self.logpriors_)\n return result / result.sum(1, keepdims=True)\ndef predict(self, X):\n return self.classes_[np.argmax(self.predict_proba(X), 1)]\n\n`\nBecause this is a probabilistic classifier, we first implementpredict_proba()which returns an array of class probabilities of shape[n_samples, n_classes].\nEntry[i, j]of this array is the posterior probability that sampleiis a member of classj``, computed by multiplying the likelihood by the class prior and normalizing.\nFinally, the predict() method uses these probabilities and simply returns the class with the largest probability.\nUsing our custom estimator\nLet's try this custom estimator on a problem we have seen before: the classification of hand-written digits.\nHere we will load the digits, and compute the cross-validation score for a range of candidate bandwidths using the GridSearchCV meta-estimator (refer back to Hyperparameters and Model Validation):", "from sklearn.datasets import load_digits\nfrom sklearn.model_selection import GridSearchCV\n\ndigits = load_digits()\n\nbandwidths = 10 ** np.linspace(0, 2, 100)\ngrid = GridSearchCV(KDEClassifier(), {'bandwidth': bandwidths})\ngrid.fit(digits.data, digits.target)\n\n# scores = [val.mean_validation_score for val in grid.grid_scores_]\nscores = grid.cv_results_['mean_test_score']", "Next we can plot the cross-validation score as a function of bandwidth:", "plt.semilogx(bandwidths, scores)\nplt.xlabel('bandwidth')\nplt.ylabel('accuracy')\nplt.title('KDE Model Performance')\nprint(grid.best_params_)\nprint('accuracy =', grid.best_score_)", "We see that this not-so-naive Bayesian classifier reaches a cross-validation accuracy of just over 96%; this is compared to around 80% for the naive Bayesian classification:", "from sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import cross_val_score\ncross_val_score(GaussianNB(), digits.data, digits.target).mean()", "One benefit of such a generative classifier is interpretability of results: for each unknown sample, we not only get a probabilistic classification, but a full model of the distribution of points we are comparing it to!\nIf desired, this offers an intuitive window into the reasons for a particular classification that 
algorithms like SVMs and random forests tend to obscure.\nIf you would like to take this further, there are some improvements that could be made to our KDE classifier model:\n\nwe could allow the bandwidth in each class to vary independently\nwe could optimize these bandwidths not based on their prediction score, but on the likelihood of the training data under the generative model within each class (i.e. use the scores from KernelDensity itself rather than the global prediction accuracy)\n\nFinally, if you want some practice building your own estimator, you might tackle building a similar Bayesian classifier using Gaussian Mixture Models instead of KDE.\n<!--NAVIGATION-->\n< In Depth: Gaussian Mixture Models | Contents | Application: A Face Detection Pipeline >\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.13-Kernel-Density-Estimation.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mortada/notebooks
blog/unbiased_variance_estimator.ipynb
apache-2.0
[ "Variance Estimation\nIn statistics we know that the mean and variance of a population $Y$ are defined to be:\n\\begin{equation}\n\\left{\n\\begin{aligned}\n \\text{Mean}(Y) &= \\mu = \\frac{1}{N} \\sum_{i=1}^{N} Y_i \\\n \\text{Var}(Y) &= \\sigma^2 = \\frac{1}{N} \\sum_{i=1}^{N} (Y_i - \\mu)^2 \\\n\\end{aligned}\n\\right.\n\\end{equation}\nwhere $N$ is the size of the population.\n<!-- PELICAN_END_SUMMARY -->\n\nGiven the population $Y$, we can draw a sample $X$ and compute statistics for $X$:\n\\begin{equation}\n\\left{\n\\begin{aligned}\n \\text{Mean}(X) &= \\bar{X} = \\frac{1}{n} \\sum_{j=1}^{n} X_j \\\n \\text{Var}(X) &= s^2 = \\frac{1}{n - 1} \\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\\n\\end{aligned}\n\\right.\n\\end{equation}\nwhere lowercase $n$ is the size of the sample, typically a much smaller number than $N$. One detail that is often not clearly explained in introductory statistics is why we should divide by $n - 1$ instead of $n$ in the calculation for the sample variance.\nWhy divide by n - 1?\nIt turns out that we should divide by $n - 1$ because dividing by $n$ would give us a biased estimator of the population variance. Let's look at a concrete example before diving into the math for why. Let's say we have a population of 100,000 data points. These can represent, for instance, a movie rating for each of 100,000 people.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom IPython.core.pylabtools import figsize\nfigsize(15, 5)\n\nimport pandas as pd\nimport numpy as np\n\nnp.random.seed(42)\n\nN = 100000 # size of population\n\npopulation = pd.Series(np.random.randint(1, 11, N))", "We can easily calculate the population mean and population variance:", "population.mean()\n\n((population - population.mean()) ** 2).sum() / N", "Note that we are dividing by $N$ in the variance calculation, also that in numpy or pandas this is the same as simply using the method var with ddof=0", "population.var(ddof=0)", "where ddof=0 means to divide by $N$, and ddof=1 means to divide by $N - 1$.\nSimulation\nAs usual in statistics, the population parameters are often unknown. But we can estimate them by drawing samples from the population. Here we are drawing a random sample of size $30$. As of version 0.16.1, pandas has a convenient Series.sample() function for this:", "samples = {}\nn = 30 # size of each sample\nnum_samples = 500 # we are drawing 500 samples, each with size n\nfor i in range(num_samples):\n samples[i] = population.sample(n).reset_index(drop=True)\n\nsamples = pd.DataFrame(samples)\nsamples.T.tail()", "As we expect, if we average all the sample means we can see that the it is a good estimate for the true population mean:", "df = pd.DataFrame({'estimated mean': pd.expanding_mean(samples.mean()),\n 'actual population mean': pd.Series(population.mean(), index=samples.columns)})\ndf.plot(ylim=(4.5, 6.5))", "Now let's compare the results we would get by using the biased estimator (dividing by $n$) and the unbiased estimator (dividing by $n-1$)", "df = pd.DataFrame({'biased var estimate (divide by n)': pd.expanding_mean(samples.var(ddof=0)),\n 'unbiased var estimate (divide by n - 1)': pd.expanding_mean(samples.var(ddof=1)),\n 'actual population var': pd.Series(population.var(ddof=0), index=samples.columns)})\ndf.plot(ylim=(6.5, 10.5))", "We can see that the biased estimator (dividing by $n$) is clearly not estimating the true population variance as accurately as the unbiased estimator (dividing by $n-1$). 
\nMathematical Proof\nTo prove that dividing by $n - 1$ is an unbiased estimator, we need to show that expected value of the estimaor is indeed $\\sigma^2$:\n\\begin{equation}\n E(s^2) = E\\left(\\frac{1}{n - 1} \\sum_{j=1}^{n} (X_j - \\bar{X})^2\\right) = \\sigma^2\n\\end{equation}\nFirst we'll need to recall a few basic properties of expectation and variance:\n\\begin{equation}\n\\left{\n\\begin{aligned}\n & E(Z_1 + Z_2) = E(Z_1) + E(Z_2), \\text{ for any } Z_1, Z_2 \\\n & \\text{Var}(a Z) = a^2 \\text{Var}(Z), \\text{ for any } Z \\\n & \\text{Var}(Z_1 + Z_2) = \\text{Var}(Z_1) + \\text{Var}(Z_2), \\text{ if } Z_1 \\text{ and } Z_2 \\text{ are independent} \\\n\\end{aligned}\n\\right.\n\\end{equation}\nAlso, the following is a useful form for variance:\n\\begin{equation}\n \\text{Var}(Z) = E((Z - E(Z))^2) = E(Z^2 - 2ZE(Z) + E(Z)^2) = E(Z^2) - E(Z)^2\n\\end{equation}\nThis is equivalent to\n\\begin{equation}\n E(Z^2) = \\text{Var}(Z) + E(Z)^2\n\\end{equation}\nUsing the above properties we can now simplify the expression for $E(s^2)$:\n\\begin{aligned}\n E(s^2) = E\\left(\\frac{1}{n - 1} \\sum_{j=1}^{n} (X_j - \\bar{X})^2\\right) = & \\frac{1}{n - 1} E \\left( \\sum_{j=1}^{n} (X_j^2 - 2X_j\\bar{X} + \\bar{X}^2) \\right) \\\n = & \\ \\frac{1}{n - 1} E \\left( \\sum_{j=1}^{n} X_j^2 - 2n\\bar{X}^2 + n\\bar{X}^2 \\right) \\\n = & \\ \\frac{1}{n - 1} E \\left( \\sum_{j=1}^{n} X_j^2 - n\\bar{X}^2 \\right) \\\n = & \\ \\frac{1}{n - 1} \\left[ E \\left( \\sum_{j=1}^{n} X_j^2 \\right) - E \\left( n\\bar{X}^2 \\right) \\right] \\\n = & \\ \\frac{1}{n - 1} \\left[ \\sum_{j=1}^{n} E \\left( X_j^2 \\right) - n E \\left( \\bar{X}^2 \\right) \\right] \\\n\\end{aligned}\nNow notice that the first term can be simplied as:\n\\begin{aligned}\n \\sum_{j=1}^{n} E \\left( X_j^2 \\right) = & \\sum_{j=1}^{n} \\left( Var(X_j) + E(X_j)^2 \\right) \\\n = & \\sum_{j=1}^{n} \\left( \\sigma^2 + \\mu ^2 \\right) \\\n = & \\ n \\sigma^2 + n \\mu ^2 \\\n\\end{aligned}\nUsing the same trick, the second term becomes:\n\\begin{aligned}\n E(\\bar{X}^2) = & \\ Var(\\bar{X}) + E(\\bar{X})^2 \\\n = & Var(\\frac{1}{n} \\sum_{j=1}^{n} X_j) + \\mu ^2 \\\n = & \\frac{1}{n^2} Var(\\sum_{j=1}^{n} X_j) + \\mu ^2 \\\n = & \\frac{1}{n^2} \\sum_{j=1}^{n} Var(X_j) + \\mu ^2, \\text{ because all } X_j\\text{'s are independent} \\\n = & \\frac{1}{n^2} n\\sigma^2 + \\mu ^2 \\\n = & \\frac{1}{n} \\sigma^2 + \\mu ^2 \\\n\\end{aligned} \nPlugging the two terms back we finally get:\n\\begin{aligned}\n E(s^2) = & \\ \\frac{1}{n-1} \\left[ \\sum_{j=1}^{n} E \\left( X_j^2 \\right) - n E \\left(\\bar{X}^2 \\right) \\right] \\\n = & \\ \\frac{1}{n-1} \\left[n \\sigma^2 + n \\mu ^2 - n \\left( \\frac{1}{n} \\sigma^2 + \\mu ^2 \\right) \\right] \\\n = & \\ \\frac{1}{n-1} \\left[n \\sigma^2 + n \\mu ^2 - \\sigma^2 - n \\mu ^2 \\right] \\\n = & \\ \\sigma^2 \\\n\\end{aligned}\nDividing by $n-1$ gives us an unbiased estimate for the population variance indeed!\nSource of Bias\nOne intuitive way to think about why the bias exists is to notice that we generally don't actually know the true population mean $\\mu$, and therefore the sample variance is being computed using the estimated mean $\\bar{X}$. 
However the quadratic form $\\sum_{j=1}^{n} (X_j - a)^2$ is actually minimized by $a = \\bar{X}$, which means that whatever the true population mean $\\mu$ is, we will always have\n\\begin{equation}\n \\sum_{j=1}^{n} (X_j - \\mu)^2 \\geq \\sum_{j=1}^{n} (X_j - \\bar{X})^2\n\\end{equation}\nTherefore we are underestimating the true variance because we don't know the true mean.\nIn fact, we can see that we are underestimating by exactly $\\sigma^2$ on average:\n\\begin{aligned}\n E\\left(\\sum_{j=1}^{n} (X_j - \\mu)^2\\right) = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X} + \\bar{X} - \\mu)^2\\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 + \\sum_{j=1}^{n} 2(X_j - \\bar{X})(\\bar{X} - \\mu) + \\sum_{j=1}^{n} (\\bar{X} - \\mu)^2 \\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 + \\sum_{j=1}^{n} (\\bar{X} - \\mu)^2 \\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + E \\left(\\sum_{j=1}^{n} (\\bar{X} - \\mu)^2 \\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + \\sum_{j=1}^{n} E \\left((\\bar{X} - \\mu)^2 \\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + \\sum_{j=1}^{n} \\left( \\text{Var} (\\bar{X} - \\mu) + E (\\bar{X} - \\mu)^2 \\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + \\sum_{j=1}^{n} \\left( \\text{Var} (\\bar{X}) + E (\\bar{X} - \\mu)^2 \\right) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + \\sum_{j=1}^{n} \\text{Var} (\\bar{X}) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + n \\text{Var} (\\bar{X}) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + n \\text{Var} (\\frac{1}{n} \\sum_{j=1}^{n} X_j) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + n \\frac{1}{n^2} \\sum_{j=1}^{n} \\text{Var} (X_j) \\\n = & \\ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2 \\right) + \\sigma^2 \\\n\\end{aligned}\nCombined with the result we have from the proof in the previous section, we can see that if we somehow magically knew the true mean $\\mu$, dividing by $n$ would be unbiased:\n\\begin{aligned}\n E\\left(\\frac{1}{n} \\sum_{j=1}^{n} (X_j - \\mu)^2\\right) = & \\ \\frac{1}{n} E \\left(\\sum_{j=1}^{n} (X_j - \\mu)^2\\right) \\\n = & \\ \\frac{1}{n} \\left[ E \\left(\\sum_{j=1}^{n} (X_j - \\bar{X})^2\\right) + \\sigma^2 \\right] \\\n = & \\ \\frac{1}{n} \\left[ (n - 1) \\sigma^2 + \\sigma^2 \\right] \\\n = & \\ \\sigma^2 \\\n\\end{aligned}\nHowever since we don't know the true mean and are using the estimated mean $\\bar{X}$ instead, we'd need to divide by $n - 1$ to correct for the bias. This is also known as Bessel's correction." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Pinafore/ds-hw
python-tutorials/defaultdict.ipynb
mit
[ "Python default dictionary vs dictionary\nThis notebook motivates and explains why python has default dictionaries\nRead more here: https://docs.python.org/3/library/collections.html#collections.defaultdict\nSuppose you have a list of tuples where each one has a string key and integer value. Your task is to sum all the values which have the same key", "data = [\n ('california', 1),\n ('california', 3),\n ('colorado', 0),\n ('colorado', 10),\n ('washington', 2),\n ('washington', 4)\n]", "With an ordinary dictionary, I would need to check if they key exists. If it doesn't I need to initialize it with a value. For instrutional purposes I will call the int() function which will return the default value for an integer which is 0.", "# This won't work because I haven't initialized keys\n\nsummed = dict()\nfor row in data:\n key, value = row # destructure the tuple\n summed[key] = summed[key] + value", "As expected, the first time we try to set the value for california, it doesn't exist in the dictionary so the right handside of the equal sign errors. Thats easy to fix like this", "summed = dict()\nfor row in data:\n key, value = row\n if key not in summed:\n summed[key] = int()\n \n summed[key] = summed[key] + value\n\nsummed", "Lets see one more example that instead of summing the numbers we wan't to collect everything into a list. So lets replace int() with list() since we wan't to make an empty list. We also need to change the summing term to use append instead", "merged = dict()\nfor row in data:\n key, value = row\n if key not in merged:\n merged[key] = list()\n \n merged[key].append(value)\n\nmerged", "Its inconvenient to do this check every time so python has a nice way to make this pattern simpler. This is what collections.defaultdict was designed for. It does the following:\n\nTakes a single argument which is a function which we will call func\nWhen a key is accessed (for example with merged[key], check if it exists. If it doesn't, instead of erroring initialize it to the return of func then proceed as normal\n\nLets see both examples from above using this", "from collections import defaultdict\n\nsummed = defaultdict(int)\nfor row in data:\n key, value = row\n summed[key] = summed[key] + value\n\nsummed\n\nmerged = defaultdict(list)\nfor row in data:\n key, value = row\n merged[key].append(value)\n\nmerged\n\ndef myinit():\n return -100\n\nsummed = defaultdict(myinit)\nfor row in data:\n key, value = row\n summed[key] += value\n\nsummed", "As expected, the results are exactly the same, and it is based on the initial method you pass it. This function is called a factory method since each time a key needs to be initialized you can imagine that the function acts as a factory which creates new values. Lets cover one of the common mistakes with default dictionaries before concluding. The source of this mistake is that any time a non-existent key is accessed its initialized.", "d = defaultdict(str)\n\n# initially this is empty so all of these should be false\nprint('pedro in dictionary:', 'pedro' in d)\nprint('jordan in dictionary:', 'jordan' in d)\n\n# Lets set something in the dictionary now and check that again\n\nd['jordan'] = 'professor'\n\nprint('jordan is in dictionary:', 'jordan' in d)\nprint('pedro is in dictionary:', 'pedro' in d)\n\n# Lets accidentally access 'pedro' before setting it then see what happens\n\npedro_job = d['pedro']\n\nprint('pedro is in dictionary:', 'pedro' in d)\nprint(d)\nprint('-->', d['pedro'], '<--', type(d['pedro']))", "So this is odd! 
You never set a key (only accessed it), but nonetheless pedro is in the dictionary. This is because when the 'pedro' key was accessed and not there, python set it to the return of str which returns an empty string. Lets set this to the real value and be done", "d['pedro'] = 'PhD Student'\n\nprint('pedro is in dictionary:', 'pedro' in d)\nprint(d)\nprint('-->', d['pedro'], '<--', type(d['pedro']))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
flaxandteal/python-course-lecturer-notebooks
.ipynb_checkpoints/Python Course - 002a - And so we begin-checkpoint.ipynb
mit
[ "... and so we begin\nCritical information\nFirst steps\nOrder of the day\n\n\nLearn to use Jupyter / iPython Notebook\n\n\nGet familiar with basic Python\n\n\nStart with Spyder, a traditional editor\n\n\nFundamental Python-in-Science skills\n\n\nWhat is Jupyter\n(previously iPython Notebook)\nAn interactive Q&A-style Python prompt, with output in formatted text, images, graphs and more (and it even works with other languages too)\nA bit like a cross between Mathematica and Wolfram Alpha, that runs in your browser, but you can save all your worksheets locally. We will explore this, as it is very useful for doing quick calculations, collaborating on research, as a whiteboard, nonlinear discussions where you can adjust graphs or calculations, as teaching tool (I hope), or simply storing your train of thought in computations and notes. This series of slides was prepared in Jupyter, which is why I can do this...\nIt lets you do things like...", "import datetime\nprint(datetime.date.today())", "You want to add in .weekday()\nLets you output LaTeX-style (formatted) maths\nExample calculating the output of $ \\int x^3 dx $:", "from sympy import *\ninit_printing()\nx = Symbol(\"x\")\nintegrate(x ** 3, x)", "and just to prove I'm not making it up... (change 3 to 6 and Ctrl+Enter)\nHow we're going to approach this\n\n\nMotivated by analysing scientific data\n\n\nLearning about basic debugging and standard bits of the language\n\n\nUsing Etherpad for discussion, group notes and info\n\n\nFor a more detailed intro to Python in science, definitely check out swcarpentry.github.io/python-novice-inflammation/\n\n\nThe dataset used in this course is that generated by the Software Carpentry team, ensuring it has been well tested across a large number of sessions internationally. They make their resources freely available under a Creative Commons license, so you should definitely check it out. 
I should mention that, while it has been helpful for preparing these sessions, this course is not endorsed by or affiliated with Software Carpentry.\nToday's tools\nHighly technical sticky notes!\n\n\nStars say I'm just working away over here - put them up when you start\n\n\nuse them to show you're busy\n\n\nArrows say I've completed the task - put them up when you finish\n\n\nhelps us see when most people are ready to move on\nLinux\nYou all have machines running Linux - common in scientific computing, easier to manage libraries/programs, easier for me to help you.\n\nClick on the word Activities (top-left)\nClick the Firefox button (1/2 way down on left)\n\nThis isn't a Linux course, so we're going to keep it very basic, and stick to programming\nEtherpad\nLive questions, notes and comments\nIn Firefox, go to https://etherpad.mozilla.org and enter\nqub-python-course-23Yn9\n\nEnter your name at the top-right\nIn the chat window (bottom right) say, hi, hello, bout ye &lt;Return&gt;\nIn the big window, stick your name at the end, with some info\n\n2 - , or anything else (polite, obviously)\n3 - on a new line - suggested info is on that page\n4 - don't forget to put up your arrow sticker when done\n5 - you can even pick your favourite colour by clicking the little box next to your name\n(Etherpad name: qub-python-course-23Yn9)\nhttps://etherpad.mozilla.org\n<br/>\nJupyter\nToday's first Python tool\n\nPress Alt-F2 and type jupyter notebook &lt;Return&gt;\nWhen the new window appears, click on Basic control structures to open it\n\nThis is our complicated command for the day - the rest we get to do by point-and-click\nnote the 'y'" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
vermouth1992/tf-playground
pytorch/ANIML.ipynb
apache-2.0
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable as V\nimport pandas as pd\nimport random\nimport seaborn as sbs\nfrom tqdm import tqdm_notebook as tqdm\nsbs.set_style('darkgrid')\n%matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')", "This notebook reproduces both MAML and the similar Reptile.\nThe Problem\nhttps://towardsdatascience.com/fun-with-small-image-data-sets-8c83d95d0159\nThe goal of both of these algorithms is to learn to do well at the K-shot learning problem.\nIn K-shot learning, we need to train a neural network to generalize based on a very small number of examples (often on the order of 10 or so) instead of the often thousands of examples we see in datasets like ImageNet. However, in preparation for K-shot learning, you are allowed to train on many similar K-shot problems to learn the best way to generalize based on only K examples. \nThis is learning to learn or metalearning. We have already seen metalearning in my post on \"Learning to Learn by Gradient Descent by Gradient Descent\", which you can find here:\nhttps://becominghuman.ai/paper-repro-learning-to-learn-by-gradient-descent-by-gradient-descent-6e504cc1c0de\nThe metalearning approach of both Reptile and MAML is to come up with an initialization for neural networks that is easily generalizable to similar tasks. This is different to \"Learning to Learn by Gradient Descent by Gradient Descent\" in which we weren't learning an initialization but rather an optimizer.\nThis approach is very similar to transfer learning, in which we train a network on, say, ImageNet, and it later turns out that fine-tuning this network makes it easy to learn another image dataset, with much less data. Indeed, transfer learning can be seen as a form of metalearning.\nThe difference here is that the initial network was trained with the explicit purpose of being easily generalizable, whereas transfer learning just \"accidentally\" happens to work, and thus might not work optimally.\nIndeed, it is fairly easy to find a in which transfer learnings fails to learn a good initialization. For this we need to look at the 1D sine wave regression problem.\nSine Wave Regression\nIn this K-shot problem, each task consists in learning a modified sine function. 
Specifically, for each task, the underlying function will be of the form y = a sin(x + b), with both a and b chosen randomly, and the goal of our neural network is to learn to find y given x based on only 10 (x, y) pairs.\nLet's write our sine wave task and plot a couple of examples:", "class SineWaveTask:\n def __init__(self):\n self.a = np.random.uniform(0.1, 5.0)\n self.b = np.random.uniform(0, 2*np.pi)\n self.train_x = None\n \n def f(self, x):\n return self.a * np.sin(x + self.b)\n \n def training_set(self, size=10, force_new=False):\n if self.train_x is None and not force_new:\n self.train_x = np.random.uniform(-5, 5, size)\n x = self.train_x\n elif not force_new:\n x = self.train_x\n else:\n x = np.random.uniform(-5, 5, size)\n y = self.f(x)\n return torch.Tensor(x), torch.Tensor(y)\n \n def test_set(self, size=50):\n x = np.linspace(-5, 5, size)\n y = self.f(x)\n return torch.Tensor(x), torch.Tensor(y)\n \n def plot(self, *args, **kwargs):\n x, y = self.test_set(size=100)\n return plt.plot(x.numpy(), y.numpy(), *args, **kwargs)\n \nSineWaveTask().plot()\nSineWaveTask().plot()\nSineWaveTask().plot()\nplt.show()", "To understand why this is going to be a problem for transfer learning, let's plot 1,000 of them:", "for _ in range(1000):\n SineWaveTask().plot(color='black')", "Looks like there is a lot of overlap at each x value, to say the least...\nSince there are multiple possible values for each x across multiple tasks, if we train a single neural net to deal with multiple tasks at the same time, its best bet will simply be to return the average y value across all tasks for each x. What does that look like?", "all_x, all_y = [], []\n\nfor _ in range(10000):\n curx, cury = SineWaveTask().test_set(size=100)\n all_x.append(curx.numpy())\n all_y.append(cury.numpy())\n\navg, = plt.plot(all_x[0], np.mean(all_y, axis=0))\nrand, = SineWaveTask().plot()\nplt.legend([avg, rand], ['Average', 'Random'])\nplt.show()", "The average is basically 0, which means a neural network trained on a lot of tasks would simply return 0 everywhere! It is unclear that this will actually help very much, and yet this is the transfer learning approach in this case...\nLet's see how well it does by actually implementing the model:", "TRAIN_SIZE = 10000\nTEST_SIZE = 1000\n\nclass ModifiableModule(nn.Module):\n def params(self):\n return [p for _, p in self.named_params()]\n \n def named_leaves(self):\n return []\n \n def named_submodules(self):\n return []\n \n def named_params(self):\n subparams = []\n for name, mod in self.named_submodules():\n for subname, param in mod.named_params():\n subparams.append((name + '.' + subname, param))\n return self.named_leaves() + subparams\n \n def set_param(self, name, param):\n if '.' 
in name:\n n = name.split('.')\n module_name = n[0]\n rest = '.'.join(n[1:])\n for name, mod in self.named_submodules():\n if module_name == name:\n mod.set_param(rest, param)\n break\n else:\n setattr(self, name, param)\n \n def copy(self, other, same_var=False):\n for name, param in other.named_params():\n if not same_var:\n param = V(param.data.clone(), requires_grad=True)\n self.set_param(name, param)\n\nclass GradLinear(ModifiableModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.Linear(*args, **kwargs)\n self.weights = V(ignore.weight.data, requires_grad=True)\n self.bias = V(ignore.bias.data, requires_grad=True)\n \n def forward(self, x):\n return F.linear(x, self.weights, self.bias)\n \n def named_leaves(self):\n return [('weights', self.weights), ('bias', self.bias)]\n\nclass SineModel(ModifiableModule):\n def __init__(self):\n super().__init__()\n self.hidden1 = GradLinear(1, 40)\n self.hidden2 = GradLinear(40, 40)\n self.out = GradLinear(40, 1)\n \n def forward(self, x):\n x = F.relu(self.hidden1(x))\n x = F.relu(self.hidden2(x))\n return self.out(x)\n \n def named_submodules(self):\n return [('hidden1', self.hidden1), ('hidden2', self.hidden2), ('out', self.out)]\n \nSINE_TRAIN = [SineWaveTask() for _ in range(TRAIN_SIZE)]\nSINE_TEST = [SineWaveTask() for _ in range(TEST_SIZE)]\n\nONE_SIDED_EXAMPLE = None\nwhile ONE_SIDED_EXAMPLE is None:\n cur = SineWaveTask()\n x, _ = cur.training_set()\n x = x.numpy()\n if np.max(x) < 0 or np.min(x) > 0:\n ONE_SIDED_EXAMPLE = cur\n\nSINE_TRANSFER = SineModel()\n\ndef sine_fit1(net, wave, optim=None, get_test_loss=False, create_graph=False, force_new=False):\n net.train()\n if optim is not None:\n optim.zero_grad()\n x, y = wave.training_set(force_new=force_new)\n loss = F.mse_loss(net(V(x[:, None])), V(y).unsqueeze(1))\n loss.backward(create_graph=create_graph, retain_graph=True)\n if optim is not None:\n optim.step()\n if get_test_loss:\n net.eval()\n x, y = wave.test_set()\n loss_test = F.mse_loss(net(V(x[:, None])), V(y))\n return loss.data.cpu().numpy()[0], loss_test.data.cpu().numpy()[0]\n return loss.data.cpu().numpy()#[0]\n\ndef fit_transfer(epochs=1):\n optim = torch.optim.Adam(SINE_TRANSFER.params())\n\n for _ in range(epochs):\n for t in random.sample(SINE_TRAIN, len(SINE_TRAIN)):\n sine_fit1(SINE_TRANSFER, t, optim)\n \nfit_transfer()\n\ndef copy_sine_model(model):\n m = SineModel()\n m.copy(model)\n return m\n\ndef eval_sine_test(model, test, fits=(0, 1), lr=0.01):\n xtest, ytest = test.test_set()\n xtrain, ytrain = test.training_set()\n\n model = copy_sine_model(model)\n # Not sure if this should be Adam or SGD.\n optim = torch.optim.SGD(model.params(), lr)\n \n def get_loss(res):\n return F.mse_loss(res, V(ytest[:, None])).cpu().data.numpy()#[0]\n \n fit_res = []\n if 0 in fits:\n results = model(V(xtest[:, None]))\n fit_res.append((0, results, get_loss(results)))\n for i in range(np.max(fits)):\n sine_fit1(model, test, optim)\n if i + 1 in fits:\n results = model(V(xtest[:, None]))\n fit_res.append(\n (\n i + 1, \n results,\n get_loss(results)\n )\n )\n\n return fit_res\n\ndef plot_sine_test(model, test, fits=(0, 1), lr=0.01):\n xtest, ytest = test.test_set()\n xtrain, ytrain = test.training_set()\n\n fit_res = eval_sine_test(model, test, fits, lr)\n \n train, = plt.plot(xtrain.numpy(), ytrain.numpy(), '^')\n ground_truth, = plt.plot(xtest.numpy(), ytest.numpy())\n plots = [train, ground_truth]\n legend = ['Training Points', 'True Function']\n for n, res, loss in fit_res:\n cur, = 
plt.plot(xtest.numpy(), res.cpu().data.numpy()[:, 0], '--')\n        plots.append(cur)\n        legend.append(f'After {n} Steps')\n    plt.legend(plots, legend)\n    plt.show()\n    \nplot_sine_test(SINE_TRANSFER, SINE_TEST[0], fits=[0, 1, 10], lr=0.02)", "Basically it looks like our transfer model learns a constant function, and that it is really hard to fine-tune it into something better than a constant function. It's not even clear that our transfer learning is any better than random initialization...", "def plot_sine_learning(models, fits=(0, 1), lr=0.01, marker='s', linestyle='--'):\n    data = {'model': [], 'fits': [], 'loss': [], 'set': []}\n    for name, models in models:\n        if not isinstance(models, list):\n            models = [models]\n        for n_model, model in enumerate(models):\n            for n_test, test in enumerate(SINE_TEST):\n                n_test = n_model * len(SINE_TEST) + n_test\n                fit_res = eval_sine_test(model, test, fits, lr)\n                for n, _, loss in fit_res:\n                    data['model'].append(name)\n                    data['fits'].append(n)\n                    data['loss'].append(loss)\n                    data['set'].append(n_test)\n    \n    # one line per model, aggregated over the individual test sets\n    ax = sbs.lineplot(data=pd.DataFrame(data), x='fits', y='loss',\n                      hue='model', marker=marker, linestyle=linestyle)\n    \nplot_sine_learning(\n    [('Transfer', SINE_TRANSFER), ('Random', SineModel())],\n    list(range(100)),\n    marker='',\n    linestyle='-'\n)", "MAML\nWe now come to MAML, the first of the two algorithms we will look at today.\nAs mentioned before, we are trying to find a set of weights such that running gradient descent on similar tasks makes progress as quickly as possible. MAML takes this extremely literally by running one iteration of gradient descent and then updating the initial weights based on how much progress that one iteration made towards the true task. More concretely, it:\n* Creates a copy of the initialization weights\n* Runs an iteration of gradient descent for a random task on the copy\n* Backpropagates the loss on a test set through the iteration of gradient descent and back to the initial weights, so that we can update the initial weights in a direction in which they would have been easier to update.\nWe thus need to take a gradient of a gradient, i.e. a second-order derivative, in this process. 
Fortunately this is something that PyTorch supports now, unfortunately PyTorch makes it a bit awkward to update the parameters of a model in a way that we can still run gradient descent through them (we already saw this is \"Learning to Learn by Gradient Descent by Gradient Descent\"), which explains the weird way in which the model is written.\nBecause we are going to use second derivatives, we need to make sure that the computational graph that allowed us to compute the original gradients stays around, which is why we pass create_graph=True to .backward().\nThe code below also implements first order MAML, which we explain later:", "def maml_sine(model, epochs, lr_inner=0.01, batch_size=1, first_order=False):\n optimizer = torch.optim.Adam(model.params())\n \n for _ in tqdm(range(epochs)):\n # Note: the paper doesn't specify the meta-batch size for this task,\n # so I just use 1 for now.\n for i, t in enumerate(random.sample(SINE_TRAIN, len(SINE_TRAIN))):\n new_model = SineModel()\n new_model.copy(model, same_var=True)\n loss = sine_fit1(new_model, t, create_graph=not first_order)\n for name, param in new_model.named_params():\n grad = param.grad\n if first_order:\n grad = V(grad.detach().data)\n new_model.set_param(name, param - lr_inner * grad)\n \n sine_fit1(new_model, t, force_new=True)\n\n if (i + 1) % batch_size == 0:\n optimizer.step()\n optimizer.zero_grad()\n \nSINE_MAML = [SineModel() for _ in range(5)]\n\nfor m in SINE_MAML:\n maml_sine(m, 4)\n\nplot_sine_test(SINE_MAML[0], SINE_TEST[0], fits=[0, 1, 10], lr=0.01)\nplt.show()\n\nplot_sine_learning(\n [('Transfer', SINE_TRANSFER), ('MAML', SINE_MAML[0]), ('Random', SineModel())],\n list(range(10)),\n)\nplt.show()\n\nplot_sine_test(SINE_MAML[0], ONE_SIDED_EXAMPLE, fits=[0, 1, 10], lr=0.01)\nplt.show()", "So MAML works much better than transfer learning or random initialization for this problem. Yay!\nHowever, it is a bit annoying that we have to use second order derivatives for this... it forces the code to be complicated and it also makes things a fair bit slower (around 33% according to the paper, which matches what we shall see here).\nIs there an approximation of MAML that doesn't use the second order derivatives? Of course, we can simply pretend that the gradients that we used for the inner gradient descent just came out of nowhere, and thus just improve the initial parameters without taking into account these second order derivatives, which is what we did before by handling the first_order parameter.\nSo how good is this first order approximation? 
Almost as good as the original MAML, as it turns out!", "SINE_MAML_FIRST_ORDER = [SineModel() for _ in range(5)]\n\nfor m in SINE_MAML_FIRST_ORDER:\n maml_sine(m, 4, first_order=True)\n\nplot_sine_test(SINE_MAML_FIRST_ORDER[0], SINE_TEST[0], fits=[0, 1, 10], lr=0.01)\nplt.show()\n\nplot_sine_learning(\n [('MAML', SINE_MAML), ('MAML First Order', SINE_MAML_FIRST_ORDER)],\n list(range(10)),\n)\nplt.show()\n\nplot_sine_test(SINE_MAML_FIRST_ORDER[0], ONE_SIDED_EXAMPLE, fits=[0, 1, 10], lr=0.01)\nplt.show()", "Reptile\nThe first order approximation for MAML tells us that something interesting is going on: after all, it seems like how the gradients were generated should be relevant for a good initialization, and yet it apparently isn't so much.\nReptile takes this idea even further by telling us to do the following: run SGD for a few iterations on a given task, and then move your initialization weights a little bit in the direction of the weights you obtained after your k iterations of SGD. An algorithm so simple, it takes only a couple lines of pseudocode:\n\nWhen I first read this, I was quite consternated: isn't this the same as training your weights alternatively on each task, just like in transfer learning? How would this ever work?\nIndeed, the Reptile paper anticipates this very reaction:\n\nYou might be thinking “isn’t this the same as training on the expected loss Eτ [Lτ]?” and then checking if the date is April 1st.\n\nAs it happens, I am writing this on April 2nd, so this is all serious. So what's going on?\nWell, indeed if we had run SGD for a single iteration, we would have something equivalent to the transfer learning described above, but we aren't we are using a few iterations, and so indeed the weights we update towards each time actually depend indirectly on the second derivatives of the loss, similar to MAML.\nOk, but still, why would this work? Well Reptile provides a compelling intuition for this: for each task, there are weights that are optimal. Indeed, there are probably many sets of weights that are optimal. This means that if you take several tasks, there should be a set of weights for which the distance to at least one optimal set of weights for each task is minimal. This set of weights is where we want to initialize our networks, since it is likely to be the one for which the least work is necessary to reach the optimum for any task. This is the set of weights that Reptile finds.\nWe can see this expressed visually in the following image: the two black lines represent the sets of optimal weights for two different tasks, while the gray line represents the initialization weights. 
Reptile tries to get the initialization weights closer and closer to the point where the optimal weights are nearest to each other.\n\nLet's now implement Reptile and compare it to MAML:", "def reptile_sine(model, epochs, lr_inner=0.01, lr_outer=0.001, k=32, batch_size=32):\n optimizer = torch.optim.Adam(model.params(), lr=lr_outer)\n \n name_to_param = dict(model.named_params())\n \n for _ in tqdm(range(epochs)):\n for i, t in enumerate(random.sample(SINE_TRAIN, len(SINE_TRAIN))):\n new_model = SineModel()\n new_model.copy(model)\n inner_optim = torch.optim.SGD(new_model.params(), lr=lr_inner)\n for _ in range(k):\n sine_fit1(new_model, t, inner_optim)\n \n for name, param in new_model.named_params():\n cur_grad = (name_to_param[name].data - param.data) / k / lr_inner\n if name_to_param[name].grad is None:\n name_to_param[name].grad = V(torch.zeros(cur_grad.size()))\n name_to_param[name].grad.data.add_(cur_grad / batch_size)\n# if (i + 1) % 500 == 0:\n# print(name_to_param[name].grad)\n \n if (i + 1) % batch_size == 0:\n to_show = name_to_param['hidden1.bias']\n optimizer.step()\n optimizer.zero_grad()\n\nSINE_REPTILE = [SineModel() for _ in range(5)]\n\nfor m in SINE_REPTILE:\n reptile_sine(m, 4, k=3, batch_size=1)\n\nplot_sine_test(SINE_REPTILE[0], SINE_TEST[0], fits=[0, 1, 10], lr=0.01)\nplt.show()\n\nplot_sine_learning(\n [('MAML', SINE_MAML), ('MAML First Order', SINE_MAML_FIRST_ORDER), ('Reptile', SINE_REPTILE)],\n list(range(32)),\n)\nplt.show()\n\nplot_sine_test(SINE_REPTILE[0], ONE_SIDED_EXAMPLE, fits=[0, 1, 10], lr=0.01)\nplt.show()", "It looks like Reptile does indeed achieve similar or even slightly better performance to MAML with a much simpler and slightly faster algorithm! Very exciting. And this applies to many more problems than just this toy example of sine waves! For more details, I really do recommend you read the paper. At this point, you should have enough background to understand them quite easily." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hetaodie/hetaodie.github.io
assets/media/uda-ml/code/boston_housing/.ipynb_checkpoints/boston_housing-checkpoint.ipynb
mit
[ "机器学习工程师纳米学位\n模型评价与验证\n项目 1: 预测波士顿房价\n欢迎来到机器学习的预测波士顿房价项目!在此文件中,有些示例代码已经提供给你,但你还需要实现更多的功能来让项目成功运行。除非有明确要求,你无须修改任何已给出的代码。以编程练习开始的标题表示接下来的内容中有需要你必须实现的功能。每一部分都会有详细的指导,需要实现的部分也会在注释中以TODO标出。请仔细阅读所有的提示!\n除了实现代码外,你还必须回答一些与项目和实现有关的问题。每一个需要你回答的问题都会以'问题 X'为标题。请仔细阅读每个问题,并且在问题后的'回答'文字框中写出完整的答案。你的项目将会根据你对问题的回答和撰写代码所实现的功能来进行评分。\n\n提示:Code 和 Markdown 区域可通过 Shift + Enter 快捷键运行。此外,Markdown可以通过双击进入编辑模式。\n\n\n第一步. 导入数据\n在这个项目中,你将利用马萨诸塞州波士顿郊区的房屋信息数据训练和测试一个模型,并对模型的性能和预测能力进行测试。通过该数据训练后的好的模型可以被用来对房屋做特定预测---尤其是对房屋的价值。对于房地产经纪等人的日常工作来说,这样的预测模型被证明非常有价值。\n此项目的数据集来自UCI机器学习知识库(数据集已下线)。波士顿房屋这些数据于1978年开始统计,共506个数据点,涵盖了麻省波士顿不同郊区房屋14种特征的信息。本项目对原始数据集做了以下处理:\n- 有16个'MEDV' 值为50.0的数据点被移除。 这很可能是由于这些数据点包含遗失或看不到的值。\n- 有1个数据点的 'RM' 值为8.78. 这是一个异常值,已经被移除。\n- 对于本项目,房屋的'RM', 'LSTAT','PTRATIO'以及'MEDV'特征是必要的,其余不相关特征已经被移除。\n- 'MEDV'特征的值已经过必要的数学转换,可以反映35年来市场的通货膨胀效应。\n运行下面区域的代码以载入波士顿房屋数据集,以及一些此项目所需的 Python 库。如果成功返回数据集的大小,表示数据集已载入成功。", "# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import ShuffleSplit\n\n# Import supplementary visualizations code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the Boston housing dataset\ndata = pd.read_csv('housing.csv')\nprices = data['MEDV']\nfeatures = data.drop('MEDV', axis = 1)\n \n# Success\nprint(\"Boston housing dataset has {} data points with {} variables each.\".format(*data.shape))", "第二步. 分析数据\n在项目的第一个部分,你会对波士顿房地产数据进行初步的观察并给出你的分析。通过对数据的探索来熟悉数据可以让你更好地理解和解释你的结果。\n由于这个项目的最终目标是建立一个预测房屋价值的模型,我们需要将数据集分为特征(features)和目标变量(target variable)。\n- 特征 'RM', 'LSTAT',和 'PTRATIO',给我们提供了每个数据点的数量相关的信息。\n- 目标变量:'MEDV',是我们希望预测的变量。\n他们分别被存在 features 和 prices 两个变量名中。\n编程练习 1:基础统计运算\n你的第一个编程练习是计算有关波士顿房价的描述统计数据。我们已为你导入了 NumPy,你需要使用这个库来执行必要的计算。这些统计数据对于分析模型的预测结果非常重要的。\n在下面的代码中,你要做的是:\n- 计算 prices 中的 'MEDV' 的最小值、最大值、均值、中值和标准差;\n- 将运算结果储存在相应的变量中。", "# TODO: Minimum price of the data\nminimum_price = None\n\n# TODO: Maximum price of the data\nmaximum_price = None\n\n# TODO: Mean price of the data\nmean_price = None\n\n# TODO: Median price of the data\nmedian_price = None\n\n# TODO: Standard deviation of prices of the data\nstd_price = None\n\n# Show the calculated statistics\nprint(\"Statistics for Boston housing dataset:\\n\")\nprint(\"Minimum price: ${:.2f}\".format(minimum_price)) \nprint(\"Maximum price: ${:.2f}\".format(maximum_price))\nprint(\"Mean price: ${:.2f}\".format(mean_price))\nprint(\"Median price ${:.2f}\".format(median_price))\nprint(\"Standard deviation of prices: ${:.2f}\".format(std_price))", "问题 1 - 特征观察\n如前文所述,本项目中我们关注的是其中三个值:'RM'、'LSTAT' 和'PTRATIO',对每一个数据点:\n- 'RM' 是该地区中每个房屋的平均房间数量;\n- 'LSTAT' 是指该地区有多少百分比的业主属于是低收入阶层(有工作但收入微薄);\n- 'PTRATIO' 是该地区的中学和小学里,学生和老师的数目比(学生/老师)。\n凭直觉,上述三个特征中对每一个来说,你认为增大该特征的数值,'MEDV'的值会是增大还是减小呢?每一个答案都需要你给出理由。\n提示:你预期一个'RM' 值是6的房屋跟'RM' 值是7的房屋相比,价值更高还是更低呢?\n问题 1 - 回答:\n\n第三步. 
建立模型\n在项目的第三步中,你需要了解必要的工具和技巧来让你的模型进行预测。用这些工具和技巧对每一个模型的表现做精确的衡量可以极大地增强你预测的信心。\n编程练习2:定义衡量标准\n如果不能对模型的训练和测试的表现进行量化地评估,我们就很难衡量模型的好坏。通常我们会定义一些衡量标准,这些标准可以通过对某些误差或者拟合程度的计算来得到。在这个项目中,你将通过运算决定系数 $R^2$ 来量化模型的表现。模型的决定系数是回归分析中十分常用的统计信息,经常被当作衡量模型预测能力好坏的标准。\n$R^2$ 的数值范围从0至1,表示目标变量的预测值和实际值之间的相关程度平方的百分比。一个模型的 $R^2$ 值为0还不如直接用平均值来预测效果好;而一个 $R^2$ 值为1的模型则可以对目标变量进行完美的预测。从0至1之间的数值,则表示该模型中目标变量中有百分之多少能够用特征来解释。模型也可能出现负值的 $R^2$,这种情况下模型所做预测有时会比直接计算目标变量的平均值差很多。\n在下方代码的 performance_metric 函数中,你要实现:\n- 使用 sklearn.metrics 中的 r2_score 来计算 y_true 和 y_predict 的 $R^2$ 值,作为对其表现的评判。\n- 将他们的表现评分储存到 score 变量中。", "# TODO: Import 'r2_score'\n\ndef performance_metric(y_true, y_predict):\n \"\"\" Calculates and returns the performance score between \n true and predicted values based on the metric chosen. \"\"\"\n \n # TODO: Calculate the performance score between 'y_true' and 'y_predict'\n score = None\n \n # Return the score\n return score", "问题 2 - 拟合程度\n假设一个数据集有五个数据且一个模型做出下列目标变量的预测:\n| 真实数值 | 预测数值 |\n| :-------------: | :--------: |\n| 3.0 | 2.5 |\n| -0.5 | 0.0 |\n| 2.0 | 2.1 |\n| 7.0 | 7.8 |\n| 4.2 | 5.3 |\n你觉得这个模型已成功地描述了目标变量的变化吗?如果成功,请解释为什么,如果没有,也请给出原因。 \n提示1:运行下方的代码,使用 performance_metric 函数来计算 y_true 和 y_predict 的决定系数。\n提示2:$R^2$ 分数是指可以从自变量中预测的因变量的方差比例。 换一种说法:\n\n$R^2$ 为0意味着因变量不能从自变量预测。\n$R^2$ 为1意味着可以从自变量预测因变量。\n$R^2$ 在0到1之间表示因变量可预测的程度。\n$R^2$ 为0.40意味着 Y 中40%的方差可以从 X 预测。", "# Calculate the performance of this model\nscore = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])\nprint(\"Model has a coefficient of determination, R^2, of {:.3f}.\".format(score))", "问题 2 - 回答:\n编程练习 3: 数据分割与重排\n接下来,你需要把波士顿房屋数据集分成训练和测试两个子集。通常在这个过程中,数据也会被重排列,以消除数据集中由于顺序而产生的偏差。\n在下面的代码中,你需要\n\n使用 sklearn.model_selection 中的 train_test_split, 将 features 和 prices 的数据都分成用于训练的数据子集和用于测试的数据子集。\n分割比例为:80%的数据用于训练,20%用于测试;\n选定一个数值以设定 train_test_split 中的 random_state ,这会确保结果的一致性;\n将分割后的训练集与测试集分配给 X_train, X_test, y_train 和 y_test。", "# TODO: Import 'train_test_split'\n\n# TODO: Shuffle and split the data into training and testing subsets\nX_train, X_test, y_train, y_test = (None, None, None, None)\n\n# Success\nprint(\"Training and testing split was successful.\")", "问题 3 - 训练及测试\n将数据集按一定比例分为训练用的数据集和测试用的数据集对学习算法有什么好处?\n如果用模型已经见过的数据,例如部分训练集数据进行测试,又有什么坏处?\n提示: 如果没有数据来对模型进行测试,会出现什么问题?\n问题 3 - 回答:\n\n第四步. 
分析模型的表现\n在项目的第四步,我们来看一下不同参数下,模型在训练集和验证集上的表现。这里,我们专注于一个特定的算法(带剪枝的决策树,但这并不是这个项目的重点),和这个算法的一个参数 'max_depth'。用全部训练集训练,选择不同'max_depth' 参数,观察这一参数的变化如何影响模型的表现。画出模型的表现来对于分析过程十分有益。\n学习曲线\n下方区域内的代码会输出四幅图像,它们是一个决策树模型在不同最大深度下的表现。每一条曲线都直观得显示了随着训练数据量的增加,模型学习曲线的在训练集评分和验证集评分的变化,评分使用决定系数 $R^2$。曲线的阴影区域代表的是该曲线的不确定性(用标准差衡量)。\n运行下方区域中的代码,并利用输出的图形回答下面的问题。", "# Produce learning curves for varying training set sizes and maximum depths\nvs.ModelLearning(features, prices)", "问题 4 - 学习曲线\n\n选择上述图像中的其中一个,并给出其最大深度。\n随着训练数据量的增加,训练集曲线的评分有怎样的变化?验证集曲线呢?\n如果有更多的训练数据,是否能有效提升模型的表现呢?\n\n提示:学习曲线的评分是否最终会收敛到特定的值?一般来说,你拥有的数据越多,模型表现力越好。但是,如果你的训练和测试曲线以高于基准阈值的分数收敛,这是否有必要?基于训练和测试曲线已经收敛的前提下,思考添加更多训练点的优缺点。\n问题 4 - 回答:\n复杂度曲线\n下列代码内的区域会输出一幅图像,它展示了一个已经经过训练和验证的决策树模型在不同最大深度条件下的表现。这个图形将包含两条曲线,一个是训练集的变化,一个是验证集的变化。跟学习曲线相似,阴影区域代表该曲线的不确定性,模型训练和测试部分的评分都用的 performance_metric 函数。\n运行下方区域中的代码,并利用输出的图形并回答下面的问题5与问题6。", "vs.ModelComplexity(X_train, y_train)", "问题 5 - 偏差(bias)与方差(variance)之间的权衡取舍\n\n当模型以最大深度 1训练时,模型的预测是出现很大的偏差还是出现了很大的方差?\n当模型以最大深度10训练时,情形又如何呢?\n图形中的哪些特征能够支持你的结论?\n\n提示: 高偏差表示欠拟合(模型过于简单),而高方差表示过拟合(模型过于复杂,以至于无法泛化)。考虑哪种模型(深度1或10)对应着上述的情况,并权衡偏差与方差。\n问题 5 - 回答:\n问题 6- 最优模型的猜测\n\n结合问题 5 中的图,你认为最大深度是多少的模型能够最好地对未见过的数据进行预测?\n你得出这个答案的依据是什么?\n\n提示:查看问题5上方的图表,并查看模型在不同 depth下的验证分数。随着深度的增加模型的表现力会变得更好吗?我们在什么情况下获得最佳验证分数而不会使我们的模型过度复杂?请记住,奥卡姆剃刀:“在竞争性假设中,应该选择假设最少的那一个。”\n问题 6 - 回答:\n\n第五步. 评估模型的表现\n在项目的最后一节中,你将构建一个模型,并使用 fit_model 中的优化模型去预测客户特征集。\n问题 7- 网格搜索(Grid Search)\n\n什么是网格搜索法?\n如何用它来优化模型?\n\n提示:在解释网格搜索算法时,首先要理解我们为什么使用网格搜索算法,以及我们使用它的最终目的是什么。为了使你的回答更具有说服力,你还可以给出一个模型中可以使用此方法进行优化参数的示例。\n问题 7 - 回答:\n问题 8 - 交叉验证\n\n什么是K折交叉验证法(k-fold cross-validation)?\nGridSearchCV 是如何结合交叉验证来完成对最佳参数组合的选择的?\nGridSearchCV 中的'cv_results_'属性能告诉我们什么?\n网格搜索为什么要使用K折交叉验证?K折交叉验证能够避免什么问题?\n\n提示:在解释k-fold交叉验证时,一定要理解'k'是什么,和数据集是如何分成不同的部分来进行训练和测试的,以及基于'k'值运行的次数。\n在考虑k-fold交叉验证如何帮助网格搜索时,你可以使用特定的数据子集来进行训练与测试有什么缺点,以及K折交叉验证是如何帮助缓解这个问题。\n问题 8 - 回答:\n编程练习 4:拟合模型\n在这个练习中,你将需要将所学到的内容整合,使用决策树算法训练一个模型。为了得出的是一个最优模型,你需要使用网格搜索法训练模型,以找到最佳的 'max_depth' 参数。你可以把'max_depth' 参数理解为决策树算法在做出预测前,允许其对数据提出问题的数量。决策树是监督学习算法中的一种。\n另外,你会发现在实现的过程中是使用ShuffleSplit()作为交叉验证的另一种形式(参见'cv_sets'变量)。虽然它不是你在问题8中描述的K-fold交叉验证方法,但它同样非常有用!下面的ShuffleSplit()实现将创建10个('n_splits')混洗集合,并且对于每个混洗集,数据的20%('test_size')将被用作验证集合。当您在实现代码的时候,请思考一下它与K-fold cross-validation的不同与相似之处。\n请注意,ShuffleSplit 在 Scikit-Learn 版本0.17和0.18中有不同的参数。对于下面代码单元格中的 fit_model 函数,您需要实现以下内容:\n\n定义 'regressor' 变量: 使用 sklearn.tree 中的 DecisionTreeRegressor 创建一个决策树的回归函数;\n定义 'params' 变量: 为 'max_depth' 参数创造一个字典,它的值是从1至10的数组;\n定义 'scoring_fnc' 变量: 使用 sklearn.metrics 中的 make_scorer 创建一个评分函数。将 ‘performance_metric’ 作为参数传至这个函数中;\n定义 'grid' 变量: 使用 sklearn.model_selection 中的 GridSearchCV 创建一个网格搜索对象;将变量'regressor', 'params', 'scoring_fnc'和 'cross_validator' 作为参数传至这个对象构造函数中;\n\n如果你对 Python 函数的默认参数定义和传递不熟悉,可以参考这个MIT课程的视频。", "# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'\n\ndef fit_model(X, y):\n \"\"\" Performs grid search over the 'max_depth' parameter for a \n decision tree regressor trained on the input data [X, y]. 
\"\"\"\n \n # Create cross-validation sets from the training data\n # sklearn version 0.18: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)\n # sklearn versiin 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)\n cv_sets = ShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\n \n # TODO: Create a decision tree regressor object\n regressor = None\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = None\n\n # TODO: Create the grid search cv object --> GridSearchCV()\n # Make sure to include the right parameters in the object:\n # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.\n grid = None\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "第六步. 做出预测\n当我们用数据训练出一个模型,它现在就可用于对新的数据进行预测。在决策树回归函数中,模型已经学会对新输入的数据提问,并返回对目标变量的预测值。你可以用这个预测来获取数据未知目标变量的信息,这些数据必须是不包含在训练数据之内的。\n问题 9 - 最优模型\n最优模型的最大深度(maximum depth)是多少?此答案与你在问题 6所做的猜测是否相同?\n运行下方区域内的代码,将决策树回归函数代入训练数据的集合,以得到最优化的模型。", "# Fit the training data to the model using grid search\nreg = fit_model(X_train, y_train)\n\n# Produce the value for 'max_depth'\nprint(\"Parameter 'max_depth' is {} for the optimal model.\".format(reg.get_params()['max_depth']))", "问题 9 - 回答:\n问题 10 - 预测销售价格\n想像你是一个在波士顿地区的房屋经纪人,并期待使用此模型以帮助你的客户评估他们想出售的房屋。你已经从你的三个客户收集到以下的资讯:\n| 特征 | 客戶 1 | 客戶 2 | 客戶 3 |\n| :---: | :---: | :---: | :---: |\n| 房屋内房间总数 | 5 间房间 | 4 间房间 | 8 间房间 |\n| 社区贫困指数(%被认为是贫困阶层) | 17% | 32% | 3% |\n| 邻近学校的学生-老师比例 | 15:1 | 22:1 | 12:1 |\n\n你会建议每位客户的房屋销售的价格为多少?\n从房屋特征的数值判断,这样的价格合理吗?为什么?\n\n提示:用你在分析数据部分计算出来的统计信息来帮助你证明你的答案。\n运行下列的代码区域,使用你优化的模型来为每位客户的房屋价值做出预测。", "# Produce a matrix for client data\nclient_data = [[5, 17, 15], # Client 1\n [4, 32, 22], # Client 2\n [8, 3, 12]] # Client 3\n\n# Show predictions\nfor i, price in enumerate(reg.predict(client_data)):\n print(\"Predicted selling price for Client {}'s home: ${:,.2f}\".format(i+1, price))", "问题 10 - 回答:\n编程练习 5\n你刚刚预测了三个客户的房子的售价。在这个练习中,你将用你的最优模型在整个测试数据上进行预测, 并计算相对于目标变量的决定系数 $R^2$ 的值。\n提示:\n* 你可能需要用到 X_test, y_test, optimal_reg, performance_metric。\n* 参考问题10的代码进行预测。\n* 参考问题2的代码来计算R^2的值。", "# TODO Calculate the r2 score between 'y_true' and 'y_predict'\n\nr2 = None\n\nprint(\"Optimal model has R^2 score {:,.2f} on test data\".format(r2))", "问题11 - 分析决定系数\n你刚刚计算了最优模型在测试集上的决定系数,你会如何评价这个结果?\n问题11 - 回答\n模型健壮性\n一个最优的模型不一定是一个健壮模型。有的时候模型会过于复杂或者过于简单,以致于难以泛化新增添的数据;有的时候模型采用的学习算法并不适用于特定的数据结构;有的时候样本本身可能有太多噪点或样本过少,使得模型无法准确地预测目标变量。这些情况下我们会说模型是欠拟合的。\n问题 12 - 模型健壮性\n模型是否足够健壮来保证预测的一致性?\n提示: 执行下方区域中的代码,采用不同的训练和测试集执行 fit_model 函数10次。注意观察对一个特定的客户来说,预测是如何随训练数据的变化而变化的。", "vs.PredictTrials(features, prices, fit_model, client_data)", "问题 12 - 回答:\n问题 13 - 实用性探讨\n简单地讨论一下你建构的模型能否在现实世界中使用? \n提示:回答以下几个问题,并给出相应结论的理由:\n- 1978年所采集的数据,在已考虑通货膨胀的前提下,在今天是否仍然适用?\n- 数据中呈现的特征是否足够描述一个房屋?\n- 在波士顿这样的大都市采集的数据,能否应用在其它乡镇地区?\n- 你觉得仅仅凭房屋所在社区的环境来判断房屋价值合理吗?\n问题 13 - 回答:\n第七步.完成和提交\n当你完成了以上所有的代码和问题,你需要将 iPython Notebook 导出 HTML,导出方法:在左上角的菜单中选择 File -> Download as -> HTML (.html)。当你提交项目时,需要包含可运行的 .ipynb 文件和导出的 HTML 文件。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
YaleDHLab/lab-workshops
beautifulsoup/next-steps-with-html-parsing.ipynb
mit
[ "Next Steps with HTML Parsing\nIn our Introduction to HTML Parsing notebook, we learned the basics of extracting text data from HTML pages. This notebook builds on that foundation to help explore some of the more advanced features of BeautifulSoup and some of the more difficult use cases in which one might leverage BeautifulSoup.\nParsing XML\nWe've been focusing on \"HTML\" documents so far, but we can also use BeautifulSoup to parse \"XML\" documents. For example, the following snippet parses the ECCO TCP's XML version of David Garrick's \"Ode on Dedicating a Building\":", "import bs4\n\n# read in the xml file\nsoup = bs4.BeautifulSoup(open('Ode.xml'), 'html.parser')\n\n# get the text content inside the \"EEBO\" tag\ntext = soup.find('eebo').get_text()\n\n# print the text\nprint(text)", "<h2 style='color:green'>Reviewing XML Parsing</h2>\n\nSee if you can use the pattern displayed above to read in and then print the text within \"Rom.xml\". Note that this file does not contain an \"eebo\" tag.\nFiltering Selections\nSometimes an HTML selection returns a mixture of elements we wish to process and others we wish to skip altogether. For example, suppose a web page has multiple div1 tags, and we only wish to parse some of them. In that case, we can use a conditional to ensure we only process the ones we care about. Let's see this in action:", "import bs4\n\n# read in the xml file\nsoup = bs4.BeautifulSoup(open('Ode.xml'), 'html.parser')\n\n# get a list of the div1 tags\nelems = soup.find_all('div1')\n\n# iterate over the div1 tags in soup\nfor i in elems:\n \n # only proceed if the current tag has the attribute type=\"ode\"\n if i['type'] == 'ode':\n \n # print the text content of this div1 element\n print(i.get_text())", "If you are working with an HTML or XML document that contains multiple tags that match a selection, and you only wish to work with a subset of those matched elements, you can use the if syntax above to filter your elements.\n<h2 style='color:green'>Practicing Selection Filtering</h2>\n\nLet's practice some selection filtering operations by processing \"Farce.xml\". See if you can print only the text content within the prologue. To do so, you will need to inspect \"Farce.xml\" to understand its structure!" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
ssh0/sotsuron_for_public
07_model_3_4_1.ipynb
mit
[ "model 3-4:近距離の点をクラスター化するモデル", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.spatial.distance import euclidean as euc\nimport collections\nimport operator\nimport random\nimport bisect\nfrom itertools import chain\nfrom scipy.optimize import leastsq\n\ndef uniq_list(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if x not in seen and not seen_add(x)]\n\ndef accumulate(iterable, func=operator.add):\n \"\"\"Return running totals\n \n Usage:\n accumulate([1,2,3,4,5]) --> 1 3 6 10 15\n accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120\n \"\"\"\n it = iter(iterable)\n total = next(it)\n yield total\n for element in it:\n total = func(total, element)\n yield total\n\ndef weighted_choice(d):\n choices, weights = zip(*d)\n cumdist = list(accumulate(weights))\n x = random.random() * cumdist[-1]\n return choices[bisect.bisect(cumdist, x)]\n\nclass Person:\n \n def __init__(self, master, id, ideas, w):\n \"\"\"Initialize argmunets.\n \n Keyword arguments:\n master : Master class (call from \"Meeting\")\n self.id : Id for each person [0, 1, ..., N-1]\n self.ideas: ideas in space [0,1] × [0,1]\n self.w : probability weight for the person to speak\n \"\"\"\n self.id = id\n self.ideas = ideas\n self.w = w\n # add_ideas : place, tag : (x, y), [person_id, cluster_id]\n master.ideas += [[(i1, i2), [self.id, 0, self.w]] for i1, i2 in self.ideas]\n\n\nclass Cluster:\n \n def __init__(self, ideas, r):\n \"\"\"make cluster with self.r\n \n cluster_link: \n \"\"\"\n self.ideas = ideas\n self.r = r\n self.l = 0\n self.cluster_link = []\n self.clustering()\n \n def clustering(self):\n self.cell_num = int(1./self.r)\n lr = 1./self.cell_num\n \n self.cell = dict() # key: (cellx,celly), value: list of ids\n self.rcell = []\n for i, idea in enumerate(self.ideas):\n cellx = int(idea[0][0]/lr)\n celly = int(idea[0][1]/lr)\n if self.cell.has_key((cellx, celly)):\n self.cell[(cellx, celly)] += [i]\n else:\n self.cell[(cellx, celly)] = [i]\n self.rcell.append((cellx, celly))\n num = 1\n for i in range(len(self.ideas)):\n num += self.find_nearest(i, num)\n return self.cluster_link\n\n def find_nearest(self, idea_id, num):\n \"\"\"find nearest idea\n\n idea_id: index in self.ideas\n \"\"\"\n cx, cy = self.rcell[idea_id]\n place = self.ideas[idea_id][0]\n CX = uniq_list([max(0, cx - 1), cx, min(cx + 1, self.cell_num - 1)])\n CY = uniq_list([max(0, cy - 1), cy, min(cy + 1, self.cell_num - 1)])\n tmp = [self.cell[(i, j)] for i in CX for j in CY if self.cell.has_key((i, j))]\n tmp = list(chain.from_iterable(tmp))\n tmp.remove(idea_id)\n if len(tmp) == 0:\n self.ideas[idea_id][1][1] = num\n return 1\n \n nearest = []\n cid = [num]\n for k in tmp:\n if euc(self.ideas[k][0], place) > self.r:\n continue\n nearest.append(k)\n prenum = self.ideas[k][1][1]\n if prenum == 0:\n cid.append(num)\n self.cluster_link.append((idea_id, k))\n elif prenum < num:\n cid.append(prenum)\n if not (k, idea_id) in self.cluster_link:\n self.cluster_link.append((idea_id, k))\n self.l += len(nearest)\n cluster_id = min(cid)\n if cluster_id < num:\n ans = 0\n else:\n ans = 1\n self.ideas[idea_id][1][1] = cluster_id\n for i in nearest:\n self.ideas[i][1][1] = cluster_id\n cid.remove(num)\n if len(cid) == 0:\n return ans\n cid.remove(cluster_id)\n if len(cid) == 0:\n return ans\n for i in cid:\n for x in self.ideas:\n if x[1][1] == i:\n x[1][1] = cluster_id\n return ans\n\n \nclass Meeting:\n \n def __init__(self, K, N, S=20, r=0.06, draw=True):\n self.K = K\n self.N = N\n self.S = S\n self.r 
= r\n self.ideas = []\n self.minutes = []\n self.ave_l = 0\n self.draw = draw\n \n def gather_people(self, ideass=None, weights=None):\n \"\"\"Gather participants.\n \n Keyword arguments:\n ideas : list of ideas for each person\n ex) [((0.3,0.1),(0.2,0.5)), ((0.5,0.6))] when N = 2\n weights: list of weights for the probability of the person to speak\n \"\"\"\n if not ideass:\n x = np.random.rand(self.N, self.S*2)\n ideass = []\n for _x in x:\n ideass.append([(i,j) for i,j in zip(_x[::2], _x[1::2])])\n if not weights:\n weights = [1.] * self.N\n for i, ideas, w in zip(range(self.N), ideass, weights):\n Person(self, i, ideas, w)\n\n def init(self):\n self.gather_people()\n cluster = Cluster(self.ideas, self.r)\n self.cluster_link = cluster.cluster_link\n self.ave_l = cluster.l/float(len(self.ideas))\n if self.draw:\n colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n self.fig = plt.figure(figsize=(9, 9))\n self.ax = self.fig.add_subplot(1, 1, 1)\n self.labels = []\n self.s1 = []\n for idea, tag in self.ideas:\n x = idea[0]\n y = idea[1]\n s = self.ax.scatter(x, y,\n c=colors[tag[0]%len(colors)],\n alpha=0.2)\n self.s1.append(s)\n data = []\n for link in self.cluster_link:\n ix = self.ideas[link[0]][0][0]\n iy = self.ideas[link[0]][0][1]\n jx = self.ideas[link[1]][0][0]\n jy = self.ideas[link[1]][0][1]\n data += [(ix, jx), (iy, jy), 'k']\n self.ax.plot(*data, alpha=0.5)\n \n def progress(self):\n self.init()\n preidea = self.ideas[np.random.choice(range(len(self.ideas)))]\n self.minutes.append(preidea)\n l = list(self.ideas)\n self.k = 1\n\n while self.k < self.K + 1:\n \n # remove ideas in the same cluster\n l = [idea for idea in l if idea[1][1] != preidea[1][1]]\n\n # if no one can speak: meeting ends.\n if len(l) == 0:\n break\n\n # confirm cluster id which is nearest from the preidea\n distance = [(euc(preidea[0], i[0]), i) for i in l]\n minclusterid = min(distance)[1][1][1]\n \n # gather ideas in the cluster\n tmp = [idea for idea in l if idea[1][1] == minclusterid]\n d = dict()\n for t in tmp:\n d[t[1][0]] = d.get(t[1][0], 0) + t[1][2]\n d = [(k, v) for k, v in d.items()]\n # chose whose ideas to be chosed from the cluster\n whois = weighted_choice(d)\n \n # gather ideas\n who = [idea for idea in tmp if idea[1][0] == whois]\n p = [(idea, idea[1][2]) for idea in who]\n # chose the next idea from the id is \"whois\"\n idea = weighted_choice(p)\n\n self.minutes.append(idea)\n preidea = idea\n self.callback()\n self.k += 1\n self.after()\n\n def callback(self):\n if self.draw:\n ix = self.minutes[-2][0][0]\n iy = self.minutes[-2][0][1]\n jx = self.minutes[-1][0][0]\n jy = self.minutes[-1][0][1]\n l1 = self.ax.plot([ix, jx], [iy, jy], color='b', alpha=0.5)\n self.ax.text((ix+jx)/2, (iy+jy)/2, self.k)\n else:\n pass\n\n def after(self):\n if self.draw:\n plt.show()\n else:\n pass", "閾値$r$を変えたときに意見の総数に対するクラスターの数との関係。横軸$r$、縦軸$1- (\\text{クラスターの数})/(\\text{意見の総数})$の通常のプロット(上段)と両対数プロット(下段)。", "trial = 100\n\nr = np.logspace(-2, np.log10(0.2), num=50)\nphi1 = []\nfor _r in r:\n _phi = 0.\n for t in range(trial):\n meeting = Meeting(K=50, N=6, r=_r, draw=False)\n meeting.init()\n _phi += len(uniq_list([x[1][1] for x in meeting.ideas]))/float(len(meeting.ideas))\n phi1.append(1 - _phi/trial)\n\ndef myplot1(x, y, xfit=np.array([]), yfit=np.array([]), param=None,\n scale=['linear', 'linear', 'log', 'log']):\n \"\"\"my plot function\n \n x: {'label_x', xdata}\n y: {'label_y', ydata}\n param: {'a': 10, 'b': 20}\n \"\"\"\n if param:\n s = [r'$%s = %f$' % (k, v) for k, v in param.items()]\n label = s[0]\n for 
_s in s[1:]:\n label += \", \" + _s\n label_x, xdata = x.items()[0]\n label_y, ydata = y.items()[0]\n fig = plt.figure(figsize=(8, 12))\n ax1 = fig.add_subplot(211)\n\n ax1.plot(xdata, ydata)\n if len(xfit):\n ax1.plot(xfit, yfit, label=label)\n ax1.legend(loc='best')\n ax1.set_xlabel(label_x)\n ax1.set_ylabel(label_y)\n ax1.set_xscale(scale[0])\n ax1.set_yscale(scale[1])\n \n ax2 = fig.add_subplot(212)\n ax2.plot(xdata, ydata)\n if len(xfit):\n ax2.plot(xfit, yfit, label=label)\n ax2.legend(loc='best')\n ax2.set_xlabel(label_x)\n ax2.set_ylabel(label_y)\n ax2.set_xscale(scale[2])\n ax2.set_yscale(scale[3])\n plt.show()", "通常のプロット", "myplot1({r'$r$': r}, {r'$\\phi$': phi1})", "フィッティング用関数", "def myfit(fit_func, parameter, x, y, xmin, xmax):\n \"\"\"my fitting and plotting function.\n \n fit_func: function (parameter(type:list), x) \n parameter: list of tuples: [('param1', param1), ('param2', param2), ...]\n x, y: dict\n xmin, xmax: float\n \"\"\"\n xkey, xdata = x.items()[0]\n ykey, ydata = y.items()[0]\n\n def fit(parameter, x, y):\n return y - fit_func(parameter, x)\n\n # use x : xmin < x < xmax\n i = 0\n while xdata[i] < xmin:\n i += 1\n imin, imax = i, i\n while xdata[i] < xmax:\n i += 1\n imax = i - 1\n\n paramdata = [b for a, b in parameter]\n paramkey = [a for a, b in parameter]\n res = leastsq(fit, paramdata, args=(xdata[imin:imax], ydata[imin:imax]))\n for p in res[0]:\n print xkey + \": \" + str(p)\n fitted = fit_func(res[0], xdata[imin:imax])\n\n fittedparam = dict([(k, v) for k, v in zip(paramkey, res[0])])\n myplot1(x, y, xdata[imin:imax], fitted, param=fittedparam)", "$\\phi(r) = 10^{b}r^{a}$として最小2乗法でフィッティング", "param = [('a', 1.5), ('b', 0.)]\nxmin, xmax = 0., 0.07\nx = {r'$r$': r}\ny = {r'$\\phi$': phi1}\ndef fit_func(parameter, x):\n a = parameter[0]\n b = parameter[1]\n return np.power(x, a)*np.power(10, b)\nmyfit(fit_func, param, x, y, xmin, xmax)", "両変数を対数にした状態で直線としてフィットしてみる。得られたパラメータによるフィッティング関数のプロットは、元の状態に戻してから行う。後に示す直接べき関数として求めた場合に比べて、$r$の小さい領域での直線の傾きがよく合っているように見える。", "a = 1.5\nb = 0.\nparam = [a, b]\nrmin, rmax = 0., 0.07\n\ndef fit_func(parameter, x):\n a = parameter[0]\n b = parameter[1]\n return a*np.log10(x) + b\n\ndef fit(parameter, x, y):\n return np.log10(y) - fit_func(parameter, x)\n\ni = 0\nwhile r[i] < rmin:\n i += 1\nimin, imax = i, i\nwhile r[i] < rmax:\n i += 1\nimax = i - 1\n\nres = leastsq(fit, param, args=(r[imin:imax], phi1[imin:imax]))\nprint u\"傾き: \" + str(res[0][0])\nprint u\"切片: \" + str(res[0][1])\nR1 = np.power(10, fit_func(res[0], r[imin:imax]))\n\nmyplot1({r'$r$': r}, {r'$\\phi$': phi1}, r[imin:imax], R1, param={'a': res[0][0], 'b': res[0][1]})", "S字型の曲線であるので、\n$$\\phi (r) = 1 - \\exp \\left[ - \\left( \\frac{r}{\\omega} \\right)^{a} \\right]$$\nとしてパラメータ$\\omega$に関して最小2乗法でフィッティングを行った場合。", "omega = 0.06\na = 2.0\nparam = [omega, a]\nrmin, rmax = 0.01, 0.2\n\ndef fit_func(parameter, x):\n omega = parameter[0]\n a = parameter[1]\n return 1 - np.exp(-(x/omega)**a)\n\ndef fit(parameter, x, y):\n return y - fit_func(parameter, x)\n\ni = 0\nwhile r[i] < rmin:\n i += 1\nimin, imax = i, i\nwhile r[i] < rmax:\n i += 1\nimax = i - 1\n\nres = leastsq(fit, param, args=(r[imin:imax], phi1[imin:imax]))\nprint u\"omega: \" + str(res[0][0])\nprint u\"a: \" + str(res[0][1])\nR3 = fit_func(res[0], r[imin:imax])\n\nmyplot1({r'$r$': r}, {r'$\\phi$': phi1}, r[imin:imax], R3, param={'\\omega': res[0][0], 'a': res[0][1]})", "$r$を固定して$N$を変更したときのクラスター数と点の総数の間の関係\n横軸を$X_{i}$の数$N$、縦軸を$1-(\\text{クラスタ数}/\\text{点の総数})$としたときのグラフを書いてみる。", "trial = 100\n\nN = np.arange(1, 
20)\nphi6 = []\nfor _N in N:\n _phi = 0.\n for t in range(trial):\n meeting = Meeting(K=50, N=_N, r=0.07, draw=False)\n meeting.init()\n _phi += len(uniq_list([x[1][1] for x in meeting.ideas]))/float(len(meeting.ideas))\n phi6.append(1 - _phi/trial)\n\nmyplot1({r'$N$': N}, {r'$\\phi$': phi6})", "このとき、意見の総数と参加者の数、一人あたりの意見の数の間には比例の関係が成り立っており、この数のみに依存して、どちらを変えるかは問題ではない。したがって、より刻みを多く取ることのできる一人あたりの意見の数$S$を変えて計算した場合を見てみることにする。", "trial = 100\n\nS = np.arange(10, 70)\nphi7 = []\nfor _S in S:\n _phi = 0.\n for t in range(trial):\n meeting = Meeting(K=50, S=_S, N=6, r=0.07, draw=False)\n meeting.init()\n _phi += len(uniq_list([x[1][1] for x in meeting.ideas]))/float(len(meeting.ideas))\n phi7.append(1 - _phi/trial)\n\nmyplot1({r'$S$': S}, {r'$\\phi$': phi7})", "グラフの形から、\n$$\\phi(S) = 1- \\exp\\left[- \\left( \\frac{S}{\\omega} \\right)^{a}\\right]$$\nであるとしてフィッティングを行ってみる。", "omega = 20.\na = 1.\nparam = [omega, a]\n\ndef fit_func(parameter, x):\n omega = parameter[0]\n a = parameter[1]\n return 1. - np.exp(-(x/omega)**a)\n\ndef fit(parameter, x, y):\n return y - fit_func(parameter, x)\n\nres = leastsq(fit, param, args=(S, phi7))\nprint u\"omega: \" + str(res[0][0])\nprint u\"a: \" + str(res[0][1])\nR5 = fit_func(res[0], S)\n\nmyplot1({r'$S$': S}, {r'$\\phi$': phi7}, S, R5, param={r'\\omega': res[0][0], r'a': res[0][1]})", "閾値$r$を決めたときに、領域$\\Omega$内の任意の点を一様に選んだとき、その中に点が存在する確率の期待値は、解析的計算によって\n$$p'(r) = \\frac{1}{2}r^{4} -\\frac{8}{3}r^{3} + \\pi r^{2}$$\n$r$を定めたとき、すべての点の個数が$M$個であるとすると、一つの点の点がもつ次数の期待値$l$は\n$$l = p'(r)(M-1) = \\left( \\frac{1}{2}r^{4} -\\frac{8}{3}r^{3} + \\pi r^{2} \\right)(M-1)$$\nとなる。これを実際のシミュレーションの結果と照らして確かめる。", "trial = 100\n\nr = np.linspace(0.01, 0.5, num=50)\nphi3 = []\nfor _r in r:\n _phi = 0.\n for t in range(trial):\n meeting = Meeting(K=50, N=6, r=_r, draw=False)\n meeting.init()\n _phi += meeting.ave_l\n phi3.append(_phi/trial)\n\nfig = plt.figure(figsize=(8,6))\nax = fig.add_subplot(111)\n\nr = np.linspace(0.01, 0.5, num=50)\ndef func(x):\n return (1./2*x**4 - 8/3.*x**3 + np.pi*x**2)*(120-1)\ny = func(r)\ndef func2(x):\n return np.sqrt((-0.25*x**8 + 8/3.*x**7 - (64/9.+np.pi)*x**6 + 16/3.*np.pi*x**5\n + (0.5-np.pi**2)*x**4 - 8/3.*x**3 + np.pi*x**2)*(120-1)/(trial))\n\ndelta = func2(r)\ny1 = y + delta\ny2 = y - delta\ny3 = np.zeros(50)\ny3[y2>0] = y2[y2>0]\nax.fill_between(r, y1, y3, facecolor='green', alpha=0.2)\nax.plot(r, phi3)\nax.plot(r, y)\nax.set_xlabel(r'$r$')\nax.set_ylabel(r\"Average number of edges for each time: $l$\")\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
AllenDowney/ThinkBayes2
notebooks/clustering.ipynb
mit
[ "Clustering and the k-means Algorithm\nCopyright 2020 Allen B. Downey\nLicense: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)\nIntroduction\nThis notebook introduces cluster analysis and one of the most common algorithms for it, k-means.\nIt also introduces \n\n\nJupyter, which is a tool for creating notebooks like this one;\n\n\nNumPy, which we'll use to perform array operations;\n\n\nPandas, which we'll use to read and clean the data; and\n\n\nscikit-learn, which provides an implementation of k-means.\n\n\nWe'll proceed \"top-down\"; that is, we'll use scikit-learn first, then we'll open the hood and see how it works.\nIf you want to follow along:\n\n\nUse this link to run this notebook and do the exercises: tinyurl.com/DowPen20\n\n\nUse this link to run the same notebook with solutions: tinyurl.com/DowPenSoln20\n\n\nBio\nI am a professor at Olin College, which is a small engineering school near Boston, Massachusetts, USA.\nOlin was created in 1999 with the mission to transform engineering education.", "%%html\n<iframe src=\"https://www.google.com/maps/embed?pb=!1m14!1m8!1m3!1d1512.1750667940496!2d-71.26457056946273!3d42.29270982134376!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x0%3A0xa038229eeed8c35b!2sOlin%20College%20of%20Engineering!5e1!3m2!1sen!2sus!4v1594232142090!5m2!1sen!2sus\" width=\"600\" height=\"450\" frameborder=\"0\" style=\"border:0;\" allowfullscreen=\"\" aria-hidden=\"false\" tabindex=\"0\"></iframe>", "Classes and books\nI have been at Olin since 2003. I teach classes related to software, data science, Bayesian statistics, and physical modeling.\nI have written several books on these topics, including Think Python and Think Stats. Most are published by O'Reilly Media, which is famous for putting animals on their covers:\n<img src=\"https://greenteapress.com/covers/think_python_cover_small.jpeg\">\nBut all of them are freely available from Green Tea Press.\nFinally, I write a blog about Data Science and related topics, called Probably Overthinking It.\nJupyter and Colab\nJupyter is a tool for writing notebooks that contain text, code, and results.\nYou can install Jupyter on your own computer, but can also use services like Colab that run the notebook for you.\nIn that case, you don't have to install anything; you just need a browser.\nA notebook contains:\n\n\nText cells, which contain text in Markdown or HTML, and\n\n\nCode cells, which contain code in Python or one of about 100 other languages.\n\n\nThis is a text cell; the one below is a code cell.", "print('Hello, Jupyter')", "On Colab, code cells have a triangle \"Play\" icon on the left side. 
You can press it to run the code in the cell.\nOr if you select a cell by clicking on it, you can run it by pressing Shift-Enter.\nAs an exercise:\n\nRun the print statement in the previous cell.\nModify the code in that cell and run it again.\nRun the next cell, which imports the Python modules we'll use later.", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "Also run the following cell, which defines a function we'll use.", "def decorate(**options):\n \"\"\"Decorate the current axes.\n \n Call decorate with keyword arguments like\n decorate(title='Title',\n xlabel='x',\n ylabel='y')\n \n The keyword arguments can be any of the axis properties\n https://matplotlib.org/api/axes_api.html\n \"\"\"\n ax = plt.gca()\n ax.set(**options)\n \n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()", "Clustering\nCluster analysis is a set of tools for looking at data and \n\n\nDiscovering groups, species, or categories,\n\n\nDefining boundaries between groups.\n\n\nIt is a form of \"unsupervised\" learning, which means that the only input is the dataset itself; the algorithm is not given any correct examples to learn from.\nAs an example, I'll used data collected and made available by Dr. Kristen Gorman at the Palmer Long-Term Ecological Research Station in Antarctica.\nThis dataset was published to support this article: Gorman, Williams, and Fraser, \"Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus Pygoscelis)\", March 2014.\nThe following cell downloads the raw data.", "# Load the data files from https://github.com/allisonhorst/palmerpenguins\n# With gratitude to Allison Horst (@allison_horst)\n\nimport os\n\nif not os.path.exists('penguins_raw.csv'):\n !wget https://github.com/allisonhorst/palmerpenguins/raw/master/inst/extdata/penguins_raw.csv", "The dataset is stored in a CSV file, which contains one row for each penguin and one column for each variable.\nI'll use Pandas to read the CSV file and put the results in a DataFrame.", "df = pd.read_csv('penguins_raw.csv')\ndf.shape", "A DataFrame is like a 2-D array, but it also contains names for the columns and labels for the rows.\nThe shape of the DataFrame is the number of rows and columns.\nThe head method displays the first few rows.", "df.head()", "Three species of penguins are represented in the dataset: Adelie, Chinstrap and Gentoo, as shown in this illustration (by Allison Horst, available under the CC-BY license):\n<img width=\"400\" src=\"https://pbs.twimg.com/media/EaAWkZ0U4AA1CQf?format=jpg&name=4096x4096\">\nIn this dataset we are told that there are three species, and we are told which species each penguin belongs to.\nBut for purposes of clustering, we'll pretend we don't have this information and we'll see whether the algorithm \"discovers\" the different species.\nThe measurements we'll use are:\n\n\nBody Mass in grams (g).\n\n\nFlipper Length in millimeters (mm).\n\n\nCulmen Length in millimeters. \n\n\nCulmen Depth in millimeters.\n\n\nIf you are not familiar with the word \"culmen\", it refers to the top margin of the beak, as shown in the following illustration (also by Allison Horst):\n<img width=\"400\" src=\"https://pbs.twimg.com/media/EaAXQn8U4AAoKUj?format=jpg&name=4096x4096\">\nThis might seem like an artificial exercise. 
If we already know that there are three species, why are we trying to discover them?\nFor now, I'll just say that it's a learning example. But let's come back to this question: what is unsupervised clustering good for?\nDistributions of measurements\nThe measurements we have will be most useful for clustering if there are substantial differences between species and small variation within species. To see whether that is true, and to what degree, I will plot distributions of measurements for each species. \nFor convenience, I'll create a new column, called Species2, that contains a shorter version of the species names.", "def shorten(species):\n \"\"\"Select the first word from a string.\"\"\"\n return species.split()[0]\n\ndf['Species2'] = df['Species'].apply(shorten)", "I'll use the groupby method to divide the dataset by species.", "grouped = df.groupby('Species2')\ntype(grouped)", "The result is a GroupBy object that contains the three groups and their names. The following loop prints the group names and the number of penguins in each group.", "for name, group in grouped:\n print(name, len(group))", "We can use the GroupBy object to extract a column, like flipper length, from each group and compute its mean.", "varname = 'Flipper Length (mm)'\n\nfor name, group in grouped:\n print(name, group[varname].mean())", "We can also use it to display the distribution of values in each group.", "for name, group in grouped:\n sns.kdeplot(group[varname], label=name)\n \ndecorate(xlabel=varname,\n ylabel='PDF',\n title='Distributions of features')", "kdeplot uses kernel density estimation to make a smooth histogram of the values.\nIt looks like we can use flipper length to identify Gentoo penguins, but not to distinguish the other two species.\nTo make these steps easier to reuse, I'll wrap them a function.", "def make_kdeplots(df, varname):\n \"\"\"Make a KDE plot for each species.\n \n df: DataFrame\n varname: string column name\n by: string column name\n \n returns: dictionary from species name to Cdf\n \"\"\"\n grouped = df.groupby('Species2')\n for name, group in grouped:\n sns.kdeplot(group[varname], label=name)\n \n decorate(xlabel=varname,\n ylabel='PDF',\n title='Distributions of features')", "Now we can use it to explore other features, like culmen length.", "make_kdeplots(df, 'Culmen Length (mm)')", "It looks like we can use culmen length to identify Adelie penguins.\nExercise: Use make_kdeplots to display the distributions of one of the other two features:\n\n'Body Mass (g)'\n'Culmen Depth (mm)'", "# Solution goes here", "Scatter plot\nIf we can identify Gentoo penguins by flipper length and Adelie penguins by culmen length, maybe we can combine these variables to identify all three species. 
\nI'll start by making a scatter plot of the data.", "var1 = 'Flipper Length (mm)'\nvar2 = 'Culmen Length (mm)'\nvar3 = 'Culmen Depth (mm)'\nvar4 = 'Body Mass (g)'\n\nfor name, group in grouped:\n plt.plot(group[var1], group[var2], \n 'o', alpha=0.4, label=name)\n \ndecorate(xlabel=var1, ylabel=var2)", "Using those two features, we can divide the penguins into clusters with not much overlap.\nWe're going to make lots of scatter plots, so let's wrap that code in a function.\nAnd we'll generalize it to take by as a parameter, so we can group by any column, not just Species2.", "def scatterplot(df, var1, var2, by):\n \"\"\"Make a scatter plot.\n \n df: DataFrame\n var1: string column name, x-axis\n var2: string column name, y-axis\n by: string column name, groupby\n \"\"\"\n grouped = df.groupby(by)\n for name, group in grouped:\n plt.plot(group[var1], group[var2], \n 'o', alpha=0.4, label=name)\n \n decorate(xlabel=var1, ylabel=var2)", "Here's a scatter plot of flipper and culmen length for the three species.", "scatterplot(df, var1, var2, 'Species2')", "Exercise: Make a scatter plot using any other pair of variables.", "# Solution goes here", "We can think of these scatter plots as 2-D views of a 4-D feature space.\nClear the labels\nNow, let's pretend we don't know anything about the different species, and we'll see whether we can rediscover these clusters.\nTo see what the problem looks like, I'll add a column of labels to the DataFrame and set it to 0 for all penguins.", "df['labels'] = 0", "Now if we group by label, there's only one big cluster.", "scatterplot(df, var1, var2, 'labels')", "Let's see what happens if we run k-means clustering on this data.\nClustering\nFirst I'll use the implementation of k-means in scikit-learn; then we'll write our own.\nIn the dataset, we have 344 penguins and 19 variables.", "df.shape", "But some of the variables are NaN, which indicates missing data.\nSo I'll use dropna to drop any rows that have missing data for the two features we're going to use, flipper length and culmen length.", "features = [var1, var2]\ndata = df.dropna(subset=features).copy()\ndata.shape", "I'll extract just those two columns as a NumPy array.", "M = data[features].to_numpy()", "Now we can use KMeans to identify the clusters.\nn_clusters indicates how many cluster we want; this parameter is the $k$ the algorithm is named for.", "from sklearn.cluster import KMeans\n\nkmeans = KMeans(n_clusters=3).fit(M)\ntype(kmeans)", "The result is an object that contains \n\n\nLabels that indicates which cluster each penguin is assigned to, and\n\n\nThe centers of the clusters.\n\n\nI'll store the labels as a columns in data.", "data['labels'] = kmeans.labels_\ndata['labels']", "That way we can use scatterplot to show the clusters.", "scatterplot(data, var1, var2, 'labels')", "The KMeans object also contains the centers of the clusters as coordinate pairs in a NumPy array.", "kmeans.cluster_centers_", "To plot the centers, I'll transpose the array and assign the columns to x and y:", "xs, ys = np.transpose(kmeans.cluster_centers_)", "I'll plot the centers with x's and o's.", "options = dict(color='C3', ls='none', mfc='none')\nplt.plot(xs, ys, marker='o', ms=15, **options)\nplt.plot(xs, ys, marker='x', ms=10, **options);", "As usual, let wrap that up in a function.", "def plot_centers(centers, color='C3'):\n \"\"\"Plot cluster centers.\n \n centers: array with x and y columns\n color: string color specification\n \"\"\"\n xs, ys = np.transpose(centers)\n options = dict(color=color, ls='none', 
mfc='none')\n plt.plot(xs, ys, marker='o', ms=15, **options)\n plt.plot(xs, ys, marker='x', ms=10, **options)", "Now let's pull it all together.", "scatterplot(data, var1, var2, 'labels')\nplot_centers(kmeans.cluster_centers_)", "This figure shows the data, color-coded by assigned label, and the centers of the clusters.\nIt looks like k-means does a reasonable job of rediscovering the species, but with some confusion between Adelie (lower left) and Chinstrap (top center).\nAs a reminder, here are the right answers:", "scatterplot(data, var1, var2, 'Species2')", "Note that the color coding for the clusters is not consistent because the centers we get from k-means are in a random order.\nExercise: Here's the code from this section all in one place. Modify it to use any two features and see what the results look like.", "features2 = [var1, var2]\n\ndata2 = df.dropna(subset=features2).copy()\n\nM2 = data2[features2].to_numpy()\n\nkmeans2 = KMeans(n_clusters=3).fit(M2)\n\ndata2['labels'] = kmeans2.labels_\n\nscatterplot(data2, var1, var2, 'labels')", "Implementing k-means\nNow let's see how the algorithm works. At a high level, there are three steps:\n\nChoose $k$ random points in the dataset as initial centers.\nAssign each point in the dataset to the closest center.\nCompute new centers by calculating the \"center of mass\" in each cluster.\n\nThen you repeat steps 2 and 3 until the centers stop moving.\nTo select random points from the dataset, I'll use np.random.choice to select three indices.", "index = np.random.choice(len(M), size=3)\nindex", "And then use the indices to select rows from the dataset.", "centers = M[index]\ncenters", "I'll wrap that in a function:", "def choose_random_start(M, k):\n \"\"\"Choose k random elements of M.\n \n M: NumPy array with rows of coordinates\n k: number of centers to choose\n \n returns: NumPy array\n \"\"\"\n index = np.random.choice(len(M), size=k)\n centers = M[index]\n return centers", "Here's how we use it.", "centers = choose_random_start(M, 3)\ncenters", "And here's what the centers look like on the scatterplot.", "data['labels'] = 0\nscatterplot(data, var1, var2, 'labels')\nplot_centers(centers)", "The next step is to assign each point to the closest center. So we need to compute the distance between each point and each of the centers.\nCompute distances\nTo demonstrate the process, I'll pick just one of the centers.", "center_x, center_y = centers[0]\ncenter_x, center_y", "Now it will be convenient to have the x and y coordinates in separate arrays. 
I can do that with np.transpose, which turns the columns into rows; then I can assign the rows to x and y.", "x, y = np.transpose(M)\nx.shape", "Along the x-axis, the distance from each point to this center is x-center_x.\nAlong the y-axis, the distance is y-center_y.\nThe distance from each point to the center is the hypotenuse of the triangle, which I can compute with np.hypot:", "distances = np.hypot(x-center_x, y-center_y)\ndistances.shape", "The result is an array that contains the distance from each point to the chosen center.\nTo see if we got it right, I'll plot the center and the points, with the size of the points proportional to distance.", "plt.plot(center_x, center_y, 'rx', markersize=10)\nplt.scatter(x, y, s=distances)\n\ndecorate(xlabel=var1, ylabel=var2)", "At least visually, it seems like the size of the points is proportional to their distance from the center.\nSo let's put those steps into a function:", "def compute_distances(M, center):\n \"\"\"Compute distances to the given center.\n \n M: NumPy array of coordinates\n center: x, y coordinates of the center\n \n returns: NumPy array of float distances\n \"\"\"\n x, y = np.transpose(M)\n center_x, center_y = center\n distances = np.hypot(x-center_x, y-center_y)\n return distances", "We can use the function to make a list of distance arrays, one for each center.", "distance_arrays = [compute_distances(M, center)\n for center in centers]\nlen(distance_arrays)", "Labeling the points\nThe next step is to label each point with the index of the center it is closest to.\ndistance_arrays is a list of arrays, but we can convert it to a 2-D array like this:", "A = np.array(distance_arrays)\nA.shape", "A has one row for each center and one column for each point.\nNow we can use np.argmin to find the shortest distance in each column and return its index.", "data['labels'] = np.argmin(A, axis=0)\ndata['labels']", "The result is an array of indices in the range 0..2, which we assign to a column in data.\nLet's put these steps in a function.", "def compute_labels(M, centers):\n \"\"\"Label each point with the index of the closest center.\n \n M: NumPy array of coordinates\n centers: array of coordinates for the centers\n \n returns: array of labels, 0..k-1\n \"\"\"\n distance_arrays = [compute_distances(M, center)\n for center in centers]\n A = np.array(distance_arrays)\n labels = np.argmin(A, axis=0)\n return labels", "We can call it like this:", "data['labels'] = compute_labels(M, centers)", "And here are the results.", "scatterplot(data, var1, var2, 'labels')\nplot_centers(centers)", "If we get lucky, we might start with one point near the center of each cluster.\nBut even if we are unlucky, we can improve the results by recentering.\nFind new centers\nThe last step is to use the labels from the previous step to compute the center of each cluster.\nI'll start by using groupby to group the points by label.", "grouped = data.groupby('labels')\nfor name, group in grouped:\n print(name, len(group))", "We can use the GroupBy object to select the columns we're using and compute their means.", "data.groupby('labels')[features].mean()", "The result is a DataFrame that contains the central coordinates of each cluster.\nI'll put these steps in a function.", "def compute_new_centers(data, features):\n \"\"\"Compute the center of each cluster.\n \n data: DataFrame\n features: list of string column names\n \"\"\"\n means = data.groupby('labels')[features].mean()\n return means.to_numpy()", "The return value is a NumPy array that contains the new 
centers.", "new_centers = compute_new_centers(data, features)\nnew_centers", "Here's what it looks like with the old centers in gray and the new centers in red.", "scatterplot(data, var1, var2, 'labels')\nplot_centers(centers, color='gray')\nplot_centers(new_centers, color='C3')", "The k-means algorithm\nNow here's the whole algorithm in one function.", "def k_means(data, features, k):\n \"\"\"Cluster by k means.\n \n data: DataFrame\n features: list of string column names\n k: number of clusters\n \n returns: array of centers\n \"\"\"\n M = data[features].to_numpy()\n centers = choose_random_start(M, k)\n\n for i in range(15):\n data['labels'] = compute_labels(M, centers)\n centers = compute_new_centers(data, features)\n\n return centers", "And here's what the results look like after 15 iterations.", "centers = k_means(data, features, 3)\nscatterplot(data, var1, var2, 'labels')\nplot_centers(centers, color='C3')", "The results are (as far as I can see) identical to what we got from the scikit-learn implementation.", "kmeans = KMeans(n_clusters=3).fit(M)\ndata['labels'] = kmeans.labels_\n\nscatterplot(data, var1, var2, 'labels')\nplot_centers(kmeans.cluster_centers_)", "Animation\nHere's an animation that shows the algorithm in action.", "from time import sleep\nfrom IPython.display import clear_output\ninterval = 1\n\ncenters = choose_random_start(M, k=3)\nplt.figure()\n\nfor i in range(10):\n # label and scatter plot\n data['labels'] = compute_labels(M, centers)\n scatterplot(data, var1, var2, 'labels')\n plot_centers(centers, color='gray')\n \n # compute new centers and plot them\n new_centers = compute_new_centers(data, features) \n plot_centers(new_centers)\n centers = new_centers\n \n # show the plot, wait, and clear\n plt.show()\n sleep(interval) \n clear_output(wait=True)", "Exercise: Run the previous cell a few times. 
Do you always get the same clusters?\nNumber of clusters\nAll of this is based on the assumption that you know how many clusters you are looking for, which is true for some applications, but not always.\nLet's see what goes wrong if you ask for too many clusters, or too few.\nExercise: Run the following code with different values of n_clusters and see what the results look like.", "kmeans = KMeans(n_clusters=3).fit(M)\ndata['labels'] = kmeans.labels_\n\nscatterplot(data, var1, var2, 'labels')\nplot_centers(kmeans.cluster_centers_)", "Standardization\nOne of the problems with the results we have seen so far is that the lines between the clusters are mostly vertical.\nThat's because the range of values is wider for flipper length than culmen length, about 60 mm compared to 28 mm.", "M.max(axis=0) - M.min(axis=0)\n\nM.std(axis=0)", "When we compute the distance from each point to each center, the distances in the $x$ direction tend to dominate.\nThis is a common problem with algorithms that are based on distance in multidimensional space.\nIt is such a common problem that there is a common solution: feature scaling.\nThe goal of feature scaling is to transform the features so the distances along each axis are comparable.\nOne version of feature scaling is \"standardization\", which consists of\n\nSubtracting the mean from each feature, and\nDividing through by the standard deviation.\n\nHere's how we can do it with the features in M:", "means = M.mean(axis=0)\nmeans\n\nstds = M.std(axis=0)\nstds\n\nM_std = (M - means) / stds", "Let's see what happens if we run the algorithm again with standardized features.\nNotice that I have to transform the centers back before plotting them.", "kmeans = KMeans(n_clusters=3).fit(M_std)\ndata['labels'] = kmeans.labels_\n\nscatterplot(data, var1, var2, 'labels')\n\ncenters = kmeans.cluster_centers_ * stds + means\nplot_centers(centers)", "That looks a lot better! Again, here are the actual species for comparison.", "scatterplot(data, var1, var2, 'Species2')", "scikit-learn provides StandardScaler, which does the same thing.", "from sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler().fit(M)\nM_std = scaler.transform(M)", "And scaler provides inverse_transform, which we can use to transform the centers back.", "kmeans = KMeans(n_clusters=3).fit(M_std)\ndata['labels'] = kmeans.labels_\n\nscatterplot(data, var1, var2, 'labels')\n\ncenters = scaler.inverse_transform(kmeans.cluster_centers_)\nplot_centers(centers)", "Summary\nThe k-means algorithm does unsupervised clustering, which means that we don't tell it where the clusters are; we just provide the data and ask it to find a given number of clusters.\nIn this notebook, we asked it to find clusters in a group of penguins based on two features, flipper length and culmen length. The clusters it finds reflect the species in the dataset, especially if we standardize the data.\nIn this example we used only two features, because that makes it easy to visualize the results. But k-means extends easily to any number of dimensions (see the exercise below).\nSo, what is this good for?\nWell, Wikipedia provides this list of applications. Applying clustering analysis to these applications, I see a few general ideas:\n\n\nFrom an engineering point of view, clustering can be used to automate some kinds of analysis people do, which might be faster, more accurate, or less expensive. 
And it can work with large datasets and high numbers of dimensions that people can't handle.\n\n\nFrom a scientific point of view, clustering provides a way to test whether the patterns we see are in the data or in our minds.\n\n\nThis second point is related to old philosophical questions about the nature of categories. Putting things into categories seems to be a natural part of how humans think, but we have to wonder whether the categories we find truly \"carve nature at its joints\", as Plato put it.\nIf a clustering algorithm finds the same \"joints\" we do, we might have more confidence they are not entirely in our minds.\nExercise: Use the scikit-learn implementation of k-means to find clusters using all four features (flipper length, culmen length and depth, body mass). How do the results compare to what we got with just two features?", "# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here\n\n# Solution goes here" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/miroc/cmip6/models/miroc-es2l/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: MIROC\nSource ID: MIROC-ES2L\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-20 15:02:40\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'miroc', 'miroc-es2l', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. 
In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specificed for the following parameters if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involved flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. 
Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. 
Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. 
Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. 
Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
pysg/pyther
Modelo de impregnacion/modelo2/Activité 10_Viernes.ipynb
mit
[ "import numpy as np\nimport pandas as pd\nimport math\nimport cmath\nfrom scipy.optimize import root\nfrom scipy.integrate import odeint\nfrom __future__ import division\nfrom scipy import *\nfrom pylab import *\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport pylab as pp\nfrom scipy import integrate, interpolate\nfrom scipy import optimize", "Evaluation des modèles pour l'extraction supercritique\nL'extraction supercritique est de plus en plus utilisée afin de retirer des matières organiques de différents liquides ou matrices solides. Cela est dû au fait que les fluides supercritiques ont des avantages non négligeables par rapport aux autres solvants, ils ont des caractèreistiques comprises entre celles des gaz et celles des solides. En changeant la température et la pression ils peuvent capter des composés différents, ils sont donc très efficaces. \nLe méchanisme de l'extraction supercritique est le suivant : \n- Transport du fluide vers la particule, en premier lieu sur sa surface et en deuxième lieu a l'intérieur de la particule par diffusion\n- Dissolution du soluté avec le fluide supercritique \n- Transport du solvant de l'intérieur vers la surface de la particule \n- Transport du solvant et des solutés de la surface de la particule vers la masse du solvant \nA - Le modèle de Reverchon : \nAfin d'utiliser ce modèle, définissons les variables qui vont y être admises, ci-dessous la nomenclature du modèle :\n \nLe modèle : \nIl est basé sur l'intégration des bilans de masses différentielles tout le long de l'extraction, avec les hypothèses suivants : \n- L'écoulement piston existe à l'intérieur du lit, comme le montre le schéma ci-contre : \n\n- La dispersion axiale du lit est négligeable\n- Le débit, la température et la pression sont constants\nCela nous permet d'obtenir les équations suivantes :\n- $uV.\\frac{\\partial c_{c}}{\\partial t}+eV.\\frac{\\partial c_{c}}{\\partial t}+ AK(q-q) = 0$\n- $(1-e).V.uV\\frac{\\partial c_{q}}{\\partial t}= -AK(q-q*)$\n\nLes conditions initiales sont les suivantes : C = 0, q=q0 à t = 0 et c(0,t) à h=0\n\nLa phase d'équilibre est : $c = k.q*$\nSachant que le fluide et la phase sont uniformes à chaque stage, nous pouvons définir le modèle en utilisant les équations différentielles ordinaires (2n). 
"# imports for the short ODE examples below\nimport numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt", 
"Example 2 (working): the Van der Pol oscillator solved with scipy.integrate.ode", 
"import numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\n\ndef vdp1(t, y):\n    return np.array([y[1], (1 - y[0]**2)*y[1] - y[0]])\n\nt0, t1 = 0, 20 # start and end\nt = np.linspace(t0, t1, 100) # the points of evaluation of solution\ny0 = [2, 0] # initial value\ny = np.zeros((len(t), len(y0))) # array for solution\ny[0, :] = y0\n\nr = integrate.ode(vdp1).set_integrator(\"dopri5\") # choice of method\nr.set_initial_value(y0, t0) # initial values\n\nfor i in range(1, t.size):\n    y[i, :] = r.integrate(t[i]) # get one more value, add it to the array\n    if not r.successful():\n        raise RuntimeError(\"Could not integrate\")\n\nplt.plot(t, y)\nplt.show()", 
"Table (placeholder)", 
"def table(data=None):\n    # placeholder left empty in the original draft; returns a DataFrame of the results\n    return pd.DataFrame(data)", 
"Single-stage balance written as a function (corrected from the draft)", 
"# The draft's fonction1/fonction2/fonction3 are folded here into one right-hand side\n# compatible with scipy.integrate.ode (signature f(t, y), state y = [cn, qn]):\n# the stage balance, the solid-phase balance and the equilibrium relation c = k*q*.\n# Parameter values are kept from the draft and are placeholders only.\nn = 10\nv = 2.31E-5        # stage volume used in the draft (placeholder)\nE = 0.4            # bed porosity\nW = 0.0001471667   # solvent mass flow rate (placeholder)\np = 285            # solvent density\nk = 0.2            # equilibrium constant, c = k * q*\nti = 10.0          # internal mass-transfer time (placeholder)\nc_in = 0.0         # solvent entering the stage is solute-free\n\ndef fonction1(t, y):\n    cn, qn = y\n    dqn = -(1.0 / ti) * (qn - cn / k)\n    dcn = -(n / (E * v)) * ((W / p) * (cn - c_in) + (1 - E) * (v / n) * dqn)\n    return [dcn, dqn]\n\nt0, t1 = 0, 20\nt = np.linspace(t0, t1, 100)\ny0 = [0.0, 10.0]   # initial [cn, qn], values kept from the draft\nc = np.zeros((len(t), len(y0)))\nc[0, :] = y0\nsolver = integrate.ode(fonction1).set_integrator(\"dopri5\")\nsolver.set_initial_value(y0, t0)\nfor i in range(1, t.size):\n    c[i, :] = solver.integrate(t[i]) # get one more value, add it to the array\n    if not solver.successful():\n        raise RuntimeError(\"Could not integrate\")\n\nplt.plot(t, c)\nplt.legend(['c_n', 'q_n'])\nplt.show()", 
"The Reverchon model\nMathematical modeling of supercritical extraction of sage oil", 
"P = 9 #MPa\nT = 323 # K\nQ = 8.83 #g/min\ne = 0.4\nrho = 285 #kg/m3\nmiu = 2.31e-5 # Pa*s\ndp = 0.75e-3 # m\nDl = 0.24e-5 #m2/s\nDe = 8.48e-12 # m2/s\nDi = 6e-13\nu = 0.455e-3 #m/s\nkf = 1.91e-5 #m/s\nde = 0.06 # m\nW = 0.160 # kg\nkp = 0.2\n\nr = 0.31 #m (note: dp above implies a particle radius of about 0.375e-3 m, so this value may need checking)\n\nn = 10\nV = 12\n\n#C = kp * qE\nC = 0.1\nqE = C / kp\n\nCn = 0.05\nCm = 0.02   # plays the role of the upstream concentration c_{n-1} in the balance below\n\nti = (r ** 2) / (15 * Di)\n\n\ndef reverchon(x,t):\n    \n    # Differential equations of the Reverchon model\n    #dCdt = - (n/(e * V)) * (W * (Cn - Cm) / rho + (1 - e) * V * dqdt)\n    #dqdt = - (1 / ti) * (q - qE)\n    \n    q = x[0]\n    C = x[1]\n    qE = C / kp\n    dqdt = - (1 / ti) * (q - qE)\n    dCdt = - (n/(e * V)) * (W * (C - Cm) / rho + (1 - e) * V * dqdt)\n    \n    return [dqdt, dCdt] \n\n\nreverchon([1, 2], 0)   # quick check of the right-hand side\n\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Reverchon model\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\nplt.show()\n\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, qR)\nplt.title(\"Reverchon model\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"q (solid-phase concentration) [=] $kg/m^3$\")\nplt.show()\n\nprint(CR)\n\nr = 0.31 #m\nti = (r ** 2) / (15 * Di)   # recompute ti so the change in r actually enters the model\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Reverchon model, r = 0.31\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\nplt.show()\n\nr = 0.231 #m\nti = (r ** 2) / (15 * Di)   # recompute ti so the change in r actually enters the model\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Reverchon model, r = 0.231\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\nplt.show()\n\nfig,axes=plt.subplots(2,2)\naxes[0,0].plot(t,CR)\naxes[1,0].plot(t,qR)\n", 
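"In extraction experiments the quantity usually reported is the accumulated amount of extract rather than the outlet concentration. The cell below is a rough sketch of how it could be obtained from the simulation above. It rests on assumptions that are not stated in the original notebook: CR is taken as the concentration leaving the extractor, the time axis is taken to be minutes (as the plot labels suggest), and the solvent mass flow rate is taken as the Q = 8.83 g/min listed with the parameters.", 
"# Sketch: accumulated extract from the simulated outlet concentration.\n# Assumptions (not from the original notebook): C_out ~ CR, t in minutes,\n# solvent mass flow rate = 8.83 g/min.\nQ_flow = 8.83e-3                               # kg/min\nt = np.linspace(0, 3000, 500)\nC_out = odeint(reverchon, [0, 0], t)[:, 1]     # kg of extract per m3 of CO2\ndt_step = t[1] - t[0]\n# cumulative trapezoidal rule for m_ext(t) = (Q/rho) * integral of C_out dt\nm_ext = np.concatenate(([0.0],\n        np.cumsum(0.5 * (C_out[1:] + C_out[:-1])) * dt_step * Q_flow / rho))\nplt.plot(t, 1e3 * m_ext)\nplt.title('Accumulated extract (sketch)')\nplt.xlabel('t [=] min')\nplt.ylabel('extract [=] g')\nplt.show()", 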
"Future work\n\n- Modify the model parameters to see how they affect its behaviour.\n- Build a parameter-optimization example based on the Reverchon model.\n\nReferences\n[1] E. Reverchon, Mathematical modelling of supercritical extraction of sage oil, AIChE J. 42 (1996) 1765–1771. https://onlinelibrary.wiley.com/doi/pdf/10.1002/aic.690420627\n[2] A. Rai, K. D. Punase, B. Mohanty, R. Bhargava, Evaluation of models for supercritical fluid extraction, International Journal of Heat and Mass Transfer 72 (2014) 274–287. https://www.sciencedirect.com/science/article/pii/S0017931014000398\nParameter fitting with ODEs\nThe example below fits a generic three-component kinetic scheme to a small data set to illustrate the least-squares procedure; a sketch applying the same procedure to the Reverchon model follows it.", 
"# Parameter fitting for an ODE model by least squares\n\n# experimental data\nx_data = np.linspace(0,9,10)\ny_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])\n\ndef f(y, t, k): \n    \"\"\" system of ordinary differential equations (sequential A -> B -> C kinetics) \"\"\"\n    return (-k[0]*y[0], k[0]*y[0]-k[1]*y[1], k[1]*y[1])\n\ndef my_ls_func(x,teta):\n    f2 = lambda y, t: f(y, t, teta)\n    # integrate the ODE system and return the observed component (B)\n    r = integrate.odeint(f2, y0, x)\n    return r[:,1]\n\ndef f_resid(p):\n    # residuals between the data and the model for the current parameters\n    return y_data - my_ls_func(x_data,p)\n\n# solve the optimization problem\nguess = [0.2, 0.3] # initial guess for the parameters\ny0 = [1,0,0]       # initial values for the ODE system\n(c, kvg) = optimize.leastsq(f_resid, guess) # get the fitted parameters\n\nprint(\"parameter values are \",c)\n\n# interpolate the ODE solution with a spline\nxeval = np.linspace(min(x_data), max(x_data),30) \ngls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)\n\n\nxeval = np.linspace(min(x_data), max(x_data), 200)\n# plot the results\npp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')\npp.xlabel('t [=] min',{\"fontsize\":16})\npp.ylabel(\"C\",{\"fontsize\":16})\npp.legend(('Data','Model'),loc=0)\npp.show()", 
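"As a sketch of the second 'future work' item, the same least-squares machinery can be pointed at the Reverchon model itself. The observations below are synthetic: they are generated from the model with chosen values of $k_p$ and $t_i$ plus random noise, purely to illustrate the procedure, so the recovered parameters have no experimental meaning. The cell reuses the parameters n, e, V, W, rho and Cm defined with the Reverchon model above.", 
"# Sketch: least-squares fit of (kp, ti) in the Reverchon model to synthetic data.\ndef reverchon_C(t_eval, params):\n    kp_f, ti_f = params\n    def rhs(x, tt):\n        q, C = x\n        dqdt = -(1.0 / ti_f) * (q - C / kp_f)\n        dCdt = -(n / (e * V)) * (W * (C - Cm) / rho + (1 - e) * V * dqdt)\n        return [dqdt, dCdt]\n    return odeint(rhs, [0, 0], t_eval)[:, 1]\n\ntrue_params = [0.2, 800.0]        # hypothetical kp and ti used to generate the data\nt_obs = np.linspace(0, 3000, 15)\nnp.random.seed(0)\nC_obs = reverchon_C(t_obs, true_params) + np.random.normal(0, 5e-5, t_obs.size)\n\ndef resid_reverchon(p):\n    return C_obs - reverchon_C(t_obs, p)\n\n# the initial guess matters; here it starts reasonably close to the true values\np_fit, flag = optimize.leastsq(resid_reverchon, [0.15, 600.0])\nprint('fitted kp, ti:', p_fit)\n\nt_fine = np.linspace(0, 3000, 200)\npp.plot(t_obs, C_obs, '.r', t_fine, reverchon_C(t_fine, p_fit), '-b')\npp.xlabel('t [=] min', {'fontsize': 16})\npp.ylabel('C', {'fontsize': 16})\npp.legend(('Synthetic data', 'Fitted model'), loc=0)\npp.show()" ]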
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]