hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc85a1c4d80d0d0402facd00a8044ff49b268555 | 443 | py | Python | osmaxx/excerptexport/migrations/0042_remove_outputfile_file_extension.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 27 | 2015-03-30T14:17:26.000Z | 2022-02-19T17:30:44.000Z | osmaxx/excerptexport/migrations/0042_remove_outputfile_file_extension.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 483 | 2015-03-09T16:58:03.000Z | 2022-03-14T09:29:06.000Z | osmaxx/excerptexport/migrations/0042_remove_outputfile_file_extension.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 6 | 2015-04-07T07:38:30.000Z | 2020-04-01T12:45:53.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-07 13:30
from __future__ import unicode_literals
from django.db import migrations
| 22.15 | 86 | 0.656885 |
cc85a942cbf7b59c5567afa83b5cd9d0434c51dd | 3,673 | py | Python | extract/river/extract_rivers.py | parkermac/LO | 09e0197de7f2166bfa835ec62018b7a8fbfa7379 | [
"MIT"
] | 1 | 2022-01-31T23:12:22.000Z | 2022-01-31T23:12:22.000Z | extract/river/extract_rivers.py | parkermac/LO | 09e0197de7f2166bfa835ec62018b7a8fbfa7379 | [
"MIT"
] | 1 | 2022-03-18T23:51:54.000Z | 2022-03-21T18:02:44.000Z | extract/river/extract_rivers.py | parkermac/LO | 09e0197de7f2166bfa835ec62018b7a8fbfa7379 | [
"MIT"
] | null | null | null | """
Extract as-run river time series.
To test on mac:
run extract_rivers -gtx cas6_v3_lo8b -0 2019.07.04 -1 2019.07.04
To run on perigee:
run extract_rivers -gtx cas6_v3_lo8b -0 2018.01.01 -1 2018.01.10
run extract_rivers -gtx cas6_v3_lo8b -0 2018.01.01 -1 2018.12.31
Performance: takes 23 sec per year on perigee
Modified to include all NPZD tracers, and package the results as
an xarray Dataset.
***
NOTE: this is hard-coded to LiveOcean_output / [gtag] / riv2 so it
pretty specific to the cas6_v3_lo8b run. Also, it expects to find all
the NPZDOC variables.
***
"""
from lo_tools import Lfun, zrfun
from lo_tools import extract_argfun as exfun
Ldir = exfun.intro() # this handles the argument passing
from datetime import datetime, timedelta
from time import time
import numpy as np
import pandas as pd
import xarray as xr
from pathlib import Path
ds0 = Ldir['ds0']
ds1 = Ldir['ds1']
tt0 = time()
# long list of variables to extract
vn_list = ['transport', 'salt', 'temp', 'oxygen',
'NO3', 'phytoplankton', 'zooplankton', 'detritus', 'Ldetritus',
'TIC', 'alkalinity']
print(' Doing river extraction for '.center(60,'='))
print(' gtag = ' + Ldir['gtag'])
outname = 'extraction_' + ds0 + '_' + ds1 + '.nc'
# make sure the output directory exists
out_dir = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag'] / 'Data_roms'
Lfun.make_dir(out_dir)
out_fn = out_dir / outname
out_fn.unlink(missing_ok=True)
dt0 = datetime.strptime(ds0, Lfun.ds_fmt)
dt1 = datetime.strptime(ds1, Lfun.ds_fmt)
ndays = (dt1-dt0).days + 1
# make mds_list: list of datestrings (e.g. 2017.01.01) to loop over
mds_list = []
mdt = dt0
while mdt <= dt1:
mds_list.append(datetime.strftime(mdt, Lfun.ds_fmt))
mdt = mdt + timedelta(days=1)
# get list of river names
# (this is a bit titchy because of NetCDF 3 limitations on strings, forcing them
# to be arrays of characters)
mds = mds_list[0]
fn = Path('/boildat1').absolute() / 'parker' / 'LiveOcean_output' / Ldir['gtag'] / ('f' + mds) / 'riv2' / 'rivers.nc'
ds = xr.open_dataset(fn)
rn = ds['river_name'].values
NR = rn.shape[1]
riv_name_list = []
for ii in range(NR):
a = rn[:,ii]
r = []
for l in a:
r.append(l.decode())
rr = ''.join(r)
riv_name_list.append(rr)
ds.close()
NT = len(mds_list)
nanmat = np.nan * np.ones((NT, NR))
v_dict = dict()
for vn in vn_list:
v_dict[vn] = nanmat.copy()
tt = 0
for mds in mds_list:
fn = Path('/boildat1').absolute() / 'parker' / 'LiveOcean_output' / Ldir['gtag'] / ('f' + mds) / 'riv2' / 'rivers.nc'
ds = xr.open_dataset(fn)
# The river transport is given at noon of a number of days surrounding the forcing date.
# Here we find the index of the time for the day "mds".
RT = pd.to_datetime(ds['river_time'].values)
mdt = datetime.strptime(mds, Lfun.ds_fmt) + timedelta(hours=12)
mask = RT == mdt
for vn in vn_list:
if vn == 'transport':
v_dict[vn][tt,:] = ds['river_' + vn][mask,:]
else:
# the rest of the variables allow for depth variation, but we
# don't use this, so, just use the bottom value
v_dict[vn][tt,:] = ds['river_' + vn][mask,0,:]
ds.close()
tt += 1
# make transport positive
v_dict['transport'] = np.abs(v_dict['transport'])
# store output in an xarray Dataset
mdt_list = [(datetime.strptime(item, Lfun.ds_fmt) + timedelta(hours=12)) for item in mds_list]
times = pd.Index(mdt_list)
x = xr.Dataset(coords={'time': times,'riv': riv_name_list})
for vn in vn_list:
v = v_dict[vn]
x[vn] = (('time','riv'), v)
x.to_netcdf(out_fn)
x.close()
print('Total time for extraction = %d seconds' % (time() - tt0))
| 28.253846 | 121 | 0.659406 |
cc87ec2077a4bb23825353e65fcb8ba6c7ee17e1 | 570 | py | Python | common/migrations/0016_GlobalComments.py | jakubhyza/kelvin | b06c0ed0594a3fb48df1e50ff30cee010ddeea5a | [
"MIT"
] | 8 | 2020-01-11T15:25:25.000Z | 2022-02-20T17:32:58.000Z | common/migrations/0016_GlobalComments.py | jakubhyza/kelvin | b06c0ed0594a3fb48df1e50ff30cee010ddeea5a | [
"MIT"
] | 72 | 2020-01-13T21:07:26.000Z | 2022-03-28T10:17:50.000Z | common/migrations/0016_GlobalComments.py | jakubhyza/kelvin | b06c0ed0594a3fb48df1e50ff30cee010ddeea5a | [
"MIT"
] | 6 | 2020-01-11T16:50:04.000Z | 2022-02-19T10:12:19.000Z | # Generated by Django 3.0.7 on 2020-11-07 16:58
from django.db import migrations, models
| 23.75 | 74 | 0.580702 |
cc884c667af3244727045d36ce0f98e65a2527d2 | 6,361 | py | Python | laia/models/kws/dortmund_phocnet.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | [
"MIT"
] | 2 | 2020-09-10T13:31:17.000Z | 2021-07-31T09:44:17.000Z | laia/models/kws/dortmund_phocnet.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | [
"MIT"
] | 1 | 2020-12-06T18:11:52.000Z | 2020-12-06T18:19:38.000Z | laia/models/kws/dortmund_phocnet.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | [
"MIT"
] | 2 | 2020-04-20T13:40:56.000Z | 2020-10-17T11:59:55.000Z | from __future__ import absolute_import
import math
import operator
from collections import OrderedDict
from functools import reduce
from typing import Union, Sequence, Optional
import torch
from laia.data import PaddedTensor
from laia.nn.pyramid_maxpool_2d import PyramidMaxPool2d
from laia.nn.temporal_pyramid_maxpool_2d import TemporalPyramidMaxPool2d
def convert_old_parameters(params):
"""Convert parameters from the old model to the new one."""
# type: OrderedDict -> OrderedDict
new_params = []
for k, v in params.items():
if k.startswith("conv"):
new_params.append(("conv.{}".format(k), v))
elif k.startswith("fc"):
new_params.append(("fc.{}".format(k), v))
else:
new_params.append((k, v))
return OrderedDict(new_params)
| 41.575163 | 88 | 0.564377 |
cc8a2d2f86634a18b7bce0839a293dfedd5c4feb | 1,541 | py | Python | robocute/widget/__init__.py | kfields/robocute | f6f15ab74266053da5fe4ede3cc81310a62146e5 | [
"MIT"
] | 1 | 2015-08-24T21:58:34.000Z | 2015-08-24T21:58:34.000Z | robocute/widget/__init__.py | kfields/robocute | f6f15ab74266053da5fe4ede3cc81310a62146e5 | [
"MIT"
] | null | null | null | robocute/widget/__init__.py | kfields/robocute | f6f15ab74266053da5fe4ede3cc81310a62146e5 | [
"MIT"
] | null | null | null | import pyglet
from pyglet.gl import *
from robocute.node import *
from robocute.vu import *
from robocute.shape import Rect
| 27.035088 | 81 | 0.541856 |
cc8abb2907e577ffd204f1ea8583e97ab1d687f1 | 410 | py | Python | lib/googlecloudsdk/compute/subcommands/http_health_checks/__init__.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/compute/subcommands/http_health_checks/__init__.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/compute/subcommands/http_health_checks/__init__.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | 2 | 2020-07-25T05:03:06.000Z | 2020-11-04T04:55:57.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
"""Commands for reading and manipulating HTTP health checks."""
from googlecloudsdk.calliope import base
HttpHealthChecks.detailed_help = {
'brief': ('Read and manipulate HTTP health checks for load balanced '
'instances')
}
| 27.333333 | 75 | 0.736585 |
cc8c3ccbb426c49972b991935c3bbbd09c451c5d | 52,014 | py | Python | pycofe/tasks/basic.py | ekr-ccp4/jsCoFE | b9424733fb567938927509bc667ef24ed60ddd8c | [
"MIT"
] | null | null | null | pycofe/tasks/basic.py | ekr-ccp4/jsCoFE | b9424733fb567938927509bc667ef24ed60ddd8c | [
"MIT"
] | null | null | null | pycofe/tasks/basic.py | ekr-ccp4/jsCoFE | b9424733fb567938927509bc667ef24ed60ddd8c | [
"MIT"
] | 1 | 2021-02-25T06:54:15.000Z | 2021-02-25T06:54:15.000Z | ##!/usr/bin/python
#
# ============================================================================
#
# 06.02.18 <-- Date of Last Modification.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ----------------------------------------------------------------------------
#
# BASIC TASK WRAPPER
#
# Command-line: N/A
#
# Copyright (C) Eugene Krissinel, Andrey Lebedev 2017-2018
#
# ============================================================================
#
# Function list:
#
# TaskDriver::
# report_page_id ()
# setReportWidget ( widgetId )
# resetReportPage ()
# log_page_id ()
# err_page_id ()
# file_stdout_path ()
# file_stderr_path ()
# file_stdin_path ()
# reportDir ()
# outputDir ()
# inputDir ()
# reportDocumentName()
# refmac_section ()
# refmac_report ()
# importDir ()
# import_summary_id ()
# python native imports
import os
import sys
import shutil
import traceback
# ccp4-python imports
import pyrvapi
import pyrvapi_ext.parsers
# pycofe imports
from pycofe.dtypes import dtype_template, dtype_xyz, dtype_structure, databox
from pycofe.dtypes import dtype_ensemble, dtype_hkl, dtype_ligand
from pycofe.dtypes import dtype_sequence
from pycofe.proc import edmap, import_merged
from pycofe.varut import signal, jsonut, command
# ============================================================================
# driver class
| 40.103315 | 100 | 0.514381 |
cc8c90a4501fe9ab990f2ab191504461fa54d0c3 | 154 | py | Python | models/__init__.py | Celiali/FixMatch | fe55ea9daf353aead58b630da96f357eee728a82 | [
"MIT"
] | 12 | 2020-12-07T04:24:58.000Z | 2022-02-16T15:33:26.000Z | models/__init__.py | Celiali/FixMatch | fe55ea9daf353aead58b630da96f357eee728a82 | [
"MIT"
] | 1 | 2021-07-15T23:02:22.000Z | 2021-07-15T23:02:22.000Z | models/__init__.py | Celiali/FixMatch | fe55ea9daf353aead58b630da96f357eee728a82 | [
"MIT"
] | 1 | 2021-07-14T10:21:48.000Z | 2021-07-14T10:21:48.000Z | from .wideresnet import *
from .wideresnet_lk import *
WRN_MODELS = {
'WideResNet':WideResNet,
'WideResNet_Lk': WideResNet_Lk,
}
| 19.25 | 39 | 0.642857 |
cc8d3cb600d5b30b2c38ef88bbba0a6183336176 | 8,954 | py | Python | sp3-diffusion/extract-convert.py | robfairh/npre555-cp03 | 2aea7ae2df4a720c5d09003f98192f8986a6d107 | [
"BSD-3-Clause"
] | null | null | null | sp3-diffusion/extract-convert.py | robfairh/npre555-cp03 | 2aea7ae2df4a720c5d09003f98192f8986a6d107 | [
"BSD-3-Clause"
] | 2 | 2021-01-04T12:29:30.000Z | 2021-02-01T11:13:45.000Z | sp3-diffusion/extract-convert.py | robfairh/npre555-cp03 | 2aea7ae2df4a720c5d09003f98192f8986a6d107 | [
"BSD-3-Clause"
] | null | null | null | # This script is based on moltres/python/extractSerpent2GCs.py
import os
import numpy as np
import argparse
import subprocess
import serpentTools as sT
def makePropertiesDir(
outdir,
filebase,
mapFile,
unimapFile,
serp1=False,
fromMain=False):
""" Takes in a mapping from branch names to material temperatures,
then makes a properties directory.
Serp1 means that the group transfer matrix is transposed."""
# the constants moltres looks for:
goodStuff = ['Tot', 'Sp0', 'Sp2', 'Fiss', 'Nsf', 'Kappa', 'Sp1', 'Sp3',
'Invv', 'Chit', 'Chip', 'Chid', 'BETA_EFF', 'lambda']
goodMap = dict([(thing, 'inf' + thing) for thing in goodStuff])
goodMap['BETA_EFF'] = 'betaEff'
goodMap['lambda'] = 'lambda'
# map material names to universe names from serpent
with open(unimapFile) as fh:
uniMap = []
for line in fh:
uniMap.append(tuple(line.split()))
# this now maps material names to serpent universes
uniMap = dict(uniMap)
# list of material names
inmats = list(uniMap.keys())
print("Making properties for materials:")
print(inmats)
coeList = dict([(mat, sT.read(mat + '.coe')) for mat in inmats])
# primary branch to temp mapping
branch2TempMapping = open(mapFile)
# Check if calculation uses 6 neutron precursor groups.
# This prevents writing of excess zeros. Check if any
# entries in the 7th and 8th group precursor positions
# are nonzero, if so, use 8 groups.
use8Groups = False
for line in branch2TempMapping:
item, temp = tuple(line.split())
for mat in inmats:
if mat in item:
currentMat = mat
break
strData = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].gc[goodMap['BETA_EFF']]
strData = strData[1:9]
if np.any(strData[-2:] != 0.0):
use8Groups = True
# Now loop through a second time
branch2TempMapping.close()
branch2TempMapping = open(mapFile)
for line in branch2TempMapping:
item, temp = tuple(line.split())
for mat in inmats:
if mat in item:
currentMat = mat
break
else:
print('Considered materials: {}'.format(inmats))
raise Exception(
'Couldnt find a material corresponding to branch {}'.format(
item))
try:
totxsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Tot']]
sp0xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp0']]
sp1xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp1']]
sp2xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp2']]
sp3xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp3']]
G = len(totxsdata)
remxs0g = totxsdata - sp0xsdata.reshape((G, G)).diagonal()
remxs1g = totxsdata - sp1xsdata.reshape((G, G)).diagonal()
remxs2g = totxsdata - sp2xsdata.reshape((G, G)).diagonal()
remxs3g = totxsdata - sp3xsdata.reshape((G, G)).diagonal()
with open(outdir + '/' + filebase + '_' + currentMat +
'_DIFFCOEFA.txt', 'a') as fh:
strData = 1./3./remxs1g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_DIFFCOEFB.txt', 'a') as fh:
strData = 9./35./remxs3g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_REMXSA.txt', 'a') as fh:
strData = remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_REMXSB.txt', 'a') as fh:
strData = remxs2g + 4./5*remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_COUPLEXSA.txt', 'a') as fh:
strData = 2*remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_COUPLEXSB.txt', 'a') as fh:
strData = 2./5*remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
for coefficient in ['Chit', 'Chip', 'Chid', 'Fiss', 'Nsf', 'Sp0',
'Kappa', 'Invv', 'BETA_EFF', 'lambda']:
with open(outdir + '/' + filebase + '_' + currentMat +
'_' + coefficient.upper() + '.txt', 'a') as fh:
if coefficient == 'lambda' or coefficient == 'BETA_EFF':
strData = coeList[currentMat].branches[
item].universes[
uniMap[currentMat], 0, 0, None].gc[
goodMap[coefficient]]
# some additional formatting is needed here
strData = strData[1:9]
# Cut off group 7 and 8 precursor params in 6
# group calcs
if not use8Groups:
strData = strData[0:6]
else:
strData = coeList[currentMat].branches[
item].universes[
uniMap[currentMat], 0, 0, None].infExp[
goodMap[coefficient]]
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
except KeyError:
raise Exception('Check your mapping and secondary branch files.')
if __name__ == '__main__':
# make it act like a nice little terminal program
parser = argparse.ArgumentParser(
description='Extracts Serpent 2 group constants, \
and puts them in a directory suitable for moltres.')
parser.add_argument('outDir', metavar='o', type=str, nargs=1,
help='name of directory to write properties to.')
parser.add_argument('fileBase', metavar='f', type=str,
nargs=1, help='File base name to give moltres')
parser.add_argument(
'mapFile',
metavar='b',
type=str,
nargs=1,
help='File that maps branches to temperatures')
parser.add_argument(
'universeMap',
metavar='u',
type=str,
nargs=1,
help='File that maps material names to serpent universe')
parser.add_argument(
'--serp1',
dest='serp1',
action='store_true',
help='use this flag for serpent 1 group transfer matrices')
parser.set_defaults(serp1=False)
args = parser.parse_args()
# these are unpacked, so it fails if they werent passed to the script
outdir = args.outDir[0]
fileBase = args.fileBase[0]
mapFile = args.mapFile[0]
unimapFile = args.universeMap[0]
makePropertiesDir(outdir, fileBase, mapFile, unimapFile, serp1=args.serp1,
fromMain=True)
print("Successfully made property files in directory {}.".format(outdir))
| 38.594828 | 78 | 0.52256 |
cc8e5699c6d924eefec86f5ef76ba0da8a0749bf | 63,581 | py | Python | ms1searchpy/main.py | markmipt/ms1searchpy | 1fae3ba9ca25ac151b34110d333820f0a063ee11 | [
"Apache-2.0"
] | 6 | 2020-01-28T12:29:02.000Z | 2022-02-01T14:43:44.000Z | ms1searchpy/main.py | markmipt/ms1searchpy | 1fae3ba9ca25ac151b34110d333820f0a063ee11 | [
"Apache-2.0"
] | 3 | 2021-07-30T01:28:05.000Z | 2021-11-25T09:14:31.000Z | ms1searchpy/main.py | markmipt/ms1searchpy | 1fae3ba9ca25ac151b34110d333820f0a063ee11 | [
"Apache-2.0"
] | 2 | 2020-07-23T10:01:10.000Z | 2021-05-04T12:46:04.000Z | import os
from . import utils
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.optimize import curve_fit
from scipy import exp
import operator
from copy import copy, deepcopy
from collections import defaultdict, Counter
import re
from pyteomics import parser, mass, fasta, auxiliary as aux, achrom
try:
from pyteomics import cmass
except ImportError:
cmass = mass
import subprocess
from sklearn import linear_model
import tempfile
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Queue, Process, cpu_count
from itertools import chain
try:
import seaborn
seaborn.set(rc={'axes.facecolor':'#ffffff'})
seaborn.set_style('whitegrid')
except:
pass
from .utils import calc_sf_all, recalc_spc
import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.stats import zscore, spearmanr
import pandas as pd
from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser
from pyteomics import electrochem
import numpy as np
import random
SEED = 42
from sklearn.model_selection import train_test_split
from os import path, mkdir
from collections import Counter, defaultdict
import warnings
import pylab as plt
warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
import os
from collections import Counter, defaultdict
from scipy.stats import scoreatpercentile
from sklearn.isotonic import IsotonicRegression
import warnings
import numpy as np
import matplotlib
import numpy
import pandas
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn import (
feature_extraction, feature_selection, decomposition, linear_model,
model_selection, metrics, svm
)
import scipy
from scipy.stats import rankdata
from copy import deepcopy
import csv
from scipy.stats import rankdata
import lightgbm as lgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
import time as timemodule
import ast
from sklearn import metrics
SEED = 50
| 37.466706 | 276 | 0.564225 |
cc8e8a561ef5687c9cc7d4a828c190e58f83f661 | 1,099 | py | Python | waitinglist/urls.py | konradb/django-waitinglist | 81e2b3ba81d96c377f4167b0bbd01bb023f7b97a | [
"MIT"
] | null | null | null | waitinglist/urls.py | konradb/django-waitinglist | 81e2b3ba81d96c377f4167b0bbd01bb023f7b97a | [
"MIT"
] | null | null | null | waitinglist/urls.py | konradb/django-waitinglist | 81e2b3ba81d96c377f4167b0bbd01bb023f7b97a | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
urlpatterns = [
url(r"^list_signup/$", views.list_signup, name="waitinglist_list_signup"),
url(r"^ajax_list_signup/$", views.ajax_list_signup, name="waitinglist_ajax_list_signup"),
url(r"^survey/thanks/$", TemplateView.as_view(template_name="waitinglist/thanks.html"), name="waitinglist_thanks"),
url(r"^survey/(?P<code>.*)/$", views.survey, name="waitinglist_survey"),
url(r"^success/$", TemplateView.as_view(template_name="waitinglist/success.html"), name="waitinglist_success"),
url(r"^cohorts/$", views.cohort_list, name="waitinglist_cohort_list"),
url(r"^cohorts/create/$", views.cohort_create, name="waitinglist_cohort_create"),
url(r"^cohorts/cohort/(\d+)/$", views.cohort_detail, name="waitinglist_cohort_detail"),
url(r"^cohorts/cohort/(\d+)/add_member/$", views.cohort_member_add, name="waitinglist_cohort_member_add"),
url(r"^cohorts/cohort/(\d+)/send_invitations/$", views.cohort_send_invitations, name="waitinglist_cohort_send_invitations"),
]
| 57.842105 | 128 | 0.744313 |
cc8eeef4e955218a7296a121d1dfb2153b85aa22 | 1,162 | py | Python | nokogiri/tqdm_load.py | nat-chan/nokogiri | 476c2989e2fbe80b2937923fbef0211459ec738f | [
"MIT"
] | null | null | null | nokogiri/tqdm_load.py | nat-chan/nokogiri | 476c2989e2fbe80b2937923fbef0211459ec738f | [
"MIT"
] | null | null | null | nokogiri/tqdm_load.py | nat-chan/nokogiri | 476c2989e2fbe80b2937923fbef0211459ec738f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from nokogiri.which_env import which_env
from pickle import Unpickler | 31.405405 | 77 | 0.60241 |
cc930b4f7f09f8fa1727fc2d776abafb2a109c6e | 5,991 | py | Python | tests/test_contrib/test_prepredict.py | uricod/yellowbrick | 6fb2e9b7e5b2998c6faa4dcca81a4b0f91bf29b4 | [
"Apache-2.0"
] | 1 | 2017-03-03T03:26:54.000Z | 2017-03-03T03:26:54.000Z | tests/test_contrib/test_prepredict.py | uricod/yellowbrick | 6fb2e9b7e5b2998c6faa4dcca81a4b0f91bf29b4 | [
"Apache-2.0"
] | 1 | 2021-11-10T18:06:19.000Z | 2021-11-10T18:06:19.000Z | tests/test_contrib/test_prepredict.py | uricod/yellowbrick | 6fb2e9b7e5b2998c6faa4dcca81a4b0f91bf29b4 | [
"Apache-2.0"
] | null | null | null | # tests.test_contrib.test_prepredict
# Test the prepredict estimator.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Mon Jul 12 07:07:33 2021 -0400
#
# ID: test_prepredict.py [] benjamin@bengfort.com $
"""
Test the prepredict estimator.
"""
##########################################################################
## Imports
##########################################################################
import pytest
from io import BytesIO
from tests.fixtures import Dataset, Split
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.naive_bayes import GaussianNB
from sklearn.cluster import MiniBatchKMeans
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split as tts
from sklearn.datasets import make_classification, make_regression, make_blobs
from yellowbrick.contrib.prepredict import *
from yellowbrick.regressor import PredictionError
from yellowbrick.classifier import ClassificationReport
import numpy as np
# Set random state
np.random.seed()
##########################################################################
## Fixtures
##########################################################################
##########################################################################
## Tests
##########################################################################
| 32.037433 | 93 | 0.624604 |
cc9334543aeeb4519b2ac91825f9b078d9f16ee4 | 94 | py | Python | src6/1/string3.py | pjackson3/cs50 | 4cf8ca67abfc293d4dbb9bf5a1cb742d74ca7a31 | [
"MIT"
] | null | null | null | src6/1/string3.py | pjackson3/cs50 | 4cf8ca67abfc293d4dbb9bf5a1cb742d74ca7a31 | [
"MIT"
] | null | null | null | src6/1/string3.py | pjackson3/cs50 | 4cf8ca67abfc293d4dbb9bf5a1cb742d74ca7a31 | [
"MIT"
] | 1 | 2020-11-24T23:25:26.000Z | 2020-11-24T23:25:26.000Z | # input and print, with format strings
s = input("What's your name?\n")
print(f"hello, {s}")
| 18.8 | 38 | 0.659574 |
cc93993829d4a8f1dc6a847d22783d8908c2b006 | 2,259 | py | Python | examples/metrics/fc/example_idtxl_wrapper_multi.py | HelmchenLabSoftware/mesostat-dev | 8baa7120b892fe0df893cdcf0f20f49876643d75 | [
"MIT"
] | null | null | null | examples/metrics/fc/example_idtxl_wrapper_multi.py | HelmchenLabSoftware/mesostat-dev | 8baa7120b892fe0df893cdcf0f20f49876643d75 | [
"MIT"
] | null | null | null | examples/metrics/fc/example_idtxl_wrapper_multi.py | HelmchenLabSoftware/mesostat-dev | 8baa7120b892fe0df893cdcf0f20f49876643d75 | [
"MIT"
] | null | null | null | import os, sys
import numpy as np
import matplotlib.pyplot as plt
# Export library path
rootname = "mesoscopic-functional-connectivity"
thispath = os.path.dirname(os.path.abspath(__file__))
rootpath = os.path.join(thispath[:thispath.index(rootname)], rootname)
print("Appending project path", rootpath)
sys.path.append(rootpath)
from codes.lib.info_metrics.info_metrics_generic import parallel_metric_2d
from codes.lib.models.test_lib import dynsys
from codes.lib.sweep_lib import DataSweep
############################
# Parameters
############################
# DynSys parameters
dynsysParam = {
'nNode' : 4, # Number of variables
'nData' : 4000, # Number of timesteps
'nTrial' : 20, # Number of trials
'dt' : 50, # ms, timestep
'tau' : 500, # ms, timescale of each mesoscopic area
'inpT' : 100, # Period of input oscillation
'inpMag' : 0.0, # Magnitude of the periodic input
'std' : 0.2, # STD of neuron noise
}
# IDTxl parameters
idtxlParam = {
'dim_order' : 'rps',
'cmi_estimator' : 'JidtGaussianCMI',
'max_lag_sources' : 5,
'min_lag_sources' : 1,
'window' : 50
}
############################
# Data
############################
nSweep = 10
data = dynsys(dynsysParam) #[trial x channel x time]
print("Generated data of shape", data.shape)
methods = ['BivariateTE', 'MultivariateTE']
dataSweep1 = DataSweep(data, idtxlParam, nSweepMax=nSweep)
timeIdxs = dataSweep1.get_target_time_idxs()
# print(timeIdxs)
#
# from codes.lib.sweep_lib import Sweep2D
#
# sweeper = Sweep2D(dataSweep1.iterator(), methods, idtxlParam["dim_order"], parTarget=True)
#
# for i, (method, data, iTrg) in enumerate(sweeper.iterator()):
# print(i, method, data.shape, iTrg)
results = parallel_metric_2d(dataSweep1.iterator(), "idtxl", methods, idtxlParam, nCore=None)
fig, ax = plt.subplots(nrows=nSweep, ncols=2)
fig.suptitle("TE computation for several windows of the data")
for iMethod, method in enumerate(methods):
ax[0][iMethod].set_title(method)
print(results[method].shape)
for iSweep in range(nSweep):
ax[iSweep][0].set_ylabel("time="+str(timeIdxs[iSweep]))
ax[iSweep][iMethod].imshow(results[method][iSweep][0])
plt.show() | 29.723684 | 93 | 0.660912 |
cc97881f01338acc3697d0a0591a9ee19e6dbad9 | 1,747 | py | Python | project.py | blendacosta/CIP_FinalProject | 8f50298d1b35cffe4569f5a78a158d6d699fa532 | [
"MIT"
] | null | null | null | project.py | blendacosta/CIP_FinalProject | 8f50298d1b35cffe4569f5a78a158d6d699fa532 | [
"MIT"
] | null | null | null | project.py | blendacosta/CIP_FinalProject | 8f50298d1b35cffe4569f5a78a158d6d699fa532 | [
"MIT"
] | null | null | null | '''
A Gameplay Mechanic [TS4]
By CozyGnomes (https://cozygnomes.tumblr.com/)
This is an existing document that contains several things you can do in your gameplay on TS4.
This program comes with the intention of automatically generating the related phrase without
having to search for each number (as instructed in the original document).
Project by Blenda C
'''
import random
FILE_NAME = 'gpmechanic.txt'
if __name__ == '__main__':
main() | 37.170213 | 112 | 0.661133 |
cc9a005a14d0a98035518428a505d967d10c254e | 101 | py | Python | Section 18/8.Document-function-with-keyword-based-arguments.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | 18 | 2020-04-13T03:14:06.000Z | 2022-03-09T18:54:41.000Z | Section 18/8.Document-function-with-keyword-based-arguments.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | null | null | null | Section 18/8.Document-function-with-keyword-based-arguments.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | 22 | 2020-04-29T21:12:42.000Z | 2022-03-17T18:19:54.000Z |
display(3,4)
display(a=3,b=4)
display(b=4,a=3) | 14.428571 | 18 | 0.60396 |
cc9b021bad66790ffb26790ca7f0b624083f6bad | 830 | py | Python | test_dataloader.py | dsoselia/Continual-Learning-Benchmark | b3393aa9eb9a6956a6560ee15904a2cdb046e4c7 | [
"MIT"
] | null | null | null | test_dataloader.py | dsoselia/Continual-Learning-Benchmark | b3393aa9eb9a6956a6560ee15904a2cdb046e4c7 | [
"MIT"
] | null | null | null | test_dataloader.py | dsoselia/Continual-Learning-Benchmark | b3393aa9eb9a6956a6560ee15904a2cdb046e4c7 | [
"MIT"
] | null | null | null | # %%
# Smoke-test script for the continual-learning dataloaders: load CIFAR10,
# split it into 2-class tasks, and print one sample of split "4".
# NOTE(review): depends on the project-local ``dataloaders`` package;
# ``OrderedDict`` appears unused in this fragment.
from collections import OrderedDict
import dataloaders.base
from dataloaders.datasetGen import SplitGen, PermutedGen
train_dataset, val_dataset = dataloaders.base.__dict__["CIFAR10"]('data', False)
# %%
print(train_dataset)
# %%
# Split into tasks of 2 classes each (first split also 2), deterministic
# ordering (rand_split=False), with class labels remapped per task.
train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,
                                                                          first_split_sz=2,
                                                                          other_split_sz=2,
                                                                          rand_split=False,
                                                                          remap_class=not False)
# %%
# Inspect a single sample: X is the input tensor, y the label; cl is
# presumably the class/task id -- confirm against the dataset class.
for X, y , cl in train_dataset_splits["4"]:
    print(X.shape)
    print("cl")
    print(y)
    break
# %%
# %%
| 28.62069 | 98 | 0.474699 |
cc9ccfbe858fbd860be65559e189e269ad336b72 | 4,631 | py | Python | zsalt/pretty_redshift.py | dmnielsen/zSALT | 938904ef8f331d5e047bd00a04a4424e982552e2 | [
"BSD-3-Clause"
] | null | null | null | zsalt/pretty_redshift.py | dmnielsen/zSALT | 938904ef8f331d5e047bd00a04a4424e982552e2 | [
"BSD-3-Clause"
] | null | null | null | zsalt/pretty_redshift.py | dmnielsen/zSALT | 938904ef8f331d5e047bd00a04a4424e982552e2 | [
"BSD-3-Clause"
] | null | null | null | import sys
import pyfits
import numpy as np
from PySpectrograph import Spectrum
from PySpectrograph.Utilities.fit import interfit
import pylab as pl
def ncor(x, y):
    """Return the normalized cross-correlation of two arrays.

    The result is the zero-lag correlation of ``x`` and ``y`` divided by
    the geometric mean of their powers; 0 is returned when either input
    has no power, so the value is always defined.
    """
    power = np.correlate(x, x) * np.correlate(y, y)
    if power <= 0:
        return 0
    return np.correlate(x, y) / np.sqrt(power)
def xcor_redshift(spectra, template, sub=False, z1=0, z2=1, zstep=0.001):
    """Measure the redshift of a spectrum by cross-correlating it with a
    template over a grid of trial redshifts.

    Returns the grid of trial redshifts and the corresponding array of
    normalized correlation values (one per trial redshift).
    """
    trial_z = np.arange(z1, z2, zstep)
    spec_flux = continuum_subtract(spectra)
    temp_flux = continuum_subtract(template)
    corr = np.zeros(len(trial_z))
    for i, z in enumerate(trial_z):
        # Shift the template onto the observed wavelength grid at this z.
        shifted = np.interp(spectra.wavelength, template.wavelength * (1 + z), temp_flux)
        corr[i] = ncor(spec_flux, shifted)
    return trial_z, corr
def continuum_subtract(spec, function='polynomial', order=7):
    """Fit a smooth function to a spectrum and return the continuum-subtracted flux.

    Parameters
    ----------
    spec: object with ``wavelength`` and ``flux`` array attributes
        (e.g. a PySpectrograph Spectrum).
    function: functional form passed to ``interfit`` (default 'polynomial').
    order: order of the fit (default 7).

    Returns
    -------
    Array: ``spec.flux`` minus the fitted continuum evaluated at
    ``spec.wavelength``.
    """
    wc=interfit(spec.wavelength, spec.flux, function=function, order=order)
    wc.interfit()  # perform the actual fit
    return spec.flux-wc(spec.wavelength)
# NOTE(review): Python 2 script (uses print statements); loadiraf/loadtext/
# loadsdss/readlinelist are not defined in this fragment -- presumably
# imported or defined elsewhere in the original file.
# Usage: pretty_redshift.py <spectrum> <template.fits> <z_guess> <name> [sy1 sy2]
if __name__=='__main__':
    # Load the observed spectrum, from FITS or plain text.
    if sys.argv[1].count('fits'):
       hdu=pyfits.open(sys.argv[1])
       spec=loadiraf(hdu)
    else:
       spec=loadtext(sys.argv[1])
    thdu=pyfits.open(sys.argv[2])
    zc=float(sys.argv[3])
    template=loadsdss(thdu)
    # Refine the redshift by cross-correlation within +/-0.15 of the guess.
    z1=max(0,zc-0.15)
    z2=max(0,zc+0.15)
    z_arr, cc_arr=xcor_redshift(spec, template, z1=z1, z2=z2, zstep=0.0001)
    z=z_arr[cc_arr.argmax()]
    print z
    #z_arr, cc_arr=xcor_redshift(spec, template, z1=z-0.05, z2=z+0.05, zstep=0.0001)
    #z=z_arr[cc_arr.argmax()]
    #print z
    # Plot the (boxcar-smoothed) spectrum with the redshifted template overlaid.
    pl.figure()
    sp=pl.axes([0.15,0.15,0.8,0.8])
    cflux=np.convolve(spec.flux, np.ones(10), mode='same')
    #cflux*=1000e16
    sp.plot(spec.wavelength, cflux, color='#000000')
    coef=np.polyfit(spec.wavelength, cflux, 3)
    #sp.plot(spec.wavelength, np.polyval(coef, spec.wavelength))
    nflux=np.interp(spec.wavelength, (1+z)*template.wavelength, template.flux)
    tcoef=np.polyfit(spec.wavelength, nflux*cflux.mean()/nflux.mean(), 2)
    #ratio=cflux.mean()/nflux.mean()#*np.polyval(coef, spec.wavelength)/np.polyval(tcoef, spec.wavelength)
    # Scale the template so its low-order shape matches the observed spectrum.
    ratio=cflux.mean()/nflux.mean()*np.polyval(coef, spec.wavelength)/np.polyval(tcoef, spec.wavelength)
    #sp.plot(spec.wavelength, nflux*ratio*0.5-0.4e-16, color='#FF0000')
    sp.plot(spec.wavelength, nflux*ratio, color='#FF0000')
    #sp.plot(spec.wavelength, np.polyval(tcoef, spec.wavelength))
    #pl.plot((1+z)*template.wavelength, template.flux*spec.flux.mean()/template.flux.mean())
    spname=sys.argv[1].split('_')[0]
    #sp.set_ylim([0,2000])
    x1,x2=sp.get_xlim()
    y1,y2=sp.get_ylim()
    print y1,y2, x1,x2
    # Mark redshifted SDSS line positions that fall inside the plotted range.
    line_wave, line_name=readlinelist('redshift/sdss.linelist')
    dx=10
    for w,n in zip(line_wave, line_name):
        w=float(w)*(1+z)
        if w>x1 and w< x2:
           sp.plot([w,w],[y1,y2],ls='--', color='#AAAAAA')
           sp.text(w, y2-dx, '$%s$' % n.replace('_', '\\'), color='#AAAAAA', fontsize=8)
        #if dx<300:
        #   dx+=100
        #else:
        #   dx=100
    spname=sys.argv[4]
    sp.text(4500,0.8*y2,spname, fontsize=24)
    sp.text(4500,0.70*y2,'z=%5.4f' % zc, fontsize=24)
    sp.set_ylabel('Counts')
    sp.set_xlabel('$\lambda \ (\AA)$')
    # Optional inset position from the command line (currently unused below).
    if len(sys.argv)>5:
       sy1=float(sys.argv[5])
       sy2=float(sys.argv[6])
    else:
       sy1=0.7
       sy2=0.7
    # Disabled inset showing the cross-correlation function.
    if False:
       cc=pl.axes([sy1, sy2,0.2,0.2])
       cc.plot(z_arr, cc_arr, color='#777777')
       xticks=np.arange(100*z1,100*z2+1,10, dtype=int)/100.0
       print xticks
       cc.set_xticks(xticks)
       cc.set_yticklabels([])
       cc.set_xlabel('z')
       cc.set_title('X-corr Function')
    pl.savefig(spname+'.png')
| 30.668874 | 105 | 0.649536 |
cc9d27fc8106d84826a9bc91142a53251a35c56a | 62,936 | py | Python | niimpy/preprocess.py | CxAalto/niimpy | ffd20f9c6aba1671d9035f47715d1649ced0e6e7 | [
"MIT"
] | 5 | 2021-11-23T12:05:23.000Z | 2022-02-11T12:57:50.000Z | niimpy/preprocess.py | niima-project/niimpy | 975470507b1f8836d9e29d43601e345612b06a62 | [
"MIT"
] | 62 | 2021-07-16T09:17:18.000Z | 2022-03-16T11:27:50.000Z | niimpy/preprocess.py | niima-project/niimpy | 975470507b1f8836d9e29d43601e345612b06a62 | [
"MIT"
] | 6 | 2021-09-07T13:06:57.000Z | 2022-03-14T11:26:30.000Z | ################################################################################
# This is the main file for preprocessing smartphone sensor data #
# #
# Contributors: Anna Hakala & Ana Triana #
################################################################################
import niimpy
import numpy as np
import pandas as pd
from pandas import Series
import matplotlib.pyplot as plt
import seaborn as sns
import time
import datetime
import pytz
import niimpy.aalto
# backwards compatibility aliases
from .screen import screen_off, screen_duration
def date_range(df, begin, end):
    """Extract out a certain date range from a DataFrame.

    The index must hold the dates and must be sorted.  ``begin`` and
    ``end`` may be ``None``, in which case the first / last index entry
    is used; the resulting slice is inclusive at both ends.
    """
    # TODO: is this needed? Do normal pandas operation, timestamp
    # checking is not really needed (and limits the formats that can
    # be used, pandas can take more than pd.Timestamp)
    if begin is None:
        begin = df.index[0]
    else:
        assert isinstance(begin, pd.Timestamp), "begin not given in timestamp format"
    if end is None:
        end = df.index[-1]
    else:
        assert isinstance(end, pd.Timestamp), "end not given in timestamp format"
    return df.loc[begin:end]
# Above this point is function that should *stay* in preprocess.py
# Below this is functions that may or may not be moved.
def get_subjects(database):
    """ Returns a list of the subjects in the database
    Parameters
    ----------
    database: database
    """
    # TODO: deprecate, user should do ['user'].unique() on dataframe themselves
    assert isinstance(database, niimpy.database.Data1), "database not given in Niimpy database format"
    raw = database.raw(table='AwareHyksConverter', user=niimpy.ALL)
    return list(raw.user.unique())
def get_phq9(database, subject):
    """ Returns the phq9 scores from the databases per subject
    Parameters
    ----------
    database: database
    user: string
    Returns
    -------
    phq9: Dataframe with the phq9 score (one summed 'answer' per timestamp)
    """
    # TODO: Most of this logic can be moved to sum_survey_cores
    assert isinstance(database, niimpy.database.Data1), "database not given in Niimpy database format"
    assert isinstance(subject, str), "user not given in string format"
    scores = niimpy.aalto.phq9_raw(database)
    scores = scores[scores['user'] == subject].drop(['user', 'source'], axis=1).sort_index()
    # De-duplicate on (timestamp, question id), keeping the first answer.
    scores = (scores.reset_index()
                    .drop_duplicates(subset=['index', 'id'], keep='first')
                    .set_index('index'))
    # Sum the per-question answers into one PHQ-9 score per timestamp.
    return scores.groupby(scores.index)['answer'].sum().to_frame()
#surveys
def daily_affect_variability(questions, subject=None):
    """ Returns two DataFrames corresponding to the daily affect variability and
    mean daily affect, both measures defined in the OLO paper available in
    10.1371/journal.pone.0110907. In brief, the mean daily affect computes the
    mean of each of the 7 questions (e.g. sad, cheerful, tired) asked in a
    likert scale from 0 to 7. Conversely, the daily affect variability computes
    the standard deviation of each of the 7 questions.
    NOTE: This function aggregates data by day.
    Parameters
    ----------
    questions: DataFrame with subject data (or database for backwards compatibility)
    subject: string, optional (backwards compatibility only, in the future do filtering before).
    Returns
    -------
    DLA_std: standard deviation of the daily affect (returned first)
    DLA_mean: mean of the daily affect (returned second)
    """
    # TODO: The daily summary (mean/std) seems useful, can we generalize?
    # Backwards compatibilty if a database was passed
    if isinstance(questions, niimpy.database.Data1):
        questions = questions.raw(table='AwareHyksConverter', user=subject)
    # Maintain backwards compatibility in the case subject was passed and
    # questions was *not* a dataframe.
    # BUGFIX: previously ``isinstance(subject, string)`` -- ``string`` is
    # undefined and raised NameError whenever this branch was reached.
    elif isinstance(subject, str):
        questions = questions[questions['user'] == subject]
    # Keep only the eight OLO affect questions.
    questions=questions[(questions['id']=='olo_1_1') | (questions['id']=='olo_1_2') | (questions['id']=='olo_1_3') | (questions['id']=='olo_1_4') | (questions['id']=='olo_1_5') | (questions['id']=='olo_1_6') | (questions['id']=='olo_1_7') | (questions['id']=='olo_1_8')]
    questions['answer']=pd.to_numeric(questions['answer'])
    questions = questions.drop(['device', 'time', 'user'], axis=1)
    # Localize naive indexes to Helsinki time (EET/EEST already localized).
    if (pd.Timestamp.tzname(questions.index[0]) != 'EET'):
        if pd.Timestamp.tzname(questions.index[0]) != 'EEST':
            questions.index = pd.to_datetime(questions.index).tz_localize('Europe/Helsinki')
    questions=questions.drop_duplicates(subset=['datetime','id'],keep='first')
    # One column per affect question, indexed by answer timestamp.
    questions=questions.pivot_table(index='datetime', columns='id', values='answer')
    questions=questions.rename(columns={'olo_1_1': 'cheerful', 'olo_1_2': 'tired','olo_1_3': 'content', 'olo_1_4': 'nervous','olo_1_5': 'tranquil', 'olo_1_6': 'sad', 'olo_1_7': 'excited', 'olo_1_8': 'active'})
    questions = questions.reset_index()
    DLA = questions.copy()
    # Truncate each timestamp to midnight so resample('D') groups by day.
    questions['date_minus_time'] = questions['datetime'].apply( lambda questions : datetime.datetime(year=questions.year, month=questions.month, day=questions.day))
    questions.set_index(questions["date_minus_time"],inplace=True)
    DLA_std = questions.resample('D').std()#), how='std')
    DLA_std=DLA_std.rename(columns={'date_minus_time': 'datetime'})
    DLA_std.index = pd.to_datetime(DLA_std.index).tz_localize('Europe/Helsinki')
    DLA_mean = questions.resample('D').mean()
    DLA_mean=DLA_mean.rename(columns={'date_minus_time': 'datetime'})
    DLA_mean.index = pd.to_datetime(DLA_mean.index).tz_localize('Europe/Helsinki')
    return DLA_std, DLA_mean
#Ambient Noise
def ambient_noise(noise, subject, begin=None, end=None):
    """ Returns a Dataframe with 5 possible computations regarding the noise
    ambient plug-in: average decibels, average frequency, number of times when
    there was noise in the day, number of times when there was a loud noise in
    the day (>70dB), and number of times when the noise matched the speech noise
    level and frequency (65Hz < freq < 255Hz and dB>50 )
    NOTE: This function aggregates data by day.
    Parameters
    ----------
    noise: DataFrame with subject data (or database for backwards compatibility)
    subject: string, optional (backwards compatibility only, in the future do filtering before).
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    avg_noise: Dataframe
    """
    # TODO: move to niimpy.noise
    # TODO: add arguments for frequency/decibels/silence columns
    # Backwards compatibilty if a database was passed
    if isinstance(noise, niimpy.database.Data1):
        noise = noise.raw(table='AwareAmbientNoise', user=subject)
    # Maintain backwards compatibility in the case subject was passed and
    # noise was *not* a dataframe.
    # BUGFIX: previously ``isinstance(subject, string)`` -- ``string`` is
    # undefined and raised NameError whenever this branch was reached.
    elif isinstance(subject, str):
        noise = noise[noise['user'] == subject]
    # Shrink the dataframe down to only what we need
    noise = noise[['double_frequency', 'is_silent', 'double_decibels', 'datetime']]
    # Extract the data range (In the future should be done before this function
    # is called.)
    if begin is not None or end is not None:
        noise = date_range(noise, begin, end)
    noise['is_silent']=pd.to_numeric(noise['is_silent'])
    loud = noise[noise.double_decibels>70] #check if environment was noisy
    speech = noise[noise['double_frequency'].between(65, 255)]
    speech = speech[speech.is_silent==0] #check if there was a conversation
    silent=noise[noise.is_silent==0] #This is more what moments there are noise in the environment.
    avg_noise=noise.resample('D', on='datetime').mean() #average noise
    avg_noise=avg_noise.drop(['is_silent'],axis=1)
    # Daily counts of non-silent samples ("noise" events).
    if not silent.empty:
        silent=silent.resample('D', on='datetime').count()
        silent = silent.drop(['double_decibels','double_frequency','datetime'],axis=1)
        silent=silent.rename(columns={'is_silent':'noise'})
        avg_noise = avg_noise.merge(silent, how='outer', left_index=True, right_index=True)
    # Daily counts of loud (>70 dB) samples.
    if not loud.empty:
        loud=loud.resample('D', on='datetime').count()
        loud = loud.drop(['double_decibels','double_frequency','datetime'],axis=1)
        loud=loud.rename(columns={'is_silent':'loud'})
        avg_noise = avg_noise.merge(loud, how='outer', left_index=True, right_index=True)
    # Daily counts of samples in the speech frequency band.
    if not speech.empty:
        speech=speech.resample('D', on='datetime').count()
        speech = speech.drop(['double_decibels','double_frequency','datetime'],axis=1)
        speech=speech.rename(columns={'is_silent':'speech'})
        avg_noise = avg_noise.merge(speech, how='outer', left_index=True, right_index=True)
    return avg_noise
#Application
def shutdown_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame with the timestamps of when the phone has shutdown.
    NOTE: This is a helper function created originally to preprocess the application
    info data
    Parameters
    ----------
    database: Niimpy database (or DataFrame -- ``_get_dataframe`` accepts both;
        confirm against niimpy.read)
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    shutdown: Dataframe with the battery rows whose status is -2 or -1
        (shutdown / reboot events)
    """
    bat = niimpy.read._get_dataframe(database, table='AwareBattery', user=subject)
    bat = niimpy.filter_dataframe(bat, begin=begin, end=end)
    # TODO: move to niimpy.battery
    if 'datetime' in bat.columns:
        bat = bat[['battery_status', 'datetime']]
    else:
        bat = bat[['battery_status']]
    # NOTE: redundant with filter_dataframe above, kept for safety
    # (slicing with None bounds is a no-op).
    bat=bat.loc[begin:end]
    bat['battery_status']=pd.to_numeric(bat['battery_status'])
    # BUGFIX: the boolean ``inclusive=False`` form was deprecated in pandas 1.4
    # and removed in 2.0; ``inclusive="neither"`` keeps the original
    # exclusive-bounds behavior (selects statuses strictly between -3 and 0).
    shutdown = bat[bat['battery_status'].between(-3, 0, inclusive="neither")]
    return shutdown
def get_seconds(time_delta):
    """ Converts the timedelta to seconds
    NOTE: This is a helper function (used via ``DataFrame.apply``, so
    ``time_delta`` is a Series of Timedeltas).
    NOTE(review): ``.dt.seconds`` yields only the seconds *component*
    (0-86399) of each Timedelta, not the total duration -- spans of a day
    or more silently lose the day part.  If the total duration is intended,
    ``.dt.total_seconds()`` would be needed; confirm against callers.
    Parameters
    ----------
    time_delta: Timedelta
    """
    return time_delta.dt.seconds
def app_duration(database,subject,begin=None,end=None,app_list_path=None):
    """ Returns two DataFrames containing the duration and number of events per
    group of apps, e.g. number of times a person used communication apps like
    WhatsApp, Telegram, Messenger, sms, etc. and for how long these apps were
    used in a day (in seconds).
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    app_list_path: path to the csv file where the apps are classified into groups
    Returns
    -------
    duration: Dataframe
    count: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = app.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = app.iloc[len(app)-1]['datetime']
    # NOTE(review): hard-coded default path to a cluster scratch directory;
    # callers outside that environment must pass app_list_path explicitly.
    if(app_list_path==None):
        app_list_path = '/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
    app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
    app=app.loc[begin:end]
    #Classify the apps into groups
    # Group id = index of the first CSV column containing the app name;
    # 10 = "other" (not in the CSV), 11 = phone off (assigned below).
    app_list=pd.read_csv(app_list_path)
    app['group']=np.nan
    for index, row in app.iterrows():
        group=app_list.isin([row['application_name']]).any()
        group=group.reset_index()
        if (not any(group[0])):
            app.loc[index,'group']=10
        else:
            app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
    #Insert missing data due to phone being shut down
    shutdown = shutdown_info(database,subject,begin,end)
    if not shutdown.empty:
        shutdown['group']=11
        shutdown['battery_status'] = 'off'
        app = app.merge(shutdown, how='outer', left_index=True, right_index=True)
        app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
        app['group_x'] = app['group_x'].replace(np.nan, 11, regex=True)
        app = app.drop(['battery_status','group_y'], axis=1)
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(['datetime_x','datetime_y'], axis=1)
        app=app.rename(columns={'group_x':'group'})
    #Insert missing data due to the screen being off
    screen=screen_off(database,subject,begin,end)
    if not screen.empty:
        app = app.merge(screen, how='outer', left_index=True, right_index=True)
        app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
        app['group'] = app['group'].replace(np.nan, 11, regex=True)
        del app['screen_status']
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(['datetime_x','datetime_y'], axis=1)
    #Insert missing data caught by sms but unknown cause
    sms = database.raw(table='AwareMessages', user=subject)
    sms = sms.drop(columns=['device','user','time','trace'])
    sms = sms.drop_duplicates(subset=['datetime','message_type'],keep='first')
    sms = sms[sms.message_type=='outgoing']
    sms = sms.loc[begin:end]
    if not sms.empty:
        app = app.merge(sms, how='outer', left_index=True, right_index=True)
        app['application_name'] = app['application_name'].replace(np.nan, 'sms', regex=True)
        app['group'] = app['group'].replace(np.nan, 2, regex=True)
        del app['message_type']
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(['datetime_x','datetime_y'], axis=1)
    #Insert missing data caught by calls but unknown cause
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        call = call.drop(columns=['device','user','time','trace'])
        call = call.drop_duplicates(subset=['datetime','call_type'],keep='first')
        call['call_duration'] = pd.to_timedelta(call.call_duration.astype(int), unit='s')
        call = call.loc[begin:end]
        # Add a row at each call's end time so the call span bounds the
        # duration of whatever event follows it.
        dummy = call.datetime+call.call_duration
        dummy = pd.Series.to_frame(dummy)
        dummy['finish'] = dummy[0]
        dummy = dummy.set_index(0)
        call = call.merge(dummy, how='outer', left_index=True, right_index=True)
        dates=call.datetime.combine_first(call.finish)
        call['datetime']=dates
        call = call.drop(columns=['call_duration','finish'])
        app = app.merge(call, how='outer', left_index=True, right_index=True)
        app.group = app.group.fillna(2)
        app.application_name = app.application_name.fillna('call')
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(columns=['datetime_x','datetime_y','call_type'])
    #Calculate the app duration per group
    # Each event lasts until the next event's timestamp (forward difference).
    app['duration']=np.nan
    app['duration']=app['datetime'].diff()
    app['duration'] = app['duration'].shift(-1)
    app['datetime'] = app['datetime'].dt.floor('d')
    duration=pd.pivot_table(app,values='duration',index='datetime', columns='group', aggfunc=np.sum)
    count=pd.pivot_table(app,values='duration',index='datetime', columns='group', aggfunc='count')
    duration.columns = duration.columns.map({0.0: 'sports', 1.0: 'games', 2.0: 'communication', 3.0: 'social_media', 4.0: 'news', 5.0: 'travel', 6.0: 'shop', 7.0: 'entretainment', 8.0: 'work_study', 9.0: 'transportation', 10.0: 'other', 11.0: 'off'})
    count.columns = count.columns.map({0.0: 'sports', 1.0: 'games', 2.0: 'communication', 3.0: 'social_media', 4.0: 'news', 5.0: 'travel', 6.0: 'shop', 7.0: 'entretainment', 8.0: 'work_study', 9.0: 'transportation', 10.0: 'other', 11.0: 'off'})
    duration = duration.apply(get_seconds,axis=1)
    return duration, count
#Communication
def call_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame containing the duration and number of events per
    type of calls (outgoing, incoming, missed). The Dataframe summarizes the
    duration of the incoming/outgoing calls in seconds, number of those events,
    and how long (in seconds) the person has spoken to the top 5 contacts (most
    frequent)
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    duration: Dataframe (one row per day)
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = call.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = call.iloc[len(call)-1]['datetime']
    call = call.drop(columns=['device','user','time'])
    call = call.loc[begin:end]
    # Truncate timestamps to the day so all groupbys aggregate per day.
    call['datetime'] = call['datetime'].dt.floor('d')
    call['call_duration']=pd.to_numeric(call['call_duration'])
    duration = call.groupby(['datetime']).sum()
    # Daily counts per call type.
    missed_calls = call.loc[(call['call_type'] == 'missed')].groupby(['datetime']).count()
    outgoing_calls = call.loc[(call['call_type'] == 'outgoing')].groupby(['datetime']).count()
    incoming_calls = call.loc[(call['call_type'] == 'incoming')].groupby(['datetime']).count()
    duration['call_missed'] = missed_calls['call_type']
    duration['call_outgoing'] = outgoing_calls['call_type']
    duration['call_incoming'] = incoming_calls['call_type']
    # Daily summed durations per call type (missed calls have no duration).
    duration2 = call.pivot_table(index='datetime', columns='call_type', values='call_duration',aggfunc='sum')
    if ('incoming' in duration2.columns):
        duration2 = duration2.rename(columns={'incoming': 'call_incoming_duration'})
    if ('outgoing' in duration2.columns):
        duration2 = duration2.rename(columns={'outgoing': 'call_outgoing_duration'})
    if ('missed' in duration2.columns):
        duration2 = duration2.drop(columns=['missed'])
    duration = duration.merge(duration2, how='outer', left_index=True, right_index=True)
    duration = duration.fillna(0)
    if ('missed_y' in duration.columns):
        duration = duration.drop(columns=['missed_y'])
    #duration.columns = ['total_call_duration', 'call_missed', 'call_outgoing', 'call_incoming', 'call_incoming_duration', 'call_outgoing_duration']
    #Now let's calculate something more sophisticated... Let's see
    # Flag calls to/from the five most frequent contacts ('trace' column).
    trace = call.groupby(['trace']).count()
    trace = trace.sort_values(by=['call_type'], ascending=False)
    top5 = trace.index.values.tolist()[:5]
    call['frequent']=0
    call = call.reset_index()
    call = call.rename(columns={'index': 'date'})
    for index, row in call.iterrows():
        if (call.loc[index,'trace'] in top5):
            call.loc[index,'frequent']=1
    call['frequent'] = call['frequent'].astype(str)
    # Daily duration split by (call type, top-5 flag); keep only the
    # top-5 ("_1") incoming/outgoing columns.
    duration2 = call.pivot_table(index='date', columns=['call_type','frequent'], values='call_duration',aggfunc='sum')
    duration2.columns = ['_'.join(col) for col in duration2.columns]
    duration2 = duration2.reset_index()
    #duration2.columns = ['datetime','incoming_0','incoming_1','missed_0','missed_1','outgoing_0','outgoing_1']
    duration2['datetime'] = duration2['date'].dt.floor('d')
    duration2 = duration2.groupby(['datetime']).sum()
    if ('incoming_0' in duration2.columns):
        duration2 = duration2.drop(columns=['incoming_0'])
    if ('missed_0' in duration2.columns):
        duration2 = duration2.drop(columns=['missed_0'])
    if ('missed_1' in duration2.columns):
        duration2 = duration2.drop(columns=['missed_1'])
    if ('outgoing_0' in duration2.columns):
        duration2 = duration2.drop(columns=['outgoing_0'])
    duration = duration.merge(duration2, how='outer', left_index=True, right_index=True)
    duration = duration.rename(columns={'incoming_1': 'incoming_duration_top5', 'outgoing_1': 'outgoing_duration_top5'})
    return duration
def sms_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame containing the number of events per type of messages
    SMS (outgoing, incoming). The Dataframe summarizes the number of the
    incoming/outgoing sms and how many of those correspond to the top 5 contacts
    (most frequent with whom the subject exchanges texts)
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    sms_stats: Dataframe (one row per day; empty DataFrame if no SMS data)
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    sms = database.raw(table='AwareMessages', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = sms.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = sms.iloc[len(sms)-1]['datetime']
    sms = sms.drop(columns=['device','user','time'])
    # Truncate timestamps to the day so pivots aggregate per day.
    sms['datetime'] = sms['datetime'].dt.floor('d')
    sms = sms.loc[begin:end]
    if (len(sms)>0):
        # Daily counts of incoming/outgoing messages.
        sms_stats = sms.copy()
        sms_stats['dummy'] = 1
        sms_stats = sms_stats.pivot_table(index='datetime', columns='message_type', values='dummy',aggfunc='sum')
        #Now let's move to somethign more sophisticated
        # Flag messages exchanged with the five most frequent contacts.
        trace = sms.groupby(['trace']).count()
        trace = trace.sort_values(by=['message_type'], ascending=False)
        top5 = trace.index.values.tolist()[:5]
        sms['frequent']=0
        sms = sms.reset_index()
        sms = sms.rename(columns={'index': 'date'})
        for index, row in sms.iterrows():
            if (sms.loc[index,'trace'] in top5):
                sms.loc[index,'frequent']=1
        sms['frequent'] = sms['frequent'].astype(str)
        sms['dummy']=1
        # Daily counts split by (message type, top-5 flag); keep only the
        # top-5 ("_1") columns.
        dummy = sms.pivot_table(index='date', columns=['message_type','frequent'], values='dummy',aggfunc='sum')
        dummy.columns = ['_'.join(col) for col in dummy.columns]
        dummy = dummy.reset_index()
        dummy['datetime'] = dummy['date'].dt.floor('d')
        dummy = dummy.groupby(['datetime']).sum()
        if ('incoming_0' in dummy.columns):
            dummy = dummy.drop(columns=['incoming_0'])
        if ('outgoing_0' in dummy.columns):
            dummy = dummy.drop(columns=['outgoing_0'])
        sms_stats = sms_stats.merge(dummy, how='outer', left_index=True, right_index=True)
        sms_stats = sms_stats.rename(columns={'incoming_1': 'sms_incoming_top5', 'outgoing_1': 'sms_outgoing_top5'})
        sms_stats = sms_stats.fillna(0)
        if ('incoming' in sms_stats.columns):
            sms_stats = sms_stats.rename(columns={'incoming': 'sms_incoming'})
        if ('outgoing' in sms_stats.columns):
            sms_stats = sms_stats.rename(columns={'outgoing': 'sms_outgoing'})
        return sms_stats
    else:
        sms_stats = pd.DataFrame()
        return sms_stats
def sms_duration(database,subject,begin,end):
    """ Returns a DataFrame containing the duration per type of messages SMS
    (outgoing, incoming). The Dataframe summarizes the calculated duration of
    the incoming/outgoing sms and the lags (i.e. the period between receiving a
    message and reading/writing a reply).
    NOTE: The foundation of this function is still weak and needs discussion
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    reading: Dataframe
    writing: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = app.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = app.iloc[len(app)-1]['datetime']
    app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
    #Insert missing data due to phone being shut down
    shutdown = shutdown_info(database,subject,begin,end)
    shutdown=shutdown.rename(columns={'battery_status':'application_name'})
    shutdown['application_name'] = 'off'
    app = app.merge(shutdown, how='outer', left_index=True, right_index=True)
    app['application_name_x'] = app['application_name_x'].replace(np.nan, 'off', regex=True)
    del app['application_name_y']
    dates=app.datetime_x.combine_first(app.datetime_y)
    app['datetime']=dates
    app = app.drop(['datetime_x','datetime_y'],axis=1)
    app=app.rename(columns={'application_name_x':'application_name'})
    #Insert missing data due to the screen being off
    screen=screen_off(database,subject,begin,end)
    app = app.merge(screen, how='outer', left_index=True, right_index=True)
    app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
    del app['screen_status']
    dates=app.datetime_x.combine_first(app.datetime_y)
    app['datetime']=dates
    app = app.drop(['datetime_x','datetime_y'],axis=1)
    app = app.drop_duplicates(subset=['datetime','application_name'],keep='first')
    #Insert missing data caught by sms but unknown cause
    sms = database.raw(table='AwareMessages', user=subject)
    sms = sms.drop(columns=['device','user','time','trace'])
    sms = sms.drop_duplicates(subset=['datetime','message_type'],keep='first')
    #sms = sms[sms.message_type=='outgoing']
    app = app.merge(sms, how='outer', left_index=True, right_index=True)
    app.loc[app['application_name'].isnull(),'application_name'] = app['message_type']
    del app['message_type']
    dates=app.datetime_x.combine_first(app.datetime_y)
    app['datetime']=dates
    app = app.drop(['datetime_x','datetime_y'],axis=1)
    #Calculate the app duration
    # Each event lasts until the next event's timestamp (forward difference).
    app['duration']=np.nan
    app['duration']=app['datetime'].diff()
    app['duration'] = app['duration'].shift(-1)
    #Select the text applications only
    # NOTE(review): 'Mensajera' appears twice and looks like a mangled
    # accented app name (Spanish messaging app) -- verify against raw data.
    sms_app_name = ['Messages','Mensajera','Mensajera','Viestit','incoming','outgoing']
    app = app[app['application_name'].isin(sms_app_name)]
    sms_app_name = ['Messages','Mensajera','Mensajera','Viestit']
    app['application_name'].loc[(app['application_name'].isin(sms_app_name))] = 'messages'
    # group 1 = messaging app opened right after an incoming SMS (reading);
    # group 2 = outgoing SMS right after the messaging app (writing reply).
    # NOTE(review): chained assignment (app.group[i+1]=...) relies on the
    # assignment propagating to the frame; flagged by pandas as unsafe.
    app['group']=np.nan
    for i in range(len(app)-1):
        if (app.application_name[i]=='incoming' and app.application_name[i+1]=='messages'):
            app.group[i+1]=1
        elif (app.application_name[i]=='messages' and app.application_name[i+1]=='outgoing'):
            app.group[i+1]=2
        else:
            app.group[i+1]=0
    app['lags'] = app['datetime'].diff()
    app['datetime'] = app['datetime'].dt.floor('d')
    app=app.loc[begin:end]
    reading = app.loc[(app['group']==1)]
    if (len(reading)>0):
        reading = pd.pivot_table(reading,values=['duration','lags'],index='datetime', columns='application_name', aggfunc=np.sum)
        reading.columns = ['reading_duration','reading_lags']
        reading = reading.apply(get_seconds,axis=1)
    writing = app.loc[(app['group']==2)]
    if (len(writing)>0):
        # Clamp implausible writing lags (<15 s or >120 s) to a 5 s default.
        for i in range(len(writing)-1):
            if (writing.lags[i].seconds<15 or writing.lags[i].seconds>120):
                writing.lags[i] = datetime.datetime.strptime('00:05', "%M:%S") - datetime.datetime.strptime("00:00", "%M:%S")
        del writing['duration']
        writing = writing.rename(columns={'lags':'writing_duration'})
        writing = pd.pivot_table(writing,values='writing_duration',index='datetime', columns='application_name', aggfunc=np.sum)
        writing = writing.apply(get_seconds,axis=1)
    return reading, writing
def communication_info(database,subject,begin=None,end=None):
    """Summarize all communication events (calls, SMSs, communication apps).

    For calls: duration of incoming/outgoing calls in seconds, number of
    those events, and speaking time with the top-5 (most frequent) contacts.
    For SMSs: number of incoming/outgoing events and the top-5 contacts.
    For apps (WhatsApp, Telegram, Messenger, ...): summarized usage duration.
    The per-day total of all these durations is stored in
    'total_comm_duration'.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional

    Returns
    -------
    call_summary: DataFrame indexed by day.
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    # Default the analysis window to the full span of the notification data.
    if begin is not None:
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = app.iloc[0]['datetime']
    if end is not None:
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = app.iloc[len(app)-1]['datetime']
    duration_app, count_app = app_duration(database,subject,begin,end)
    call_summary = call_info(database,subject,begin,end)
    sms_summary = sms_info(database,subject,begin,end)
    if not sms_summary.empty:
        call_summary = call_summary.merge(sms_summary, how='outer', left_index=True, right_index=True)
        call_summary = call_summary.fillna(0)
    # Collect per-day app-usage durations, if present. This replaces the
    # previous try/except-NameError variable-existence checks with explicit
    # None sentinels.
    comm_app = None
    if 'communication' in duration_app.columns:
        comm_app = duration_app['communication'].fillna(0).to_frame()
    social_app = None
    if 'social_media' in duration_app.columns:
        social_app = duration_app['social_media'].fillna(0).to_frame()
    if social_app is not None:
        if comm_app is not None:
            comm_app = comm_app.merge(social_app, how='outer', left_index=True, right_index=True)
        else:
            comm_app = social_app
    if comm_app is not None:
        call_summary = call_summary.merge(comm_app, how='outer', left_index=True, right_index=True)
    call_summary = call_summary.fillna(0)
    # Total communication time = calls + app usage (+ social media when present).
    if 'communication' in call_summary.columns:
        call_summary['total_comm_duration'] = call_summary['call_duration']+call_summary['communication']
    if ('social_media' in call_summary.columns) and ('communication' in call_summary.columns):
        call_summary['total_comm_duration'] = call_summary['call_duration']+call_summary['social_media']+call_summary['communication']
    if 'communication' in call_summary.columns:
        call_summary = call_summary.rename(columns={'communication':'comm_apps_duration'})
    if 'social_media' in call_summary.columns:
        call_summary = call_summary.rename(columns={'social_media':'social_apps_duration'})
    return call_summary
#Occurrences
def occurrence_call_sms(database,subject,begin=None,end=None):
    """Count daily call and SMS events for one user.

    Events are binned in 12-minute slots: e.g. events at 11:05 and 11:45
    count as 2 occurrences within one hour. The per-slot occurrences are
    then summed per day.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional

    Returns
    -------
    event: DataFrame indexed by day (empty when no events are found).
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = call.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = call.iloc[len(call)-1]['datetime']
        call = call.drop(columns=['device','user','time'])
        call = call.loc[begin:end]
    sms = database.raw(table='AwareMessages', user=subject)
    if not sms.empty:
        # Bug fix: the defaults below previously read from `call`, which is
        # wrong (and crashes) when there is SMS data but no call data.
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = sms.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = sms.iloc[len(sms)-1]['datetime']
        sms = sms.drop(columns=['device','user','time'])
        sms = sms.loc[begin:end]
    # Combine whichever sources are non-empty (the previous version merged
    # twice and raised NameError when both were empty).
    if not call.empty and not sms.empty:
        event = call.merge(sms, how='outer', left_index=True, right_index=True)
    elif not call.empty:
        event = call
    elif not sms.empty:
        event = sms
    else:
        return pd.DataFrame()  # no call or SMS data at all
    times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
    event = niimpy.util.occurrence(times)
    event = event.groupby(['day']).sum()
    event = event.drop(columns=['hour'])
    return event
def occurrence_call_sms_apps(database,subject,begin=None,end=None,app_list_path=None,comm_app_list_path=None):
    """Count daily call, SMS and communication-app events for one user.

    Events are binned in 12-minute slots: e.g. events at 11:05 and 11:45
    count as 2 occurrences within one hour. The per-slot occurrences are
    then summed per day.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional
    app_list_path: path to the file classifying apps into groups
    comm_app_list_path: path to the file listing communication apps to
        exclude (those are already covered by the call/SMS tables)

    Returns
    -------
    event: DataFrame indexed by day (empty when no events are found).
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = call.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = call.iloc[len(call)-1]['datetime']
        call = call.drop(columns=['device','user','time'])
        call = call.loc[begin:end]
    sms = database.raw(table='AwareMessages', user=subject)
    if not sms.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = sms.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = sms.iloc[len(sms)-1]['datetime']
        sms = sms.drop(columns=['device','user','time'])
        sms = sms.loc[begin:end]
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    # NOTE(review): these defaults point at a group-specific cluster path.
    if app_list_path is None:
        app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
    if comm_app_list_path is None:
        comm_app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/comm_apps.csv'
    if not app.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = app.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = app.iloc[len(app)-1]['datetime']
        app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
        app = app.loc[begin:end]
        app_list = pd.read_csv(app_list_path)
        app['group'] = np.nan
        # Tag each notification with its app-group column index (10 = unknown app).
        for index, row in app.iterrows():
            group = app_list.isin([row['application_name']]).any()
            group = group.reset_index()
            if not any(group[0]):
                app.loc[index,'group'] = 10
            else:
                app.loc[index,'group'] = group.index[group[0] == True].tolist()[0]
        app = app.loc[app['group'] == 2]  # group 2 = communication apps
        comm_app_list = pd.read_csv(comm_app_list_path)
        comm_app_list = comm_app_list['Communication'].tolist()
        # Drop apps already covered by the dedicated call/SMS tables.
        app = app[~app.application_name.isin(comm_app_list)]
    # Combine whichever event sources are non-empty.
    if not call.empty and not sms.empty:
        event = call.merge(sms, how='outer', left_index=True, right_index=True)
    elif not call.empty:
        event = call
    elif not sms.empty:
        event = sms
    else:
        event = pd.DataFrame()
    if not app.empty:
        if not event.empty:
            event = event.merge(app, how='outer', left_index=True, right_index=True)
        else:
            event = app
    # Bug fix: 'times' was only assigned when events existed, while the
    # occurrence computation ran unconditionally -> NameError on empty data.
    if event.empty:
        return event
    times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
    event = niimpy.util.occurrence(times)
    event = event.groupby(['day']).sum()
    event = event.drop(columns=['hour'])
    return event
def occurrence_call_sms_social(database,subject,begin=None,end=None,app_list_path=None,comm_app_list_path=None):
    """Count daily call, SMS and social/communication-app events for one user.

    Events are binned in 12-minute slots: e.g. events at 11:05 and 11:45
    count as 2 occurrences within one hour. The per-slot occurrences are
    summed per day and the resulting index is localized to 'Europe/Helsinki'.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional
    app_list_path: path to the file classifying apps into groups
    comm_app_list_path: path to the file listing communication apps to
        exclude (those are already covered by the call/SMS tables)

    Returns
    -------
    event: DataFrame indexed by day (empty when no events are found).
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = call.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = call.iloc[len(call)-1]['datetime']
        call = call.drop(columns=['device','user','time'])
        call = call.loc[begin:end]
    sms = database.raw(table='AwareMessages', user=subject)
    if not sms.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = sms.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = sms.iloc[len(sms)-1]['datetime']
        sms = sms.drop(columns=['device','user','time'])
        sms = sms.loc[begin:end]
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    # NOTE(review): these defaults point at a group-specific cluster path.
    if app_list_path is None:
        app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
    if comm_app_list_path is None:
        comm_app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/comm_apps.csv'
    if not app.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = app.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = app.iloc[len(app)-1]['datetime']
        app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
        app = app.loc[begin:end]
        app_list = pd.read_csv(app_list_path)
        app['group'] = np.nan
        # Tag each notification with its app-group column index (10 = unknown app).
        for index, row in app.iterrows():
            group = app_list.isin([row['application_name']]).any()
            group = group.reset_index()
            if not any(group[0]):
                app.loc[index,'group'] = 10
            else:
                app.loc[index,'group'] = group.index[group[0] == True].tolist()[0]
        # Keep communication (2) and social-media (3) apps only.
        app = app.loc[(app['group'] == 2) | (app['group'] == 3)]
        comm_app_list = pd.read_csv(comm_app_list_path)
        comm_app_list = comm_app_list['Communication'].tolist()
        # Drop apps already covered by the dedicated call/SMS tables.
        app = app[~app.application_name.isin(comm_app_list)]
    # Combine whichever event sources are non-empty.
    if not call.empty and not sms.empty:
        event = call.merge(sms, how='outer', left_index=True, right_index=True)
    elif not call.empty:
        event = call
    elif not sms.empty:
        event = sms
    else:
        event = pd.DataFrame()
    if not app.empty:
        if not event.empty:
            event = event.merge(app, how='outer', left_index=True, right_index=True)
        else:
            event = app
    # Bug fix: 'times' was only assigned when events existed, while the
    # occurrence computation ran unconditionally -> NameError on empty data.
    if event.empty:
        return event
    times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
    event = niimpy.util.occurrence(times)
    event = event.groupby(['day']).sum()
    event = event.drop(columns=['hour'])
    event.index = pd.to_datetime(event.index).tz_localize('Europe/Helsinki')
    return event
#Location
def location_data(database,subject,begin=None,end=None):
    """Read the preprocessed daily location aggregates for one user.

    The 'AwareLocationDay' table already contains the aggregation of the
    raw GPS data, so this function only cleans it up: one row per day,
    indexed by a timezone-aware ('Europe/Helsinki') DatetimeIndex, and
    optionally restricted to the [begin, end] window.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional

    Returns
    -------
    DataFrame of daily location aggregates, indexed by day.
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    daily = database.raw(table='AwareLocationDay', user=subject)
    daily = daily.drop(['device','user'], axis=1)
    daily = daily.drop_duplicates(subset=['day'], keep='first')
    daily['day'] = pd.to_datetime(daily['day'], format='%Y-%m-%d')
    daily = daily.set_index('day')
    daily.index = pd.to_datetime(daily.index).tz_localize('Europe/Helsinki')
    # Default the window to the full span of the available data.
    if begin is None:
        begin = daily.index[0]
    else:
        assert isinstance(begin, pd.Timestamp), "begin not given in timestamp format"
    if end is None:
        end = daily.index[-1]
    else:
        assert isinstance(end, pd.Timestamp), "end not given in timestamp format"
    return daily.loc[begin:end]
#Screen
def get_battery_data(battery, user=None, start = None, end = None):
    """Return the battery-level samples for one user within a time window.

    Parameters
    ----------
    battery: DataFrame with raw battery data ('user', 'device', 'time',
        'datetime' and 'battery_level' columns)
    user: string, optional — keep only this user's rows
    start, end: datetime-like, optional — inclusive window limits; default
        to the first/last timestamp present in the (filtered) data

    Returns
    -------
    DataFrame with a numeric 'battery_level' column; the bookkeeping
    columns ('user', 'device', 'time', 'datetime') are dropped.
    """
    assert isinstance(battery, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    if user is None:
        data = battery
    else:
        assert isinstance(user, str), "user not given in string format"
        data = battery[battery['user'] == user]
    # Window limits default to the span of the data itself.
    start = data.iloc[0]['datetime'] if start is None else pd.to_datetime(start)
    end = data.iloc[len(data) - 1]['datetime'] if end is None else pd.to_datetime(end)
    in_window = (data['datetime'] >= start) & (data['datetime'] <= end)
    data = data[in_window]
    data['battery_level'] = pd.to_numeric(data['battery_level'])
    # Keep only the latest reading for duplicated timestamps per device.
    data = data.drop_duplicates(subset=['datetime','user','device'], keep='last')
    return data.drop(['user','device','time','datetime'], axis=1)
def battery_occurrences(battery_data, user=None, start=None, end=None, battery_status = False, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """Count battery data points per fixed-length interval (default 6 h).

    Splits [start, end] into consecutive windows of the requested length
    and reports, for each window, how many battery samples fall in it
    (timestamps in (window_start, window_end]). If battery_status is True
    and the data has a 'battery_status' column, additionally counts the
    shutdown-related events (status '-1', '-2' or '-3') per window.

    Parameters
    ----------
    battery_data: DataFrame with 'datetime' and 'device' columns
    user: string, optional — restrict to one user's rows
    start: datetime, optional — defaults to the first timestamp
    end: datetime, optional — defaults to the last timestamp
    battery_status: boolean, optional
    days, hours, minutes, seconds, milli, micro, nano: window length

    Returns
    -------
    DataFrame indexed by window start with 'start', 'end', 'occurrences'
    (and 'battery_status' when requested) columns.
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    assert isinstance(user, (type(None), str)),"user not given in string format"
    if user is not None:
        # Bug fix: this was assigned to a misspelled name ('ocurrence_data'),
        # so every call with an explicit user raised NameError below.
        occurrence_data = battery_data[battery_data['user'] == user]
    else:
        occurrence_data = battery_data
    occurrence_data = occurrence_data.drop_duplicates(subset=['datetime','device'],keep='last')
    if start is None:
        start = occurrence_data.iloc[0]['datetime']
    start = pd.to_datetime(start)
    td = pd.Timedelta(days=days,hours=hours,minutes=minutes,seconds=seconds,milliseconds=milli,microseconds=micro,nanoseconds=nano)
    delta = start+td
    if end is None:
        end = occurrence_data.iloc[len(occurrence_data)-1]['datetime']
    end = pd.to_datetime(end)
    # Number of complete windows that fit between start and end.
    idx_range = np.floor((end-start)/td).astype(int)
    idx = pd.date_range(start, periods=idx_range, freq=td)
    # Collect rows first and build the frame once; assigning mixed-type rows
    # into a pre-allocated NaN frame via iloc upcasts dtypes and is deprecated.
    if battery_status and ('battery_status' in occurrence_data.columns):
        rows = []
        for _ in range(idx_range):
            in_window = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta)]
            # Shutdown-type events (statuses -1/-2/-3) in the same window.
            status_rows = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta) & ((occurrence_data['battery_status']=='-1')|(occurrence_data['battery_status']=='-2')|(occurrence_data['battery_status']=='-3'))]
            rows.append([start, delta, len(in_window), len(status_rows)])
            start = start + td
            delta = start + td
        occurrences = pd.DataFrame(rows, index=idx, columns=['start','end','occurrences','battery_status'])
    else:
        rows = []
        for _ in range(idx_range):
            in_window = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta)]
            rows.append([start, delta, len(in_window)])
            start = start + td
            delta = start + td
        occurrences = pd.DataFrame(rows, index=idx, columns=['start','end','occurrences'])
    return occurrences
def battery_gaps(data, min_duration_between = None):
    """Annotate battery data with the time elapsed since the previous sample.

    Adds a 'tvalue' column (copy of the index) and a 'delta' column holding
    the difference between consecutive timestamps (the first row gets 0).
    When min_duration_between is given, only rows whose gap is at least
    that long are returned.

    Parameters
    ----------
    data: DataFrame with a DatetimeIndex
    min_duration_between: pd.Timedelta, optional, e.g. pd.Timedelta(hours=6)
    """
    assert isinstance(data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    assert isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex), "data index is not DatetimeIndex"
    annotated = data.copy()
    annotated['tvalue'] = annotated.index
    consecutive = annotated['tvalue'] - annotated['tvalue'].shift()
    annotated['delta'] = consecutive.fillna(pd.Timedelta(seconds=0))
    if min_duration_between is not None:
        annotated = annotated[annotated['delta'] >= min_duration_between]
    return annotated
def battery_charge_discharge(data):
    """Annotate battery data with the charge/discharge rate between samples.

    Adds, per row, the time delta ('tdelta') and battery-level delta
    ('bdelta') relative to the previous sample, plus 'charge/discharge' —
    the level change per second (positive while charging, negative while
    draining; NaN for the first row, whose tdelta is 0).

    Parameters
    ----------
    data: DataFrame with a DatetimeIndex and a 'battery_level' column
    """
    assert isinstance(data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    assert isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex), "data index is not DatetimeIndex"
    rates = data.copy()
    rates['battery_level'] = pd.to_numeric(rates['battery_level'])
    rates['tvalue'] = rates.index
    rates['tdelta'] = (rates['tvalue'] - rates['tvalue'].shift()).fillna(pd.Timedelta(seconds=0))
    rates['bdelta'] = (rates['battery_level'] - rates['battery_level'].shift()).fillna(0)
    seconds_elapsed = rates['tdelta'] / pd.Timedelta(seconds=1)
    rates['charge/discharge'] = rates['bdelta'] / seconds_elapsed
    return rates
def find_real_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """Find windows where BOTH the battery data and the other sensor are silent.

    A "real" gap (e.g. the phone being off) shows up as a window with zero
    battery samples and zero samples of the compared sensor. The default
    window length is 6 hours.

    Parameters
    ----------
    battery_data: DataFrame with a DatetimeIndex
    other_data: DataFrame with a DatetimeIndex — the data to compare with
    start: datetime, optional — defaults to the earliest first timestamp
        of the two datasets
    end: datetime, optional — defaults to the latest last timestamp
    days, hours, ...: window length (see battery_occurrences)
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
    assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
    assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
    assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
    battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
    # Bug fix: 'end' was not forwarded here, so the two occurrence tables
    # could be computed over different window grids (cf. find_battery_gaps).
    other = battery_occurrences(other_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    # Windows silent in both datasets.
    mask = (battery['battery_occurrences']==0)&(other['occurrences']==0)
    gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
    return gaps
def find_non_battery_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """Find windows where only the other sensor is silent.

    A window counts as a non-battery gap when the battery stream is clearly
    alive (more than 10 samples) while the compared sensor reported nothing.
    The default window length is 6 hours.

    Parameters
    ----------
    battery_data: DataFrame with a DatetimeIndex
    other_data: DataFrame with a DatetimeIndex — the data to compare with
    start: datetime, optional — defaults to the earliest first timestamp
        of the two datasets
    end: datetime, optional — defaults to the latest last timestamp
    days, hours, ...: window length (see battery_occurrences)
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
    assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
    assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
    assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
    battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
    # Bug fix: 'end' was not forwarded here, so the two occurrence tables
    # could be computed over different window grids (cf. find_battery_gaps).
    other = battery_occurrences(other_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    # Battery clearly active (>10 samples) while the other sensor is silent.
    mask = (battery['battery_occurrences']>10)&(other['occurrences']==0)
    gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
    return gaps
def find_battery_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """Find windows where only the battery data is silent.

    A window counts as a battery gap when the battery stream reported
    nothing while the compared sensor was active. The default window
    length is 6 hours.

    Parameters
    ----------
    battery_data: DataFrame with a DatetimeIndex
    other_data: DataFrame with a DatetimeIndex — the data to compare with
    start: datetime, optional — defaults to the earliest first timestamp
        of the two datasets
    end: datetime, optional — defaults to the latest last timestamp
    days, hours, ...: window length (see battery_occurrences)
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
    assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
    assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
    assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
    if start is None:
        start = min(battery_data.index[0], other_data.index[0])
    else:
        start = pd.to_datetime(start)
    if end is None:
        end = max(battery_data.index[-1], other_data.index[-1])
    else:
        end = pd.to_datetime(end)
    battery = battery_occurrences(battery_data, start=start, end=end, days=days, hours=hours, minutes=minutes, seconds=seconds, milli=milli, micro=micro, nano=nano)
    battery = battery.rename({'occurrences': 'battery_occurrences'}, axis=1)
    other = battery_occurrences(other_data, start=start, end=end, days=days, hours=hours, minutes=minutes, seconds=seconds, milli=milli, micro=micro, nano=nano)
    # Battery silent while the other sensor is active.
    battery_silent = battery['battery_occurrences'] == 0
    other_active = other['occurrences'] > 0
    mask = battery_silent & other_active
    return pd.concat([battery[mask], other[mask]['occurrences']], axis=1, sort=False)
def missing_data_format(question,keep_values=False):
    """Collapse a datetime-indexed DataFrame to one entry per calendar day.

    Used to prepare data for missing-data visualization: timestamps are
    truncated to day precision, only the first row of each day is kept,
    and the result is returned via .T.squeeze() (a Series for multiple
    days). Unless keep_values is True, the 'answer' column is overwritten
    with 1 as a simple presence marker.

    Parameters
    ----------
    question: DataFrame with a DatetimeIndex (mutated: a 'date' helper
        column is added to the caller's frame)
    keep_values: bool, keep the original 'answer' values instead of 1s
    """
    question['date'] = question.index
    # Truncate each timestamp to midnight of its calendar day.
    question['date'] = question['date'].apply(
        lambda ts: datetime.datetime(year=ts.year, month=ts.month, day=ts.day))
    question = question.drop_duplicates(subset=['date'], keep='first')
    question = question.set_index(['date'])
    if not keep_values:
        question['answer'] = 1
    return question.T.squeeze()
def screen_missing_data(database,subject,begin=None,end=None):
    """Estimate the daily fraction of missing screen data.

    The screen sensor should alternate between statuses; whenever two
    consecutive events carry the same status we assume at least one event
    was lost. Shutdown events (from the battery table) are merged in as
    status 0 so that time the phone was off contributes a transition.
    Returns the per-day ratio (range [0, 1]) of missing transitions.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional

    Returns
    -------
    count: DataFrame indexed by day with a 'missing' ratio column.
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    screen = database.raw(table='AwareScreen', user=subject)
    if begin is not None:
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = screen.iloc[0]['datetime']
    if end is not None:
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = screen.iloc[len(screen)-1]['datetime']
    screen = screen.drop_duplicates(subset=['datetime'],keep='first')
    screen = screen.drop(['device','user','time'],axis=1)
    screen = screen.loc[begin:end]
    screen['screen_status'] = pd.to_numeric(screen['screen_status'])
    # Include the missing points that are due to shutting down the phone.
    shutdown = shutdown_info(database,subject,begin,end)
    shutdown = shutdown.rename(columns={'battery_status':'screen_status'})
    shutdown['screen_status'] = 0
    screen = screen.merge(shutdown, how='outer', left_index=True, right_index=True)
    screen['screen_status'] = screen.fillna(0)['screen_status_x'] + screen.fillna(0)['screen_status_y']
    screen = screen.drop(['screen_status_x','screen_status_y'],axis=1)
    dates = screen.datetime_x.combine_first(screen.datetime_y)
    screen['datetime'] = dates
    screen = screen.drop(['datetime_x','datetime_y'],axis=1)
    # Detect missing data points: equal consecutive statuses mean a lost event.
    screen['missing'] = 0
    screen['next'] = screen['screen_status'].shift(-1)
    screen['dummy'] = screen['screen_status']-screen['next']
    screen['missing'] = np.where(screen['dummy']==0, 1, 0)
    screen['missing'] = screen['missing'].shift(1)
    screen = screen.drop(['dummy','next'], axis=1)
    screen = screen.fillna(0)
    # Truncate timestamps to day precision for the per-day aggregation.
    screen['datetime'] = screen['datetime'].apply( lambda ts : datetime.datetime(year=ts.year, month=ts.month, day=ts.day))
    screen = screen.drop(['screen_status'], axis=1)
    # (A redundant pivot_table whose result was immediately overwritten has
    # been removed here.)
    count = screen.groupby(['datetime','missing'])['missing'].count().unstack(fill_value=0)
    count['missing'] = count[1.0]/(count[0.0]+count[1.0])
    count = count.drop([0.0,1.0], axis=1)
    # Localize to Helsinki time unless the index is already EET/EEST.
    if pd.Timestamp.tzname(count.index[0]) not in ('EET', 'EEST'):
        count.index = pd.to_datetime(count.index).tz_localize('Europe/Helsinki')
    return count
def missing_noise(database,subject,begin=None,end=None):
    """Estimate missing data points for the ambient-noise sensor.

    Gaps of 1860 s or more between consecutive samples are counted as
    missing points, roughly one per 1800 s of gap (the sensor appears to
    be sampled every ~30 min, judging by the 1800 s divisor — TODO confirm).
    Gaps attributable to the phone being shut down (detected via the
    battery table) are excluded from the count.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: pd.Timestamp, optional
    end: pd.Timestamp, optional

    Returns
    -------
    noise: DataFrame with a 'missing' column giving the estimated number
        of lost samples at each timestamp.
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    noise = database.raw(table='AwareAmbientNoise', user=subject)
    # Default the analysis window to the full span of the noise data.
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = noise.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = noise.iloc[len(noise)-1]['datetime']
    # Keep only the timestamp and decibel columns.
    noise = noise.drop(['device','user','time','double_silence_threshold','double_rms','blob_raw','is_silent','double_frequency'],axis=1)
    noise = noise.loc[begin:end]
    # Seconds elapsed since the previous sample.
    noise['duration'] = noise['datetime'].diff()
    noise['duration'] = get_seconds(noise['duration'])
    # First row has no predecessor, hence no duration.
    noise = noise.iloc[1:]
    # Merge in shutdown events so that off-time is not counted as missing.
    shutdown = shutdown_info(database,subject,begin,end)
    shutdown=shutdown.rename(columns={'battery_status':'duration'})
    noise = noise.merge(shutdown, how='outer', left_index=True, right_index=True)
    # Combine the noise-gap durations with the shutdown markers (shutdown
    # rows carry -1 in their status, per the assumption noted below).
    noise['duration_x'] = noise.fillna(0)['duration_x'] + noise.fillna(0)['duration_y']
    noise=noise.rename(columns={'duration_x':'duration'})
    dates=noise.datetime_x.combine_first(noise.datetime_y)
    noise['datetime']=dates
    noise = noise.drop(['datetime_x','datetime_y'],axis=1)
    noise=noise.drop(['double_decibels', 'duration_y'],axis=1)
    noise['missing'] = np.where(noise['duration']>=1860, 1, 0) #detect the missing points (gap of ~31 min or more)
    noise['dummy'] = noise.missing.shift(-2) #assumes that everytime the cellphone shuts down, two timestamps are generated with -1 in the battery_health
    noise['dummy'] = noise.dummy*noise.duration
    noise['dummy'] = noise.dummy.shift(2)
    noise['missing'] = np.where(noise['missing']==1, np.round(noise['duration']/1800), 0) #calculate the number of datapoints missing
    noise = noise.drop(noise[noise.dummy==-1].index) #delete those missing datapoints due to the phone being shut down
    noise = noise.drop(['duration', 'datetime', 'dummy'],axis=1)
    return noise
| 42.182306 | 270 | 0.668981 |
cc9e3bc94eb629f76a855b39c6d9abe102c7440e | 15,570 | py | Python | analog-voltage-control/analogVoltageController.py | jsbangsund/measurement-automation-tools | d2c2fd58b3a6884945081cb9a9ad87366da4a10e | [
"MIT"
] | 2 | 2018-09-27T09:47:47.000Z | 2022-03-24T09:53:04.000Z | analog-voltage-control/analogVoltageController.py | jsbangsund/measurement-automation-tools | d2c2fd58b3a6884945081cb9a9ad87366da4a10e | [
"MIT"
] | null | null | null | analog-voltage-control/analogVoltageController.py | jsbangsund/measurement-automation-tools | d2c2fd58b3a6884945081cb9a9ad87366da4a10e | [
"MIT"
] | null | null | null | # imports
import visa
import numpy as np
import os
import csv
import time
import datetime
import tkinter as tk
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.ttk import Frame, Button, Style,Treeview, Scrollbar, Checkbutton
from functools import partial
import serial
# This app uses an arduino to output two analog voltage channels from 0 to ~3.3V
# These output voltages are used to control flow rate on mass flow controllers
# Script entry point: run main() only when executed directly (not on import).
# NOTE(review): main() is expected to be defined earlier in this file.
if __name__ == '__main__':
    main()
cc9ffda493fbb21c5940dac2d68d30da4ab49dc6 | 12,522 | py | Python | cnn_theano_overlap.py | nanfeng1101/DBQA | 56b0a320e4641f46e80db78039d4ca79e9037d7e | [
"MIT"
] | 7 | 2017-08-14T02:40:04.000Z | 2019-04-24T13:44:44.000Z | cnn_theano_overlap.py | nanfeng1101/DBQA | 56b0a320e4641f46e80db78039d4ca79e9037d7e | [
"MIT"
] | null | null | null | cnn_theano_overlap.py | nanfeng1101/DBQA | 56b0a320e4641f46e80db78039d4ca79e9037d7e | [
"MIT"
] | 2 | 2018-05-19T01:26:38.000Z | 2019-12-04T07:58:09.000Z | #-*- coding:utf-8 -*-
__author__ = "ChenJun"
import theano
import theano.tensor as T
import numpy as np
import cPickle as pickle
from theano_models.qa_cnn import CNNModule
from theano_models.layers import InteractLayer, MLP, MLPDropout, BatchNormLayer
from theano_models.optimizer import Optimizer
from data_process.load_data import data_loader
from collections import OrderedDict
from qa_score import qa_evaluate
from weighted_model import ensemble
import warnings
warnings.filterwarnings("ignore")
# Fixed RNG seed so weight initialization / shuffling is reproducible.
SEED = 3435
rng = np.random.RandomState(SEED)
def build_model(batch_size,img_h,img_w,filter_windows,filter_num,n_in,n_hidden,n_out,L1_reg,L2_reg,conv_non_linear,learning_rate,n_epochs,random=False,non_static=False):
"""
build cnn model for QA.
:param batch_size: batch_size
:param img_h: sentence length
:param img_w: word vector dimension [100]
:param filter_windows: filter window sizes
:param filter_num: the number of feature maps (per filter window)
:param n_in: num of input units
:param n_hidden: num of hidden units
:param n_out: num of out units
:param L1_reg: mlp L1 loss
:param L2_reg: mlp L2 loss
:param conv_non_linear: activation
:param learning_rate: learning rate
:param n_epochs: num of epochs
:param random: bool, use random embedding or trained embedding
:param non_static: bool, use word embedding for param or not
:return:
"""
global rng
###############
# LOAD DATA #
###############
print "loading the data... "
path = "/Users/chenjun/PycharmProjects/DBQA/"
loader = data_loader(path+"pkl/data-train-nn.pkl",path+"pkl/data-valid-nn.pkl",path+"pkl/data-test-nn.pkl", path+"pkl/index2vec.pkl")
valid_group_list = pickle.load(open(path+"pkl/valid_group.pkl"))
test_group_list = [int(x.strip()) for x in open(path + "data/dbqa-data-test.txt.group")]
datasets, emb_words = loader.get_input_by_model(model="theano",random=random)
train_q_data, valid_q_data, test_q_data = datasets[0]
train_a_data, valid_a_data, test_a_data = datasets[1]
train_l_data, valid_l_data, test_l_data = datasets[2]
features = get_overlap(path, length=img_h)
train_overlap_q, valid_overlap_q, test_overlap_q = features[0]
train_overlap_a, valid_overlap_a, test_overlap_a = features[1]
# calculate the number of batches
n_train_batches = train_q_data.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_q_data.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_q_data.get_value(borrow=True).shape[0] // batch_size
print "batch_size: %i, n_train_batches: %i, n_valid_batches: %i, n_test_batches: %i" % (batch_size, n_train_batches, n_valid_batches, n_test_batches)
###############
# BUILD MODEL #
###############
print "building the model... "
# define the input variable
index = T.lscalar(name="index")
drop_rate = T.fscalar(name="drop_rate")
x1 = T.matrix(name='x1', dtype='int64')
x2 = T.matrix(name='x2', dtype='int64')
y = T.lvector(name='y')
x1_overlap = T.tensor4(name="x1_overlap", dtype='float32')
x2_overlap = T.tensor4(name="x2_overlap", dtype='float32')
# transfer input to vector with embedding.
_x1 = emb_words[x1.flatten()].reshape((x1.shape[0], 1, img_h, img_w - 1))
emb_x1 = T.concatenate([_x1, x1_overlap], axis=3)
_x2 = emb_words[x2.flatten()].reshape((x2.shape[0], 1, img_h, img_w - 1))
emb_x2 = T.concatenate([_x2, x2_overlap], axis=3)
# conv_layer
conv_layers = []
q_input = []
a_input = []
for i, filter_h in enumerate(filter_windows):
filter_w = img_w
filter_shape = (filter_num, 1, filter_h, filter_w)
pool_size = (img_h - filter_h + 1, img_w - filter_w + 1)
conv_layer = CNNModule(rng, filter_shape=filter_shape, pool_size=pool_size, non_linear=conv_non_linear)
q_conv_output, a_conv_output = conv_layer(emb_x1, emb_x2)
q_conv_output = q_conv_output.flatten(2) # [batch_size * filter_num]
a_conv_output = a_conv_output.flatten(2) # [batch_size * filter_num]
q_input.append(q_conv_output)
a_input.append(a_conv_output)
conv_layers.append(conv_layer)
q_input = T.concatenate(q_input, axis=1) # batch_size*(filter_num*len(filter_windows))
a_input = T.concatenate(a_input, axis=1) # batch_size*(filter_num*len(filter_windows))
num_filters = len(filter_windows) * filter_num
interact_layer = InteractLayer(rng, num_filters, num_filters, dim=n_in)
qa_vec = interact_layer(q_input, a_input)
bn_layer = BatchNormLayer(n_in=n_in, inputs=qa_vec)
# classifier = MLP(rng,input=bn_layer.out,n_in=n_in,n_hidden=n_hidden,n_out=n_out)
classifier = MLPDropout(rng, input=bn_layer.out, n_in=n_in, n_hidden=n_hidden, n_out=n_out, dropout_rate=drop_rate)
# model params
params = classifier.params + interact_layer.params + bn_layer.params
for i in xrange(len(conv_layers)):
params += conv_layers[i].params
if non_static:
print "---CNN-NON-STATIC---"
params += [emb_words]
else:
print "---CNN-STATIC---"
opt = Optimizer()
cost = (
classifier.cross_entropy(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# updates = opt.sgd_updates_adadelta(params, cost, 0.95, 1e-6, 9)
updates = opt.RMSprop(params, cost)
train_model = theano.function(
inputs=[index, drop_rate],
updates=updates,
outputs=cost,
givens={
x1: train_q_data[index * batch_size:(index + 1) * batch_size],
x2: train_a_data[index * batch_size:(index + 1) * batch_size],
y: train_l_data[index * batch_size:(index + 1) * batch_size],
x1_overlap: train_overlap_q[index * batch_size: (index + 1) * batch_size],
x2_overlap: train_overlap_a[index * batch_size: (index + 1) * batch_size]
},
)
valid_model = theano.function(
inputs=[index, drop_rate],
outputs=classifier.pred_prob(),
givens={
x1: valid_q_data[index * batch_size:(index + 1) * batch_size],
x2: valid_a_data[index * batch_size:(index + 1) * batch_size],
x1_overlap: valid_overlap_q[index * batch_size: (index + 1) * batch_size],
x2_overlap: valid_overlap_a[index * batch_size: (index + 1) * batch_size]
},
)
test_model = theano.function(
inputs=[index, drop_rate],
outputs=classifier.pred_prob(),
givens={
x1: test_q_data[index * batch_size:(index + 1) * batch_size],
x2: test_a_data[index * batch_size:(index + 1) * batch_size],
x1_overlap: test_overlap_q[index * batch_size: (index + 1) * batch_size],
x2_overlap: test_overlap_a[index * batch_size: (index + 1) * batch_size]
},
)
###############
# TRAIN MODEL #
###############
print('training the model...')
epoch = 0
valid_dic = OrderedDict()
eval_dic = OrderedDict()
while epoch < n_epochs:
epoch += 1
batch_cost = 0.
for batch_index1 in xrange(n_train_batches):
batch_cost += train_model(batch_index1, 0.5) # drop
if batch_index1 % 100 == 0:
print ('epoch %i/%i, batch %i/%i, cost %f') % (
epoch, n_epochs, batch_index1, n_train_batches, batch_cost / n_train_batches)
###############
# VALID MODEL #
###############
valid_score_data = []
for batch_index2 in xrange(n_valid_batches):
batch_pred = valid_model(batch_index2, 0.0) # drop
valid_score_data.append(batch_pred)
valid_score_list = (np.concatenate(np.asarray(valid_score_data), axis=0)).tolist()
valid_label_list = valid_l_data.get_value(borrow=True).tolist()
for i in xrange(len(valid_score_list), len(valid_label_list)):
valid_score_list.append(np.random.random())
_eval = qa_evaluate(valid_score_list, valid_label_list, valid_group_list, label=1, mod="mrr")
print "---valid mrr: ", _eval
valid_dic[str(epoch) + "-" + str(batch_index1)] = _eval
###############
# TEST MODEL #
###############
test_score_data = []
for batch_index3 in xrange(n_test_batches):
batch_pred = test_model(batch_index3, 0.0) # drop
test_score_data.append(batch_pred)
test_score_list = (np.concatenate(np.asarray(test_score_data), axis=0)).tolist()
test_label_list = test_l_data.get_value(borrow=True).tolist()
for i in xrange(len(test_score_list), len(test_label_list)):
test_score_list.append(np.random.random())
_eval = qa_evaluate(test_score_list, test_label_list, test_group_list, label=1, mod="mrr")
print "---test mrr: ", _eval
eval_dic[str(epoch) + "-" + str(batch_index1)] = _eval
pickle.dump(valid_score_list, open(path + "result/cnn-overlap-valid.pkl." + str(epoch) + "-" + str(batch_index1), "w"))
pickle.dump(test_score_list, open(path + "result/cnn-overlap-test.pkl."+str(epoch)+"-"+str(batch_index1), "w"))
pickle.dump(test_label_list, open(path + "result/test_label.pkl", "w"))
pickle.dump(valid_label_list, open(path + "result/valid_label.pkl", "w"))
_valid_dic = sorted(valid_dic.items(), key=lambda x: x[1])[-10:]
_eval_dic = sorted(eval_dic.items(), key=lambda x: x[1])[-10:]
print "valid dic: ", _valid_dic
print "eval dic: ", _eval_dic
valid_score_file = [path+"result/cnn-overlap-valid.pkl."+x[0] for x in _valid_dic]
test_score_file = [path + "result/cnn-overlap-test.pkl." + x[0] for x in _valid_dic] ###from valid
valid_label_file = path + "result/valid_label.pkl"
test_label_file = path + "result/test_label.pkl"
test_ensemble_file = path + "result/test_ensemble_overlap.pkl"
valid_ensemble_file = path + "result/valid_ensemble_overlap.pkl"
valid_mrr = ensemble(valid_score_file, valid_label_file, valid_group_list, valid_ensemble_file)
test_mrr = ensemble(test_score_file, test_label_file, test_group_list, test_ensemble_file)
print "---ensemble valid mrr: ", valid_mrr
print "---ensemble test mrr: ", test_mrr
if __name__ == "__main__":
build_model(batch_size=64,
img_h=50,
img_w=51,
filter_windows=[3, 1, 2],
filter_num=128,
n_in=20,
n_hidden=128,
n_out=2,
L1_reg=0.00,
L2_reg=0.0001,
conv_non_linear="relu",
learning_rate=0.001,
n_epochs=3,
random=False,
non_static=False)
| 47.977011 | 169 | 0.645504 |
cca08eaaa789f720d1578ebb6f7b99357f1739e1 | 7,298 | py | Python | marble-plotter-dous/src/main/python/server.py | paser4se/marble | 35da5ac8ff24ff7f30a135cbabf57f60f06f54e3 | [
"Apache-2.0"
] | 2 | 2017-06-05T13:06:06.000Z | 2021-06-23T13:53:33.000Z | marble-plotter-dous/src/main/python/server.py | paser4se/marble | 35da5ac8ff24ff7f30a135cbabf57f60f06f54e3 | [
"Apache-2.0"
] | 35 | 2015-07-21T15:09:24.000Z | 2020-07-08T09:06:08.000Z | marble-plotter-dous/src/main/python/server.py | paser4se/marble | 35da5ac8ff24ff7f30a135cbabf57f60f06f54e3 | [
"Apache-2.0"
] | 5 | 2015-06-16T09:40:39.000Z | 2020-11-05T08:13:57.000Z | # Rest Server
from flask import Flask, jsonify, abort, request
# Eureka client
from eureka.client import EurekaClient
# Background tasks
import threading
import atexit
import logging
import socket
import netifaces as ni
import sys
import os
import time
# Plotter libs
from io import BytesIO
import pymongo
from pymongo import MongoClient
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import base64
import datetime
DATABASE_NAME = 'marble'
POSTS_COLLECTION = 'posts'
PROCESSED_POSTS_COLLECTION = 'processed_posts'
pool_time = 5 # Seconds
# variables that are accessible from anywhere
commonDataStruct = {}
# lock to control access to variable
dataLock = threading.Lock()
# thread handler
yourThread = threading.Thread()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global variables
app_name = "plotter-dous"
try:
ni.ifaddresses('eth0')
app_ip = ni.ifaddresses('eth0')[2][0]['addr']
except Exception:
app_ip = "localhost"
app_host = socket.getfqdn()
app_port = 8084
secure_app_port = 8443
eureka_url = "http://registry:1111/eureka/"
app = create_app()
if __name__ == '__main__':
app.run(host="0.0.0.0", port=app_port)
# plotTopic("Apple Microsoft", {
# 'title': 'Titlte', 'description': 'Dscription'})
#input("Press Enter to continue...")
| 29.075697 | 121 | 0.565908 |
cca2291d6cc0c07dfc8cf1b487d638a6d05c267b | 3,274 | py | Python | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/sm_19991228.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/sm_19991228.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/sm_19991228.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | #Example for Jon Smirl on 28 Dec 1999, originally by Steve Muench, with improvements by Mike Brown and Jeremy Richman
from Xml.Xslt import test_harness
sheet_1 = """<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match="/">
<html>
<head/>
<body>
<xsl:apply-templates/>
</body>
</html>
</xsl:template>
<xsl:template match="p">
<p><xsl:apply-templates/></p>
</xsl:template>
<xsl:template match="programlisting">
<span style="font-family:monospace">
<xsl:call-template name="br-replace">
<xsl:with-param name="word" select="."/>
</xsl:call-template>
</span>
</xsl:template>
<xsl:template name="br-replace">
<xsl:param name="word"/>
<!-- </xsl:text> on next line on purpose to get newline -->
<xsl:variable name="cr"><xsl:text>
</xsl:text></xsl:variable>
<xsl:choose>
<xsl:when test="contains($word,$cr)">
<xsl:value-of select="substring-before($word,$cr)"/>
<br/>
<xsl:call-template name="br-replace">
<xsl:with-param name="word" select="substring-after($word,$cr)"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$word"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
sheet_2 = """<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match="/">
<html>
<head/>
<body>
<xsl:apply-templates/>
</body>
</html>
</xsl:template>
<xsl:template match="p">
<p><xsl:apply-templates/></p>
</xsl:template>
<xsl:template match="programlisting">
<span style="font-family:monospace">
<xsl:apply-templates/>
</span>
</xsl:template>
<xsl:template match="programlisting/text()[contains(.,'
')]">
<xsl:call-template name="br-replace">
<xsl:with-param name="text" select="."/>
</xsl:call-template>
</xsl:template>
<xsl:template name="br-replace">
<xsl:param name="text"/>
<!-- </xsl:text> on next line on purpose to get newline -->
<xsl:choose>
<xsl:when test="contains($text, '
')">
<xsl:value-of select="substring-before($text, '
')"/>
<br/>
<xsl:call-template name="br-replace">
<xsl:with-param name="text" select="substring-after($text, '
')"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$text"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
source_1="""<doc>
<p>This is some text.</p>
<programlisting><![CDATA[This is a paragraph
with some newlines
does it work?]]></programlisting>
</doc>"""
expected_1 = """<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=iso-8859-1'>
</head>
<body>
<p>This is some text.</p>
<span style='font-family:monospace'>This is a paragraph<br> with some newlines<br> does it work?</span>
</body>
</html>"""
| 25.578125 | 117 | 0.640806 |
cca2be577b6cb5b5ef2f77c8c187c1f9904195fb | 2,687 | py | Python | django_docutils/favicon/rst/transforms/favicon.py | tony/django-docutils | ed3e089728507a0f579a62bcb182f283bc59929c | [
"MIT"
] | 10 | 2017-04-28T00:19:10.000Z | 2020-07-22T15:27:09.000Z | django_docutils/favicon/rst/transforms/favicon.py | tony/django-docutils | ed3e089728507a0f579a62bcb182f283bc59929c | [
"MIT"
] | 231 | 2017-01-17T04:47:51.000Z | 2022-03-30T03:03:42.000Z | django_docutils/favicon/rst/transforms/favicon.py | tony/django-docutils | ed3e089728507a0f579a62bcb182f283bc59929c | [
"MIT"
] | 1 | 2019-01-25T14:42:15.000Z | 2019-01-25T14:42:15.000Z | import tldextract
from django.db.models import Q
from docutils import nodes
from docutils.transforms import Transform
from django_docutils.favicon.models import get_favicon_model
from ..nodes import icon
Favicon = get_favicon_model()
def resolve_favicon(url):
    """Look up a stored favicon for the website behind *url*.

    The URL is reduced to its fully-qualified domain name
    (e.g. ``forums.bbc.co.uk``), which is the lookup key in the db.

    :param url: URL to any page on a website
    :type url: str
    :returns: Full Storage based favicon url path, or None
    :rtype: str|None
    """
    domain = tldextract.extract(url).fqdn
    try:
        record = Favicon.objects.get(domain=domain)
        return record.favicon.url
    except (ValueError, Favicon.DoesNotExist):
        # No row for this domain, or the favicon FileField has no file.
        return None
def plain_references(node):
    """Docutils traversal condition: keep only plain http(s) references.

    A ``nodes.reference`` that already carries CSS classes came from xref
    icon handling and is skipped; one without a ``refuri`` is junk.

    :returns: True if it's a URL we want to lookup favicons for
    :rtype: bool
    """
    if not isinstance(node, nodes.reference):
        return False
    # Skip nodes already styled by the xref icon machinery.
    if 'classes' in node and node['classes']:
        return False
    # Skip junk references that carry no target URI.
    if 'refuri' not in node:
        return False
    return node['refuri'].startswith('http')
| 30.885057 | 81 | 0.61965 |
cca383b23457a782befbce9c96268d9161502ad0 | 1,733 | py | Python | xnmt/model_context.py | marcintustin/xnmt | f315fc5e493d25746bbde46d2c89cea3410d43df | [
"Apache-2.0"
] | null | null | null | xnmt/model_context.py | marcintustin/xnmt | f315fc5e493d25746bbde46d2c89cea3410d43df | [
"Apache-2.0"
] | null | null | null | xnmt/model_context.py | marcintustin/xnmt | f315fc5e493d25746bbde46d2c89cea3410d43df | [
"Apache-2.0"
] | null | null | null | import dynet as dy
import os
from xnmt.serializer import Serializable
| 38.511111 | 100 | 0.709175 |
cca508227383d8fc79a193435ae1d2995d0eb8b7 | 3,100 | py | Python | utils.py | Akshayvm98/Django-School | 723d52db2cd3bc7665680a3adaf8687f97836d48 | [
"MIT"
] | 26 | 2015-08-04T00:13:27.000Z | 2021-03-19T01:01:14.000Z | utils.py | Akshayvm98/Django-School | 723d52db2cd3bc7665680a3adaf8687f97836d48 | [
"MIT"
] | null | null | null | utils.py | Akshayvm98/Django-School | 723d52db2cd3bc7665680a3adaf8687f97836d48 | [
"MIT"
] | 28 | 2015-01-19T15:10:15.000Z | 2020-10-27T11:22:21.000Z | import csv
from django.http import HttpResponse, HttpResponseForbidden
from django.template.defaultfilters import slugify
from django.db.models.loading import get_model
def admin_list_export(request, model_name, app_label, queryset=None, fields=None, list_display=True):
    """Export an admin changelist as CSV.

    Put the following line in your urls.py BEFORE your admin include::

        (r'^admin/(?P<app_label>[\d\w]+)/(?P<model_name>[\d\w]+)/csv/',
         'util.csv_view.admin_list_export'),

    :param queryset: optional queryset to export; defaults to all objects
        of the model resolved from ``app_label``/``model_name``.
    :param fields: explicit field names to export; when omitted and
        ``list_display`` is True, the registered ModelAdmin's
        ``list_display`` is used instead.
    """
    if not request.user.is_staff:
        return HttpResponseForbidden()
    # Must be an ``is None`` test, not truthiness: evaluating a queryset's
    # truth value executes a query, and a deliberately-empty queryset
    # would be silently replaced with ``model.objects.all()``.
    if queryset is None:
        model = get_model(app_label, model_name)
        queryset = model.objects.all()
    # Re-apply the changelist's GET filters, minus the ordering params.
    filters = dict()
    for key, value in request.GET.items():
        if key not in ('ot', 'o'):
            filters[str(key)] = str(value)
    if filters:
        queryset = queryset.filter(**filters)
    if not fields and list_display:
        from django.contrib import admin
        ld = admin.site._registry[queryset.model].list_display
        if ld and len(ld) > 0:
            fields = ld
    return export(queryset, fields)
"""
Create your own change_list.html for your admin view and put something like this in it:
{% block object-tools %}
<ul class="object-tools">
<li><a href="csv/{%if request.GET%}?{{request.GET.urlencode}}{%endif%}" class="addlink">Export to CSV</a></li>
{% if has_add_permission %}
<li><a href="add/{% if is_popup %}?_popup=1{% endif %}" class="addlink">{% blocktrans with cl.opts.verbose_name|escape as name %}Add {{ name }}{% endblocktrans %}</a></li>
{% endif %}
</ul>
{% endblock %}
"""
import datetime
from django.http import HttpResponseRedirect #, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
| 36.470588 | 179 | 0.639032 |
cca9c51d89c5109307f0fb8fc9ff82831401ed2e | 1,668 | py | Python | hard-gists/a027a9fc5aac66e6a382/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/a027a9fc5aac66e6a382/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/a027a9fc5aac66e6a382/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | # -*- coding: utf-8 -*-
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: MIT
#
import wx
import serial
#A new custom class that extends the wx.Frame
#Main program
if __name__ == '__main__':
# Connect to serial port first
try:
arduino = serial.Serial('/dev/tty.usbmodem1421', 9600)
except:
print "Failed to connect"
exit()
#Create and launch the wx interface
app = wx.App()
MyFrame(None, 'Serial data test')
app.MainLoop()
#Close the serial connection
arduino.close()
| 23.492958 | 71 | 0.601918 |
ccae60674a2d5d72a4d3496efd86de39ff78e0dc | 13,807 | py | Python | src/python/pants/backend/docker/util_rules/docker_build_context_test.py | asherf/pants | e010b93c4123b4446a631cac5db0b7ea15634686 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/docker/util_rules/docker_build_context_test.py | asherf/pants | e010b93c4123b4446a631cac5db0b7ea15634686 | [
"Apache-2.0"
] | 14 | 2021-05-03T13:54:41.000Z | 2022-03-30T10:20:58.000Z | src/python/pants/backend/docker/util_rules/docker_build_context_test.py | asherf/pants | e010b93c4123b4446a631cac5db0b7ea15634686 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Any, ContextManager
import pytest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import docker_build_args
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
DockerVersionContext,
)
from pants.backend.docker.util_rules.docker_build_context import rules as context_rules
from pants.backend.docker.util_rules.docker_build_env import docker_build_environment_vars
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget, ShellSourceTarget
from pants.backend.shell.target_types import rules as shell_target_types_rules
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.pytest_util import no_exception
from pants.testutil.rule_runner import QueryRule, RuleRunner
| 32.640662 | 99 | 0.557833 |
ccb0eb936ec41c62e8c05807faa25bca2c7ad40d | 48 | py | Python | tests/extensions/artificial_intelligence/ai_model/__init__.py | chr0m1ng/blip-sdk-js | 74f404a3ef048a100cc330f78a9751572e31fb0b | [
"MIT"
] | null | null | null | tests/extensions/artificial_intelligence/ai_model/__init__.py | chr0m1ng/blip-sdk-js | 74f404a3ef048a100cc330f78a9751572e31fb0b | [
"MIT"
] | 9 | 2021-05-27T21:08:23.000Z | 2021-06-14T20:10:10.000Z | tests/extensions/artificial_intelligence/ai_model/__init__.py | chr0m1ng/blip-sdk-python | 74f404a3ef048a100cc330f78a9751572e31fb0b | [
"MIT"
] | null | null | null | from .test_ai_model import TestAiModelExtension
| 24 | 47 | 0.895833 |
ccb0f6edadd14edc784ea01c369c3b62583b31ca | 5,100 | py | Python | src/rescuexport/dal.py | karlicoss/rescuexport | 69f5275bfa7cb39a1ba74b99312b605ba340916a | [
"MIT"
] | 6 | 2019-11-28T10:56:53.000Z | 2022-01-10T21:07:40.000Z | src/rescuexport/dal.py | karlicoss/rescuexport | 69f5275bfa7cb39a1ba74b99312b605ba340916a | [
"MIT"
] | null | null | null | src/rescuexport/dal.py | karlicoss/rescuexport | 69f5275bfa7cb39a1ba74b99312b605ba340916a | [
"MIT"
] | 1 | 2020-12-08T14:16:53.000Z | 2020-12-08T14:16:53.000Z | #!/usr/bin/env python3
import logging
from pathlib import Path
import json
from datetime import datetime, timedelta
from typing import Set, Sequence, Any, Iterator
from dataclasses import dataclass
from .exporthelpers.dal_helper import PathIsh, Json, Res, datetime_naive
from .exporthelpers.logging_helper import LazyLogger
logger = LazyLogger(__package__)
seconds = int
_DT_FMT = '%Y-%m-%dT%H:%M:%S'
from typing import Iterable
# todo quick test (dal helper aided: check that DAL can handle fake data)
if __name__ == '__main__':
main()
| 31.097561 | 122 | 0.550392 |
ccb20b99e03872ed73a498ac6c05de75181ce2b4 | 1,287 | py | Python | MuonAnalysis/MuonAssociators/python/muonHLTL1Match_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | MuonAnalysis/MuonAssociators/python/muonHLTL1Match_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | MuonAnalysis/MuonAssociators/python/muonHLTL1Match_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from MuonAnalysis.MuonAssociators.muonL1Match_cfi import *
muonHLTL1Match = cms.EDProducer("HLTL1MuonMatcher",
muonL1MatcherParameters,
# Reconstructed muons
src = cms.InputTag("muons"),
# L1 Muon collection, and preselection on that collection
matched = cms.InputTag("patTrigger"),
# Requests to select the object
matchedCuts = cms.string('coll("hltL1extraParticles")'),
# 90% compatible with documentation at SWGuidePATTrigger#Module_Configuration_AN1
# andOr = cms.bool( False ), # if False, do the 'AND' of the conditions below; otherwise, do the OR
# filterIdsEnum = cms.vstring( '*' ),
# filterIds = cms.vint32( 0 ),
# filterLabels = cms.vstring( '*' ),
# pathNames = cms.vstring( '*' ),
# collectionTags = cms.vstring( 'hltL1extraParticles' ),
resolveAmbiguities = cms.bool( True ), # if True, no more than one reco object can be matched to the same L1 object; precedence is given to the reco ones coming first in the list
# Fake filter lavels for the object propagated to the second muon station
setPropLabel = cms.string("propagatedToM2"),
# Write extra ValueMaps
writeExtraInfo = cms.bool(True),
)
| 40.21875 | 186 | 0.679876 |
ccb315a349fc8cff12751a9ddd2a5e9db7858230 | 632 | py | Python | Curso_em_Video_py3/ex082.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
] | 1 | 2021-05-11T12:39:43.000Z | 2021-05-11T12:39:43.000Z | Curso_em_Video_py3/ex082.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
] | null | null | null | Curso_em_Video_py3/ex082.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
] | null | null | null | lista = list()
while True:
lista.append(int(input("Digite um nmero inteiro:\t")))
while True:
p = str(input("Digitar mais nmeros?\t").strip())[0].upper()
if p in 'SN':
break
else:
print("\033[31mDigite uma opo vlida!\033[m")
if p == 'N':
break
par = list()
impar = list()
for n in lista:
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
par.sort()
impar.sort()
if 0 in par:
par.remove(0)
print(f"Entre os nmeros \033[32m{lista}\033[m, os nmeros pares so: \033[33m{par}\033[m e os nmeros mpares so: \033[34m{impar}\033[m!")
| 26.333333 | 140 | 0.568038 |
ccb4877c2d894de84894acf89db676779bb0786b | 1,212 | py | Python | census_data_downloader/tables/percapitaincome.py | JoeGermuska/census-data-downloader | 0098b9e522b78ad0e30301c9845ecbcc903c62e4 | [
"MIT"
] | 170 | 2019-04-01T01:41:42.000Z | 2022-03-25T21:22:06.000Z | census_data_downloader/tables/percapitaincome.py | JoeGermuska/census-data-downloader | 0098b9e522b78ad0e30301c9845ecbcc903c62e4 | [
"MIT"
] | 68 | 2019-03-31T22:52:43.000Z | 2021-08-30T16:33:54.000Z | census_data_downloader/tables/percapitaincome.py | JoeGermuska/census-data-downloader | 0098b9e522b78ad0e30301c9845ecbcc903c62e4 | [
"MIT"
] | 34 | 2019-04-02T17:57:16.000Z | 2022-03-28T17:22:35.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*
import collections
from census_data_downloader.core.tables import BaseTableConfig
from census_data_downloader.core.decorators import register
| 26.933333 | 65 | 0.782178 |
ccb69ce91869d886c281dd5c097515346b3bb141 | 844 | py | Python | pyskel_bc/cli.py | smnorris/pyskel_bc | d8bdec3e15da6268c9b6a0f3be1fdd6af9737d21 | [
"Apache-2.0"
] | null | null | null | pyskel_bc/cli.py | smnorris/pyskel_bc | d8bdec3e15da6268c9b6a0f3be1fdd6af9737d21 | [
"Apache-2.0"
] | null | null | null | pyskel_bc/cli.py | smnorris/pyskel_bc | d8bdec3e15da6268c9b6a0f3be1fdd6af9737d21 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Skeleton of a CLI
import click
import pyskel_bc
| 30.142857 | 76 | 0.740521 |
ccb7c733b68470d611b304324e4c358b41fac520 | 1,442 | py | Python | project/encryption_utils.py | v2121/redshift-django | b6d66b2ee0443f039cfbad9f53c4ffd4efd156ec | [
"CC0-1.0"
] | null | null | null | project/encryption_utils.py | v2121/redshift-django | b6d66b2ee0443f039cfbad9f53c4ffd4efd156ec | [
"CC0-1.0"
] | null | null | null | project/encryption_utils.py | v2121/redshift-django | b6d66b2ee0443f039cfbad9f53c4ffd4efd156ec | [
"CC0-1.0"
] | null | null | null | import Crypto.Random
from Crypto.Cipher import AES
import hashlib
import base64
SALT_SIZE = 16
AES_MULTIPLE = 16
NUMBER_OF_ITERATIONS = 20
| 31.347826 | 63 | 0.731623 |
ccb976a45584bf29937e03ff9e1aef9ce04de815 | 462 | py | Python | core/xml/utils.py | Magoli1/carla-pre-crash-scenario-generator | 46666309a55e9aab59cebeda33aca7723c1d360c | [
"MIT"
] | 1 | 2022-03-30T08:28:53.000Z | 2022-03-30T08:28:53.000Z | core/xml/utils.py | Magoli1/carla-pre-crash-scenario-generator | 46666309a55e9aab59cebeda33aca7723c1d360c | [
"MIT"
] | null | null | null | core/xml/utils.py | Magoli1/carla-pre-crash-scenario-generator | 46666309a55e9aab59cebeda33aca7723c1d360c | [
"MIT"
] | null | null | null | from xml.etree import ElementTree
from xml.dom import minidom
def get_pretty_xml(tree):
    """Render an XML tree as an indented, human-readable string.

    :param tree: XML-tree
    :type tree: object
    :returns: (Prettified) stringified version of the passed xml-tree
    :rtype: str
    """
    raw_bytes = ElementTree.tostring(tree.getroot(), 'utf-8')
    return minidom.parseString(raw_bytes).toprettyxml(indent=" ")
| 28.875 | 69 | 0.71645 |
ccba7105c97f04202293ba5c9e3bb9286a1e1c30 | 1,745 | py | Python | tests/test_main.py | giulionf/GetOldTweets3 | 038b8fed7da27300e6c611d3c0fd617588075a58 | [
"MIT"
] | null | null | null | tests/test_main.py | giulionf/GetOldTweets3 | 038b8fed7da27300e6c611d3c0fd617588075a58 | [
"MIT"
] | null | null | null | tests/test_main.py | giulionf/GetOldTweets3 | 038b8fed7da27300e6c611d3c0fd617588075a58 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import GetOldTweets3 as Got3
if sys.version_info[0] < 3:
raise Exception("Python 2.x is not supported. Please upgrade to 3.x")
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
| 33.557692 | 92 | 0.681948 |
ccbaa690673254ea1cda4e119201ebd6a7af3c57 | 421 | py | Python | extsum/__main__.py | streof/extsum | 45949a6a6aff65ceaf2a4bdc70f5b4d9660fca5f | [
"MIT"
] | null | null | null | extsum/__main__.py | streof/extsum | 45949a6a6aff65ceaf2a4bdc70f5b4d9660fca5f | [
"MIT"
] | null | null | null | extsum/__main__.py | streof/extsum | 45949a6a6aff65ceaf2a4bdc70f5b4d9660fca5f | [
"MIT"
] | null | null | null | import extsum as ext
URL = "https://i.picsum.photos/id/42/1/1.jpg"
# Uncomment for random Picsum photo
# URL = "https://picsum.photos/1/1"
if __name__ == '__main__':
    # Load the Picsum photo and parse it for an embedded ID
    parsed = ext.Parse(ext.Load(URL))

    # Report the outcome of the ID search
    found = parsed.find_id()
    if found is not None:
        print(f"Found ID {found}")
    else:
        print("Couldn't find any ID")
| 23.388889 | 45 | 0.631829 |
ccbaec8553da0b692edf4bc23f5c78797c64aa03 | 931 | py | Python | rplugin/python3/denite/kind/lab_browse.py | lighttiger2505/denite-lab | 611e5e081d049d79999a8c0a0f38c2466d8ca970 | [
"MIT"
] | 1 | 2018-02-26T15:27:03.000Z | 2018-02-26T15:27:03.000Z | rplugin/python3/denite/kind/lab_browse.py | lighttiger2505/denite-lab | 611e5e081d049d79999a8c0a0f38c2466d8ca970 | [
"MIT"
] | null | null | null | rplugin/python3/denite/kind/lab_browse.py | lighttiger2505/denite-lab | 611e5e081d049d79999a8c0a0f38c2466d8ca970 | [
"MIT"
] | null | null | null | import subprocess
from .base import Base
| 30.032258 | 64 | 0.525242 |
ccbbb657919673062f15afd66c5a069d9e36de11 | 3,358 | py | Python | Betsy/Betsy/modules/plot_sample_pca.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 9 | 2017-01-13T02:38:41.000Z | 2021-04-08T00:44:39.000Z | Betsy/Betsy/modules/plot_sample_pca.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | null | null | null | Betsy/Betsy/modules/plot_sample_pca.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 4 | 2017-01-05T16:25:25.000Z | 2019-12-12T20:07:38.000Z | from Module import AbstractModule
## def plot_pca(filename, result_fig, opts='b', legend=None):
## import arrayio
## from genomicode import jmath, mplgraph
## from genomicode import filelib
## R = jmath.start_R()
## jmath.R_equals(filename, 'filename')
## M = arrayio.read(filename)
## labels = M._col_names['_SAMPLE_NAME']
## data = M.slice()
## jmath.R_equals(data, 'X')
## R('NUM.COMPONENTS <- 2')
## R('S <- svd(X)')
## R('U <- S$u[,1:NUM.COMPONENTS]')
## R('D <- S$d[1:NUM.COMPONENTS]')
## # Project the data onto the first 2 components.
## R('x <- t(X) %*% U %*% diag(D)')
## x1 = R['x'][0:M.ncol()]
## x2 = R['x'][M.ncol():]
## xlabel = 'Principal Component 1'
## ylabel = 'Principal Component 2'
## if len(opts) > 1:
## fig = mplgraph.scatter(
## x1, x2, xlabel=xlabel, ylabel=ylabel, color=opts,
## legend=legend)
## else:
## fig = mplgraph.scatter(
## x1, x2, xlabel=xlabel, ylabel=ylabel, color=opts,
## label=labels)
## fig.savefig(result_fig)
## assert filelib.exists_nz(result_fig), 'the plot_pca.py fails'
| 32.921569 | 76 | 0.532758 |
ccbc5e88899c66f97d3d1a9a12e0ddc302dbc43e | 1,098 | py | Python | scripts/seg_vs_dog_plots.py | AbigailMcGovern/platelet-segmentation | 46cd87b81fc44473b07a2bebed1e6134b2582348 | [
"BSD-3-Clause"
] | 1 | 2022-02-01T23:40:38.000Z | 2022-02-01T23:40:38.000Z | scripts/seg_vs_dog_plots.py | AbigailMcGovern/platelet-segmentation | 46cd87b81fc44473b07a2bebed1e6134b2582348 | [
"BSD-3-Clause"
] | 3 | 2021-03-12T02:03:15.000Z | 2021-03-31T00:39:05.000Z | scripts/seg_vs_dog_plots.py | AbigailMcGovern/platelet-segmentation | 46cd87b81fc44473b07a2bebed1e6134b2582348 | [
"BSD-3-Clause"
] | 1 | 2021-04-06T23:23:32.000Z | 2021-04-06T23:23:32.000Z | from plots import plot_experiment_APs, plot_experiment_no_diff, experiment_VI_plots
import os
# paths, names, title, out_dir, out_name
# NOTE(review): data_dir is a hard-coded, user-specific absolute path —
# this script only runs as-is on the original author's machine.
data_dir = '/Users/amcg0011/Data/pia-tracking/dl-results/210512_150843_seed_z-1_y-1_x-1_m_centg'
suffix = 'seed_z-1_y-1_x-1_m_centg'
out_dir = os.path.join(data_dir, 'DL-vs-Dog')
# CSV pairs: first entry is the deep-learning result, second the DoG baseline
ap_paths = [os.path.join(data_dir, suffix + '_validation_AP.csv'),
            os.path.join(data_dir, 'DoG-segmentation_average_precision.csv')]
nd_paths = [os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centg_validation_metrics.csv'),
            os.path.join(data_dir, 'DoG-segmentation_metrics.csv')]
vi_paths = [
    os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centgvalidation_VI.csv'),
    os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centgvalidation_VI_DOG-seg.csv')
]
# The AP and number-difference plots are currently disabled; only the VI
# subscore comparison is produced.
#plot_experiment_APs(ap_paths, ['DL', 'DoG'], 'Average precision: DL vs Dog', out_dir, 'AP_DL-vs-Dog')
#plot_experiment_no_diff(nd_paths, ['DL', 'DoG'], 'Number difference: DL vs Dog', out_dir, 'ND_DL-vs-Dog')
experiment_VI_plots(vi_paths, ['DL', 'DoG'], 'VI Subscores: DL vs DoG', 'VI_DL-vs-DoG', out_dir) | 64.588235 | 106 | 0.736794 |
ccbcc99c190554ac665c0c7ffb46b19b240c78b3 | 2,925 | py | Python | src/pyfme/models/state/velocity.py | gaofeng2020/PyFME | 26b76f0622a8dca0e24eb477a6fb4a8b2aa604d7 | [
"MIT"
] | 199 | 2015-12-29T19:49:42.000Z | 2022-03-19T14:31:24.000Z | src/pyfme/models/state/velocity.py | gaofeng2020/PyFME | 26b76f0622a8dca0e24eb477a6fb4a8b2aa604d7 | [
"MIT"
] | 126 | 2015-09-23T11:15:42.000Z | 2020-07-29T12:27:22.000Z | src/pyfme/models/state/velocity.py | gaofeng2020/PyFME | 26b76f0622a8dca0e24eb477a6fb4a8b2aa604d7 | [
"MIT"
] | 93 | 2015-12-26T13:02:29.000Z | 2022-03-19T14:31:13.000Z | """
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Velocity
--------
The aircraft state has
"""
from abc import abstractmethod
import numpy as np
# TODO: think about generic changes from body to horizon that could be used for
# velocity, accelerations...
# If also changes from attitude of elements in the body (such as sensors) to
# body and horizon coordinates are implemented it would be useful!
from pyfme.utils.coordinates import body2hor, hor2body
| 23.58871 | 79 | 0.563077 |
ccbe05761dc731218eefcccd4c39733dc739862d | 68 | py | Python | tests/CompileTests/Python_tests/test2011_029.py | maurizioabba/rose | 7597292cf14da292bdb9a4ef573001b6c5b9b6c0 | [
"BSD-3-Clause"
] | 488 | 2015-01-09T08:54:48.000Z | 2022-03-30T07:15:46.000Z | tests/CompileTests/Python_tests/test2011_029.py | sujankh/rose-matlab | 7435d4fa1941826c784ba97296c0ec55fa7d7c7e | [
"BSD-3-Clause"
] | 174 | 2015-01-28T18:41:32.000Z | 2022-03-31T16:51:05.000Z | tests/CompileTests/Python_tests/test2011_029.py | sujankh/rose-matlab | 7435d4fa1941826c784ba97296c0ec55fa7d7c7e | [
"BSD-3-Clause"
] | 146 | 2015-04-27T02:48:34.000Z | 2022-03-04T07:32:53.000Z | # test while statements
# Both loop bodies are dead code: the condition 0 is falsy, so neither
# loop body ever executes (this file exercises parser handling of while).
while 0:
    1
while 0:
    2
else:
    # while/else runs when the loop exits without `break`; since the body
    # never runs at all, this branch always executes.
    3
| 6.8 | 23 | 0.558824 |
ccbffdcc60fb438aee22b3f831c544750ef569d6 | 246 | py | Python | minds/sections/__init__.py | Atherz97/minds-bot | 965f44ba39ad816947e4bcba051a86fa1c5ee05c | [
"MIT"
] | 3 | 2020-05-01T10:59:59.000Z | 2022-01-18T01:40:34.000Z | minds/sections/__init__.py | Atherz97/minds-bot | 965f44ba39ad816947e4bcba051a86fa1c5ee05c | [
"MIT"
] | null | null | null | minds/sections/__init__.py | Atherz97/minds-bot | 965f44ba39ad816947e4bcba051a86fa1c5ee05c | [
"MIT"
] | null | null | null | from minds.sections.newsfeed import NewsfeedAPI
from minds.sections.channel import ChannelAPI
from minds.sections.notifications import NotificationsAPI
from minds.sections.posting import PostingAPI
from minds.sections.interact import InteractAPI
| 41 | 57 | 0.878049 |
ccc14d33e30db584c95153ccde86ea32ce7c48be | 8,331 | py | Python | wavelet_prosody_toolkit/wavelet_prosody_toolkit/cwt_global_spectrum.py | eugenemfu/TTS_HW | 34b3a32da2904578ddbd86bfd9529798cc3a1e9f | [
"BSD-3-Clause"
] | 115 | 2019-08-06T08:34:33.000Z | 2022-02-15T09:44:40.000Z | wavelet_prosody_toolkit/wavelet_prosody_toolkit/cwt_global_spectrum.py | eugenemfu/TTS_HW | 34b3a32da2904578ddbd86bfd9529798cc3a1e9f | [
"BSD-3-Clause"
] | 11 | 2019-08-13T15:27:07.000Z | 2022-03-28T15:59:39.000Z | wavelet_prosody_toolkit/wavelet_prosody_toolkit/cwt_global_spectrum.py | eugenemfu/TTS_HW | 34b3a32da2904578ddbd86bfd9529798cc3a1e9f | [
"BSD-3-Clause"
] | 32 | 2019-01-30T12:00:15.000Z | 2022-03-28T10:06:39.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
- Antti Suni <antti.suni@helsinki.fi>
- Sbastien Le Maguer <lemagues@tcd.ie>
DESCRIPTION
usage: cwt_global_spectrum.py [-h] [-v] [-o OUTPUT]
[-P]
input_file
Tool for extracting global wavelet spectrum of speech envelope
introduced for second language fluency estimation in the following paper:
@inproceedings{suni2019characterizing,
title={Characterizing second language fluency with global wavelet spectrum},
author={Suni, Antti and Kallio, Heini and Benu{\v{s}}, {\v{S}}tefan and {\v{S}}imko, Juraj},
booktitle={International Congress of Phonetic Sciences},
pages={1947--1951},
year={2019},
organization={Australasian Speech Science and Technology Association Inc.}
}
positional arguments:
input_file Input signal or F0 file
optional arguments:
-h, --help show this help message and exit
-v, --verbosity increase output verbosity
-o OUTPUT, --output OUTPUT
output directory for analysis or filename for synthesis.
(Default: input_file directory [Analysis] or <input_file>.f0 [Synthesis])
-P, --plot Plot the results
You should be able to see peak around 4Hz, corresponding to syllable rate.
For longer speech files, lower frequency peaks related to phrasing should appear.
Synthetic test file with 8Hz, 4Hz and 1Hz components is included in sample directory.
LICENSE
See https://github.com/asuni/wavelet_prosody_toolkit/blob/master/LICENSE.txt
"""
# System/default
import sys
import os
# Arguments
import argparse
# Messaging/logging
import traceback
import time
import logging
# Math/plot
import numpy as np
import matplotlib.ticker
import matplotlib.pyplot as plt
# Libraries
from wavelet_prosody_toolkit.prosody_tools import cwt_utils as cwt_utils
from wavelet_prosody_toolkit.prosody_tools import misc as misc
from wavelet_prosody_toolkit.prosody_tools import energy_processing as energy_processing
###############################################################################
# global constants
###############################################################################
LEVEL = [logging.WARNING, logging.INFO, logging.DEBUG]
###############################################################################
# Functions
###############################################################################
def calc_global_spectrum(wav_file, period=5, n_scales=60, plot=False):
    """Compute the global wavelet spectrum of a speech waveform's envelope.

    The waveform is resampled to 16 kHz, its amplitude envelope extracted
    (Hilbert method), cube-root compressed and normalized, then analyzed
    with a continuous wavelet transform (Morlet mother wavelet).  The
    global spectrum is the mean wavelet power over time at each scale.

    :param wav_file: path to the input wave file
    :param period: Morlet wavelet period; larger values sharpen the spectrum
    :param n_scales: number of wavelet scales to analyze
    :param plot: if True, display the scalogram and the global spectrum
    :returns: tuple ``(power_spec, freq)`` — per-scale mean power and the
        corresponding scale frequencies
    """
    # Extract signal envelope, scale and normalize
    (fs, waveform) = misc.read_wav(wav_file)
    waveform = misc.resample(waveform, fs, 16000)
    energy = energy_processing.extract_energy(waveform, min_freq=30, method="hilbert")
    # Clamp negatives, then cube-root compress (the +0.1 avoids cbrt(0) flatness)
    energy[energy<0] = 0
    energy = np.cbrt(energy+0.1)
    params = misc.normalize_std(energy)
    # perform continous wavelet transform on envelope with morlet wavelet
    # increase _period to get sharper spectrum
    matrix, scales, freq = cwt_utils.cwt_analysis(params, first_freq = 16, num_scales = n_scales, scale_distance = 0.1,period=period, mother_name="Morlet",apply_coi=True)
    # power, arbitrary scaling to prevent underflow
    p_matrix = (abs(matrix)**2).astype('float32')*1000.0
    # Average power across the time axis -> one value per scale
    power_spec = np.nanmean(p_matrix,axis=1)
    if plot:
        # Left panel: scalogram over time; right panel: global spectrum
        f, wave_pics = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[5, 1]}, sharey=True)
        f.subplots_adjust(hspace=10)
        f.subplots_adjust(wspace=0)
        wave_pics[0].set_ylim(0, n_scales)
        wave_pics[0].set_xlabel("Time(m:s)")
        wave_pics[0].set_ylabel("Frequency(Hz)")
        wave_pics[1].set_xlabel("power")
        wave_pics[1].tick_params(labelright=True)
        fname = os.path.basename(wav_file)
        title = "CWT Morlet(p="+str(period)+") global spectrum, "+ fname
        wave_pics[0].contourf(p_matrix, 100)
        wave_pics[0].set_title(title, loc="center")
        # Overlay the (scaled) normalized envelope on the scalogram
        wave_pics[0].plot(params*3, color="white",alpha=0.5)
        # Label only "round enough" frequencies to keep the axis readable
        freq_labels = [round(x,3)
                       if (np.isclose(x, round(x)) or
                           (x < 2 and np.isclose(x*100., round(x*100))) or
                           (x < 0.5 and np.isclose(x*10000., round(x*10000))))
                       else ""
                       for x in list(freq)]
        wave_pics[0].set_yticks(np.linspace(0, len(freq_labels)-1, len(freq_labels)))
        wave_pics[0].set_yticklabels(freq_labels)
        # assumes a 200-frames-per-second envelope rate for the mm:ss axis — TODO confirm
        formatter = matplotlib.ticker.FuncFormatter(lambda ms, x: time.strftime('%M:%S', time.gmtime(ms // 200)))
        wave_pics[0].xaxis.set_major_formatter(formatter)
        wave_pics[1].grid(axis="y")
        wave_pics[1].plot(power_spec,np.linspace(0,len(power_spec), len(power_spec)),"-")
        plt.show()
    return (power_spec, freq)
###############################################################################
# Main function
###############################################################################
def main():
    """Compute and save the global wavelet spectrum for the input wave file.

    Reads the parsed command-line ``args`` (wav_file, output_dir, plot) and
    writes ``<stem>.spec.txt`` / ``<stem>.freqs.txt`` into the output
    directory.
    """
    global args

    period = 5
    n_scales = 60

    # Run the analysis on the file given on the command line
    spectrum, frequencies = calc_global_spectrum(args.wav_file, period, n_scales, args.plot)

    # Results go next to the input unless an explicit output directory was given
    target_dir = args.output_dir if args.output_dir is not None else os.path.dirname(args.wav_file)
    os.makedirs(target_dir, exist_ok=True)
    stem = os.path.splitext(os.path.basename(args.wav_file))[0]
    prefix = os.path.join(target_dir, stem)
    np.savetxt(prefix + ".spec.txt", spectrum, fmt="%.5f", newline=" ")
    np.savetxt(prefix + ".freqs.txt", frequencies, fmt="%.5f", newline=" ")
###############################################################################
# Envelopping
###############################################################################
if __name__ == '__main__':
    try:
        parser = argparse.ArgumentParser(description="")

        # Add options
        parser.add_argument("-l", "--log_file", default=None,
                            help="Logger file")
        parser.add_argument("-o", "--output_dir", default=None, type=str,
                            help="The output directory (if not defined, use the same directory than the wave file)")
        parser.add_argument("-P", "--plot", default=False, action="store_true",
                            help="Plot the results")
        parser.add_argument("-v", "--verbosity", action="count", default=0,
                            help="increase output verbosity")

        # Add arguments
        parser.add_argument("wav_file", help="The input wave file")

        # Parsing arguments
        args = parser.parse_args()

        # create logger and formatter
        logger = logging.getLogger()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # Verbose level => logging level (clamped to the highest defined level)
        log_level = args.verbosity
        if (args.verbosity >= len(LEVEL)):
            log_level = len(LEVEL) - 1
            logger.setLevel(log_level)
            logging.warning("verbosity level is too high, I'm gonna assume you're taking the highest (%d)" % log_level)
        else:
            logger.setLevel(LEVEL[log_level])

        # create console handler
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        logger.addHandler(ch)

        # create file handler (optional, only when -l/--log_file was given)
        if args.log_file is not None:
            fh = logging.FileHandler(args.log_file)
            logger.addHandler(fh)

        # Debug time
        start_time = time.time()
        logger.info("start time = " + time.asctime())

        # Running main function <=> run application
        main()

        # Debug time
        logging.info("end time = " + time.asctime())
        logging.info('TOTAL TIME IN MINUTES: %02.2f' %
                     ((time.time() - start_time) / 60.0))

        # Exit program
        sys.exit(0)
    except KeyboardInterrupt as e:  # Ctrl-C
        raise e
    except SystemExit:  # sys.exit()
        pass
    except Exception as e:
        logging.error('ERROR, UNEXPECTED EXCEPTION')
        logging.error(str(e))
        traceback.print_exc(file=sys.stderr)
        sys.exit(-1)
    else:
        # NOTE(review): this branch only runs when the try block completes
        # without raising; sys.exit(0) above raises SystemExit (caught by the
        # handler), so this usage message is effectively unreachable.
        print("usage: cwt_global_spectrum.py <audiofile>")
| 35.451064 | 171 | 0.591886 |
ccc1a90b2d04001d5b226461f9c2d2ad26294437 | 4,177 | py | Python | sdlc.py | phillipfurtado/mysearchengine | d28ad99b2729fba9df3f5f606c9aeafa2b257280 | [
"Apache-2.0"
] | null | null | null | sdlc.py | phillipfurtado/mysearchengine | d28ad99b2729fba9df3f5f606c9aeafa2b257280 | [
"Apache-2.0"
] | null | null | null | sdlc.py | phillipfurtado/mysearchengine | d28ad99b2729fba9df3f5f606c9aeafa2b257280 | [
"Apache-2.0"
] | null | null | null | import sys
import math
import operator
import re
from os import listdir
from os.path import isfile, join
from PIL import Image
IMAGE_WIDTH = 355
IMAGE_HEIGHT = 355
BORDER_DISCOUNT = 0.11
BLOCK_SIZE = 36
BLOCKS_SIZE = BLOCK_SIZE * BLOCK_SIZE
X_BLOCK_SIZE = IMAGE_WIDTH / BLOCK_SIZE;
Y_BLOCK_SIZE = IMAGE_HEIGHT / BLOCK_SIZE;
NORTH_POINT = IMAGE_HEIGHT * BORDER_DISCOUNT
WEST_POINT = IMAGE_WIDTH * BORDER_DISCOUNT
SOUTH_POINT = IMAGE_HEIGHT - WEST_POINT
EAST_POINT = IMAGE_WIDTH - WEST_POINT
HALF_WIDTH = IMAGE_WIDTH / 2
HALF_HEIGHT = IMAGE_HEIGHT / 2
if __name__ == '__main__':
main(sys.argv)
| 27.123377 | 142 | 0.627484 |
ccc2302b262199c91606683ac0c0ea01d97056a4 | 1,649 | py | Python | betahex/training/supervised.py | StarvingMarvin/betahex | 0626cf4d003e94423f34f3d83149702a5557ddb8 | [
"MIT"
] | 2 | 2019-03-17T07:09:14.000Z | 2020-05-04T17:40:51.000Z | betahex/training/supervised.py | StarvingMarvin/betahex | 0626cf4d003e94423f34f3d83149702a5557ddb8 | [
"MIT"
] | null | null | null | betahex/training/supervised.py | StarvingMarvin/betahex | 0626cf4d003e94423f34f3d83149702a5557ddb8 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.contrib import learn
from betahex.features import Features
from betahex.training.common import make_train_model, make_policy_input_fn, accuracy
from betahex.models import MODEL
tf.logging.set_verbosity(tf.logging.INFO)
if __name__ == '__main__':
tf.app.run()
| 25.765625 | 84 | 0.627653 |
ccc373e2e0f33ced1981ea3d8ae722d60f288cc2 | 9,747 | py | Python | MeterToTransPairingScripts.py | sandialabs/distribution-system-model-calibration | 55493cc03b8ebcc5a0f2e7d2ff9092cb2e608f90 | [
"BSD-3-Clause"
] | 1 | 2021-11-12T21:30:35.000Z | 2021-11-12T21:30:35.000Z | MeterTransformerPairing/MeterToTransPairingScripts.py | sandialabs/distribution-system-model-calibration | 55493cc03b8ebcc5a0f2e7d2ff9092cb2e608f90 | [
"BSD-3-Clause"
] | null | null | null | MeterTransformerPairing/MeterToTransPairingScripts.py | sandialabs/distribution-system-model-calibration | 55493cc03b8ebcc5a0f2e7d2ff9092cb2e608f90 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
BSD 3-Clause License
Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##############################################################################
# Import standard Python libraries
import sys
import numpy as np
#import datetime
#from copy import deepcopy
from pathlib import Path
import pandas as pd
# Import custom libraries
import M2TUtils
import M2TFuncs
###############################################################################
###############################################################################
# Input Data Notes
# custIDInput: list of str (customers) - the list of customer IDs as strings
# transLabelsTrue: numpy array of int (1,customers) - the transformer labels for each customer as integers. This is the ground truth transformer labels
# transLabelsErrors: numpy array of int (1,customers) - the transformer labels for each customer which may contain errors.
# In the sample data, customer_3 transformer was changed from 1 to 2 and customer_53 transformer was changed from 23 to 22
# voltageInput: numpy array of float (measurements,customers) - the raw voltage AMI measurements for each customer in Volts
# pDataInput: numpy array of float (measurements, customers) - the real power measurements for each customer in Watts
# qDataInput: numpy array of float (measurements, customers) - the reactive power measurements for each customer in VAr
# Note that the indexing of all variables above should match in the customer index, i.e. custIDInput[0], transLabelsInput[0,0], voltageInput[:,0], pDataInput[:,0], and qDataInput[:,0] should all be the same customer
###############################################################################
# Load Sample data
currentDirectory = Path.cwd()
filePath = Path(currentDirectory.parent,'SampleData')
filename = Path(filePath,'VoltageData_AMI.npy')
voltageInput = np.load(filename)
filename = Path(filePath,'RealPowerData_AMI.npy')
pDataInput = np.load(filename)
filename = Path(filePath,'ReactivePowerData_AMI.npy')
qDataInput = np.load(filename)
filename = Path(filePath,'TransformerLabelsTrue_AMI.npy')
transLabelsTrue = np.load(filename)
filename = Path(filePath,'TransformerLabelsErrors_AMI.npy')
transLabelsErrors = np.load(filename)
filename = Path(filePath,'CustomerIDs_AMI.npy')
custIDInput = list(np.load(filename))
###############################################################################
###############################################################################
# Data pre-processing
# Convert the raw voltage measurements into per unit and difference (delta voltage) representation
vPU = M2TUtils.ConvertToPerUnit_Voltage(voltageInput)
vDV = M2TUtils.CalcDeltaVoltage(vPU)
##############################################################################
#
# Error Flagging Section - Correlation Coefficient Analysis
# Calculate CC Matrix
ccMatrix,noVotesIndex,noVotesIDs = M2TUtils.CC_EnsMedian(vDV,windowSize=384,custID=custIDInput)
# The function CC_EnsMedian takes the median CC across windows in the dataset.
# This is mainly done to deal with the issue of missing measurements in the dataset
# If your data does not have missing measurements you could use numpy.corrcoef directly
# Do a sweep of possible CC Thresholds and rank the flagged results
notMemberVector = [0.25,0.26,0.27,0.28,0.29,0.30,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38,0.39,0.4,0.41,0.42,0.43,0.44,0.45,0.46,0.47,0.48,0.49,0.50,0.51,0.52,0.53,0.54,0.55,0.56,0.57,0.58,0.59,0.60,0.61,0.62,0.63,0.64,0.65,0.66,0.67,0.68,0.69,0.70,0.71,0.72,0.73,0.74,0.75,0.76,0.78,0.79,0.80,0.81,0.82,0.83,0.84,0.85,0.86,0.87,0.88,0.90,0.91]
allFlaggedTrans, allNumFlagged, rankedFlaggedTrans, rankedTransThresholds = M2TFuncs.RankFlaggingBySweepingThreshold(transLabelsErrors,notMemberVector,ccMatrix)
# Plot the number of flagged transformers for all threshold values
M2TUtils.PlotNumFlaggedTrans_ThresholdSweep(notMemberVector,allNumFlagged,transLabelsErrors,savePath=-1)
# The main output from this Error Flagging section is rankedFlaggedTrans which
# contains the list of flagged transformers ranked by correlation coefficient.
# Transformers at the beginning of the list were flagged with lower CC, indicating
# higher confidence that those transformers do indeed have errors.
##############################################################################
#
# Transformer Assignment Section - Linear Regression Steps
#
# Calculate the pairwise linear regression
r2Affinity,rDist,xDist,regRDistIndiv,regXDistIndiv,mseMatrix = M2TUtils.ParamEst_LinearRegression(voltageInput,pDataInput,qDataInput)
additiveFactor = 0.02
minMSE, mseThreshold = M2TUtils.FindMinMSE(mseMatrix,additiveFactor)
#This sets the mse threshold based on adding a small amount to the smallest MSE value in the pairwise MSE matrix
# Alternatively you could set the mse threshold manually
#mseThreshold = 0.3
# Plot CDF for adjusted reactance distance
replacementValue = np.max(np.max(xDist))
xDistAdjusted = M2TFuncs.AdjustDistFromThreshold(mseMatrix,xDist,mseThreshold, replacementValue)
# Select a particular set of ranked results using a correlation coefficient threshold
notMemberThreshold=0.5
flaggingIndex = np.where(np.array(notMemberVector)==notMemberThreshold)[0][0]
flaggedTrans = allFlaggedTrans[flaggingIndex]
predictedTransLabels,allChangedIndices,allChangedOrgTrans,allChangedPredTrans = M2TFuncs.CorrectFlaggedTransErrors(flaggedTrans,transLabelsErrors,custIDInput,ccMatrix,notMemberThreshold, mseMatrix,xDistAdjusted,reactanceThreshold=0.046)
# predictedTransLabels: numpy array of int (1,customers) - the predicted labels
# for each customer. Positive labels will be unchanged from the original
# set of transformer labels. Negative labels will be new transformer groupings
# which should be the correct groups of customers served by a particular
# transformer but will require mapping back to a particular physical transformer.
# In the sample data customer_4 was injected with an incorrect label and should now be grouped with customer_5 and customer_6
# customer_53 was also injected with an incorrect label and should now be grouped with customer_54 and customer_55
print('Meter to Transformer Pairing Algorithm Results')
M2TUtils.PrettyPrintChangedCustomers(predictedTransLabels,transLabelsErrors,custIDInput)
# This function calculates two transformer level metrics of accuracy that we have been using
# incorrectTrans is a list of incorrect transformers where incorrect means customers added or omitted to the correct grouping
# This defines Transformer Accuracy, i.e. the number of correct transformers out of the total transformers
# incorrectPairedIDs lists the customers from incorrect trans which allows us to define
# Customer Pairing Accuracy which is the number of customers in the correct groupings, i.e. no customers added or omitted from the grouping
incorrectTrans,incorrectPairedIndices, incorrectPairedIDs= M2TUtils.CalcTransPredErrors(predictedTransLabels,transLabelsTrue,custIDInput,singleCustMarker=-999)
print('')
print('Ground Truth Results')
print('Transformers with incorrect groupings:')
print(incorrectTrans)
# In the sample data, these will be empty because all customers were correctly grouped together by their service transformer.
# Write output to a csv file
df = pd.DataFrame()
df['customer ID'] = custIDInput
df['Original Transformer Labels (with errors)'] = transLabelsErrors[0,:]
df['Predicted Transformer Labels'] = predictedTransLabels[0,:]
df['Actual Transformer Labels'] = transLabelsTrue[0,:]
df.to_csv('outputs_PredictedTransformerLabels.csv')
print('Predicted transformer labels written to outputs_PredictedTransformerLabels.csv')
df = pd.DataFrame()
df['Ranked Flagged Transformers'] = flaggedTrans
df.to_csv('outputs_RankedFlaggedTransformers.csv')
print('Flagged and ranked transformers written to outputs_RankedFlaggedTransformers.csv')
| 48.492537 | 344 | 0.733149 |
ccc51ab14667f22cd9b5eaba86b2724ac98a38f4 | 10,726 | py | Python | mcdc_tnt/pyk_kernels/all/advance.py | jpmorgan98/MCDC-TNT-2 | c437596097caa9af56df95213e7f64db38aac40e | [
"BSD-3-Clause"
] | 1 | 2022-02-26T02:12:12.000Z | 2022-02-26T02:12:12.000Z | mcdc_tnt/pyk_kernels/all/advance.py | jpmorgan98/MCDC-TNT-2 | c437596097caa9af56df95213e7f64db38aac40e | [
"BSD-3-Clause"
] | null | null | null | mcdc_tnt/pyk_kernels/all/advance.py | jpmorgan98/MCDC-TNT-2 | c437596097caa9af56df95213e7f64db38aac40e | [
"BSD-3-Clause"
] | 1 | 2022-02-09T22:39:42.000Z | 2022-02-09T22:39:42.000Z | import math
import numpy as np
import pykokkos as pk
#@pk.workunit
#def CellSum
# for i in range(num_parts)
#@profile
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
            num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
    """Advance all particle histories through the mesh until transport ends.

    Repeatedly launches the pykokkos ``Advance_cycle`` transport workunit and
    the ``DistTraveled`` reduction workunit (both defined elsewhere in this
    module) until ``clever_out[0]`` signals completion.  ``clever_out[1]`` is
    used as the count of finished particles for the progress readout.
    Position/direction/time views and the mesh tally views are presumably
    updated in place by the workunits — TODO confirm against their kernels.
    Bails out (returning an empty tuple) after 1000 cycles.
    """
    max_mesh_index = int(len(mesh_total_xsec)-1)

    p_end_trans: pk.View1D[int] = pk.View([num_part], int) #flag: nonzero once a particle's transport has ended

    p_end_trans.fill(0)

    p_dist_travled: pk.View1D[pk.double] = pk.View([num_part], pk.double)

    clever_out: pk.View1D[int] = pk.View([4], int)

    end_flag = 0
    cycle_count = 0
    while end_flag == 0:
        #allocate randoms (fresh numpy draws copied into a pykokkos view each cycle)
        summer = 0
        rands_np = np.random.random([num_part])
        rands = pk.from_numpy(rands_np)

        #vector of indicies for particle transport
        p = pk.RangePolicy(pk.get_default_space(), 0, num_part)

        p_dist_travled.fill(0)
        pre_p_mesh = p_mesh_cell

        L = float(L)

        #space = pk.ExecutionSpace.OpenMP
        pk.execute(pk.ExecutionSpace.OpenMP, Advance_cycle(num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands))#pk for number still in transport

        pk.execute(pk.ExecutionSpace.OpenMP,
            DistTraveled(num_part, max_mesh_index, mesh_dist_traveled, mesh_dist_traveled_squared, p_dist_travled, pre_p_mesh, p_end_trans, clever_out))

        # clever_out[0]: loop-termination flag; clever_out[1]: finished-particle tally
        end_flag = clever_out[0]
        summer = clever_out[1]

        #print(cycle_count)
        # Safety valve: abandon the sweep if it fails to converge
        if (cycle_count > int(1e3)):
            print("************ERROR**********")
            print("   Max itter hit")
            print(p_end_trans)
            print()
            print()
            return()

        cycle_count += 1
        # Progress readout (only the percentage, format slot {1}, is printed)
        print("Advance Complete:......{1}%    ".format(cycle_count, int(100*summer/num_part)), end = "\r")
    print()
"""
def test_Advance():
L = 1
dx = .25
N_m = 4
num_part = 6
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1])
p_pos_y = 2.1*np.ones(num_part)
p_pos_z = 3.4*np.ones(num_part)
p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], dtype=int)
p_dir_x = np.ones(num_part)
p_dir_x[0] = -1
p_dir_y = np.zeros(num_part)
p_dir_z = np.zeros(num_part)
p_speed = np.ones(num_part)
p_time = np.zeros(num_part)
p_alive = np.ones(num_part, bool)
p_alive[5] = False
particle_speed = 1
mesh_total_xsec = np.array([0.1,1,.1,100])
mesh_dist_traveled_squared = np.zeros(N_m)
mesh_dist_traveled = np.zeros(N_m)
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
assert (np.sum(mesh_dist_traveled) > 0)
assert (np.sum(mesh_dist_traveled_squared) > 0)
assert (p_pos_x[0] == -.01)
assert (p_pos_x[5] == 1.1)
assert (p_pos_x[1:4].all() > .75)
"""
if __name__ == '__main__':
speedTestAdvance()
| 35.516556 | 312 | 0.591274 |
ccc5fd6159d96d352e343e6b001e8aa4a53ede03 | 514 | py | Python | DGF/auth/_authenticator.py | LonguCodes/DGF | bd344eff34cbe6438f71631e2cc103f1c4584e09 | [
"MIT"
] | null | null | null | DGF/auth/_authenticator.py | LonguCodes/DGF | bd344eff34cbe6438f71631e2cc103f1c4584e09 | [
"MIT"
] | null | null | null | DGF/auth/_authenticator.py | LonguCodes/DGF | bd344eff34cbe6438f71631e2cc103f1c4584e09 | [
"MIT"
] | null | null | null | from django.conf import settings
from ..defaults import AUTHENTICATORS
from ..utils import get_module
authenticators = []
all_args = []
| 34.266667 | 110 | 0.774319 |
ccc6026c8a78ded0d003eb1ba605983fe1d65590 | 1,123 | py | Python | facerec/face_util.py | seanbenhur/sih2020 | f8b5988425185ed3c85872b98b622932895f932b | [
"MIT"
] | null | null | null | facerec/face_util.py | seanbenhur/sih2020 | f8b5988425185ed3c85872b98b622932895f932b | [
"MIT"
] | null | null | null | facerec/face_util.py | seanbenhur/sih2020 | f8b5988425185ed3c85872b98b622932895f932b | [
"MIT"
] | null | null | null | import face_recognition as fr
def compare_faces(file1, file2):
"""
Compare two images and return True / False for matching.
"""
# Load the jpg files into numpy arrays
image1 = fr.load_image_file(file1)
image2 = fr.load_image_file(file2)
# Get the face encodings for each face in each image file
# Assume there is only 1 face in each image, so get 1st face of an image.
image1_encoding = fr.face_encodings(image1)[0]
image2_encoding = fr.face_encodings(image2)[0]
# results is an array of True/False telling if the unknown face matched anyone in the known_faces array
results = fr.compare_faces([image1_encoding], image2_encoding)
return results[0]
# Each face is tuple of (Name,sample image)
known_faces = [('Stark','sample_images/stark.jpg'),
('Hannah','sample_images/hannah.jpg'),
]
def face_rec(file):
"""
Return name for a known face, otherwise return 'Uknown'.
"""
for name, known_file in known_faces:
if compare_faces(known_file,file):
return name
return 'Unknown'
| 33.029412 | 107 | 0.664292 |
ccc6e968fc2455af2569e67afae509ff5c1e5fc0 | 3,354 | py | Python | src/correlation/dataset_correlation.py | sakdag/crime-data-analysis | 9c95238c6aaf1394f68be59e26e8c6d75f669d7e | [
"MIT"
] | null | null | null | src/correlation/dataset_correlation.py | sakdag/crime-data-analysis | 9c95238c6aaf1394f68be59e26e8c6d75f669d7e | [
"MIT"
] | null | null | null | src/correlation/dataset_correlation.py | sakdag/crime-data-analysis | 9c95238c6aaf1394f68be59e26e8c6d75f669d7e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pyproj as pyproj
import shapely as shapely
from shapely.geometry import Point
from haversine import haversine
import src.config.column_names as col_names
# Finds nearest zipcode geolocation to the crime location, then adds this zipcode to the crime dataset
| 46.583333 | 129 | 0.707812 |
cccac18c6d9e0d8ce330a9adb50aea424e46871f | 197 | py | Python | Function/xargs.py | tusharxoxoxo/Python-practice | 2d75750029655c65271fd51a8d175285719a62e6 | [
"MIT"
] | null | null | null | Function/xargs.py | tusharxoxoxo/Python-practice | 2d75750029655c65271fd51a8d175285719a62e6 | [
"MIT"
] | null | null | null | Function/xargs.py | tusharxoxoxo/Python-practice | 2d75750029655c65271fd51a8d175285719a62e6 | [
"MIT"
] | null | null | null |
increment(3,4,5,6)
print(dance(1,2,3,4,5,6))
| 13.133333 | 26 | 0.614213 |
cccd71a65c5bf63a13dfaa2062a6d5203e62b9ef | 2,386 | py | Python | blog/resources/tag.py | espstan/fBlog | 7c63d117a3bbae3da80b3e8d7f731ae89036eb0d | [
"MIT"
] | 2 | 2019-06-17T13:55:36.000Z | 2019-06-19T22:40:06.000Z | blog/resources/tag.py | espstan/fBlog | 7c63d117a3bbae3da80b3e8d7f731ae89036eb0d | [
"MIT"
] | 35 | 2019-06-17T07:00:49.000Z | 2020-02-17T09:41:53.000Z | blog/resources/tag.py | espstan/fBlog | 7c63d117a3bbae3da80b3e8d7f731ae89036eb0d | [
"MIT"
] | null | null | null | from flask_restful import Resource
from flask_restful import reqparse
from sqlalchemy.exc import SQLAlchemyError
from config import Configuration
from models.tag import TagModel as TM
| 31.394737 | 106 | 0.55658 |
ccd003a4c9bdfc241fd64f093ae2afa4fde92196 | 2,198 | py | Python | Torrents/migrations/0001_initial.py | tabish-ali/torrent_site | 58e2e7f943a28b16134b26f450b54e9a066e7114 | [
"MIT"
] | 1 | 2019-12-17T07:36:46.000Z | 2019-12-17T07:36:46.000Z | Torrents/migrations/0001_initial.py | tabish-ali/torrent_site | 58e2e7f943a28b16134b26f450b54e9a066e7114 | [
"MIT"
] | null | null | null | Torrents/migrations/0001_initial.py | tabish-ali/torrent_site | 58e2e7f943a28b16134b26f450b54e9a066e7114 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-06 16:06
from django.db import migrations, models
| 41.471698 | 114 | 0.560055 |
ccd0e580fc925afb48c3d9bac3c4c4c1b73ecf0c | 5,493 | py | Python | Agent.py | 594zyc/CMCC_DialogSystem | 9d85f4c319677bf5e682a562041b908fc3135a17 | [
"Apache-2.0"
] | 1 | 2019-11-20T16:36:30.000Z | 2019-11-20T16:36:30.000Z | Agent.py | 594zyc/CMCC_DialogSystem | 9d85f4c319677bf5e682a562041b908fc3135a17 | [
"Apache-2.0"
] | null | null | null | Agent.py | 594zyc/CMCC_DialogSystem | 9d85f4c319677bf5e682a562041b908fc3135a17 | [
"Apache-2.0"
] | null | null | null | """
Manager,text-in text-out agent
"""
import os
import sys
import time
import argparse
import logging
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../..'))
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from DM.DST.StateTracking import DialogStateTracker
from DM.policy.RuleMapping import RulePolicy
from data.DataManager import DataManager
from NLU.NLUManager import NLUManager
from NLG.NLGManager import rule_based_NLG
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--print', type=bool, default=True, help='print details')
FLAGS= parser.parse_args()
UserPersonal = {
"": ["180", "184G"], # dict
"": " 11.10 GB 0 110.20 247.29 ",
"": "18811369685",
"" : "",
"": "",
"": "",
"": " 110.20 ",
"": " 11.10 GB",
"": " 2017-04-04 2017-05-01",
"": "",
"": " APP ",
"": " APP ",
"": " APP "
}
NLU_save_path_dict = {
'domain': os.path.join(BASE_DIR, 'NLU/DomDect/model/ckpt'),
'useract': os.path.join(BASE_DIR, 'NLU/UserAct/model/ckpt'),
'slotfilling': os.path.join(BASE_DIR, 'NLU/SlotFilling/model/ckpt'),
'entity': os.path.join(BASE_DIR, 'NLU/ER/entity_list.txt'),
'sentiment': os.path.join(BASE_DIR, 'NLU/SentiDect')
}
if __name__ == '__main__':
agent = DialogAgent()
agent.run()
| 34.54717 | 97 | 0.54178 |
ccd2091aa554752b80be765a8333a7d8212199e1 | 1,712 | py | Python | clu/constants/exceptions.py | fish2000/CLU | 80bc2df5f001b5639d79ba979e19ec77a9931425 | [
"BSD-3-Clause"
] | 1 | 2019-07-02T08:17:59.000Z | 2019-07-02T08:17:59.000Z | clu/constants/exceptions.py | fish2000/CLU | 80bc2df5f001b5639d79ba979e19ec77a9931425 | [
"BSD-3-Clause"
] | 13 | 2019-12-17T02:28:30.000Z | 2021-11-17T03:46:10.000Z | clu/constants/exceptions.py | fish2000/CLU | 80bc2df5f001b5639d79ba979e19ec77a9931425 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
""" CLUs Custom exception classes live here """
__all__ = ('BadDotpathWarning',
'CDBError',
'ConfigurationError',
'ExecutionError', 'FilesystemError',
'ExportError', 'ExportWarning',
'KeyValueError',
'Nondeterminism',
'UnusedValueWarning')
__dir__ = lambda: list(__all__) | 28.065574 | 74 | 0.672313 |
ccd4b8a090334276bc85ce2b13557564dea0406a | 231 | py | Python | setup.py | seakarki/vehicle_accident_prediction | a921015104865dc1e4901fb1581f7822bef75f06 | [
"FTL"
] | null | null | null | setup.py | seakarki/vehicle_accident_prediction | a921015104865dc1e4901fb1581f7822bef75f06 | [
"FTL"
] | null | null | null | setup.py | seakarki/vehicle_accident_prediction | a921015104865dc1e4901fb1581f7822bef75f06 | [
"FTL"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Prediction of likely occurance of Vehicle Accident',
author='sea karki',
license='',
)
| 21 | 69 | 0.679654 |
ccd5ca4d81ce2bd9480bed7ee3a94756e7ada584 | 6,543 | py | Python | utils/revOrm.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | null | null | null | utils/revOrm.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | 1 | 2020-02-17T18:56:18.000Z | 2020-02-20T17:06:20.000Z | utils/revOrm.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | 1 | 2021-03-08T16:07:11.000Z | 2021-03-08T16:07:11.000Z | #!/usr/bin/env python
import sys, time, os
sys.path.append('../src')
import numpy as np
import matplotlib.pylab as plt
import hla
from hla import caget, caput
if __name__ == "__main__":
#filter_orm('../test/dat/orm-full-0179.pkl')
#filter_orm('../test/dat/orm-full-0181.pkl')
#filter_orm('../test/dat/orm-full.pkl')
#filter_orm('../test/dat/orm-sub1.pkl')
#mask_orm('../test/dat/orm-full-0179.pkl', [52, 90, 141, 226, 317, 413])
#merge_orm('../test/dat/orm-full-0179.pkl',
# '../test/dat/orm-full-0181.pkl')
#merge_orm('../test/dat/orm-full-0181.pkl', 'orm.pkl')
#test_orbit('../test/dat/orm-full-0181.pkl')
#update_orm('../test/dat/orm-full-0184.pkl')
#update_orm('orm-full-update.pkl')
correct_orbit('orm-full-update.pkl')
| 31.917073 | 197 | 0.52346 |
ae9f5ca026e9179a91eee39f486434f7618b47ec | 1,698 | py | Python | Object Detection and Tracking HSV color space.py | shivtejshete/Advanced_Image_Processing | b5a7ef94a44ab0b3bd9fa4a70d843099af70079e | [
"MIT"
] | null | null | null | Object Detection and Tracking HSV color space.py | shivtejshete/Advanced_Image_Processing | b5a7ef94a44ab0b3bd9fa4a70d843099af70079e | [
"MIT"
] | null | null | null | Object Detection and Tracking HSV color space.py | shivtejshete/Advanced_Image_Processing | b5a7ef94a44ab0b3bd9fa4a70d843099af70079e | [
"MIT"
] | null | null | null | import numpy as np
import cv2
#sample function - dummy callback function
#capture live video
cap = cv2.VideoCapture(0)
#window for trackbars
cv2.namedWindow('Track')
#defining trackbars to control HSV values of given video stream
cv2.createTrackbar('L_HUE', 'Track', 0, 255, nothing)
cv2.createTrackbar('L_Sat', 'Track', 0, 255, nothing)
cv2.createTrackbar('L_Val', 'Track', 0, 255, nothing)
cv2.createTrackbar('H_HUE', 'Track', 255, 255, nothing)
cv2.createTrackbar('H_Sat', 'Track', 255, 255, nothing)
cv2.createTrackbar('H_Val', 'Track', 255, 255, nothing)
while cap.isOpened()==True :
#read the video feed
_, frame = cap.read()
cv2.imshow('Actual_Feed', frame)
#get current trackbar positions for every frame
l_hue = cv2.getTrackbarPos('L_HUE', 'Track')
l_sat = cv2.getTrackbarPos('L_Sat', 'Track')
l_val = cv2.getTrackbarPos('L_Val', 'Track')
h_hue = cv2.getTrackbarPos('H_HUE', 'Track')
h_sat = cv2.getTrackbarPos('H_Sat', 'Track')
h_val = cv2.getTrackbarPos('H_Val', 'Track')
#convert the captured frame into HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) #Hue(0), Saturation(1) and Value(2)
# print(hsv.shape)
#trim video feed HSV to a range
lower_bound = np.array([l_hue, l_sat, l_val])
upper_bound = np.array([h_hue, h_sat, h_val])
mask = cv2.inRange(hsv, lower_bound,upper_bound )
frame = cv2.bitwise_and(frame, frame,mask=mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
# cv2.imshow('converted', hsv)
key = cv2.waitKey(1)
if key==27:
break
cap.release()
cv2.destroyAllWindows() | 29.789474 | 86 | 0.6596 |
ae9fdc83392f9acd375ec6b842e275792831330a | 15,934 | py | Python | tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py | echo80313/grpc | 93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf | [
"Apache-2.0"
] | null | null | null | tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py | echo80313/grpc | 93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf | [
"Apache-2.0"
] | 4 | 2022-02-27T18:59:37.000Z | 2022-02-27T18:59:53.000Z | tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py | echo80313/grpc | 93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Tuple
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
ExpectedResult = xds_url_map_testcase.ExpectedResult
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
# The first batch of RPCs don't count towards the result of test case. They are
# meant to prove the communication between driver and client is fine.
_NUM_RPCS = 10
_LENGTH_OF_RPC_SENDING_SEC = 16
# We are using sleep to synchronize test driver and the client... Even though
# the client is sending at QPS rate, we can't assert that exactly QPS *
# SLEEP_DURATION number of RPC is finished. The final completed RPC might be
# slightly more or less.
_NON_RANDOM_ERROR_TOLERANCE = 0.01
# For random generator involved test cases, we want to be more loose about the
# final result. Otherwise, we will need more test duration (sleep duration) and
# more accurate communication mechanism. The accurate of random number
# generation is not the intention of this test.
_ERROR_TOLERANCE = 0.2
_DELAY_CASE_APPLICATION_TIMEOUT_SEC = 1
_BACKLOG_WAIT_TIME_SEC = 20
def _wait_until_backlog_cleared(test_client: XdsTestClient,
timeout: int = _BACKLOG_WAIT_TIME_SEC):
""" Wait until the completed RPC is close to started RPC.
For delay injected test cases, there might be a backlog of RPCs due to slow
initialization of the client. E.g., if initialization took 20s and qps is
25, then there will be a backlog of 500 RPCs. In normal test cases, this is
fine, because RPCs will fail immediately. But for delay injected test cases,
the RPC might linger much longer and affect the stability of test results.
"""
logger.info('Waiting for RPC backlog to clear for %d seconds', timeout)
deadline = time.time() + timeout
while time.time() < deadline:
stats = test_client.get_load_balancer_accumulated_stats()
ok = True
for rpc_type in [RpcTypeUnaryCall, RpcTypeEmptyCall]:
started = stats.num_rpcs_started_by_method.get(rpc_type, 0)
completed = stats.num_rpcs_succeeded_by_method.get(
rpc_type, 0) + stats.num_rpcs_failed_by_method.get(rpc_type, 0)
# We consider the backlog is healthy, if the diff between started
# RPCs and completed RPCs is less than 1.5 QPS.
if abs(started - completed) > xds_url_map_testcase.QPS.value * 1.1:
logger.info(
'RPC backlog exist: rpc_type=%s started=%s completed=%s',
rpc_type, started, completed)
time.sleep(_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
ok = False
else:
logger.info(
'RPC backlog clear: rpc_type=%s started=%s completed=%s',
rpc_type, started, completed)
if ok:
# Both backlog of both types of RPCs is clear, success, return.
return
raise RuntimeError('failed to clear RPC backlog in %s seconds' % timeout)
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.assertRpcStatusCode(test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeEmptyCall,
status_code=grpc.StatusCode.OK,
ratio=1),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_NON_RANDOM_ERROR_TOLERANCE)
if __name__ == '__main__':
absltest.main()
| 43.654795 | 80 | 0.622756 |
ae9ff35726e01cb8b94b132adc55d816f0af6cd8 | 2,008 | py | Python | funda/spiders/FundaListings.py | royzwambag/funda-scraper | c288ae3521731d1fa12fbde7acc10d725048603a | [
"MIT"
] | 1 | 2021-02-21T22:42:43.000Z | 2021-02-21T22:42:43.000Z | funda/spiders/FundaListings.py | royzwambag/funda-scraper | c288ae3521731d1fa12fbde7acc10d725048603a | [
"MIT"
] | null | null | null | funda/spiders/FundaListings.py | royzwambag/funda-scraper | c288ae3521731d1fa12fbde7acc10d725048603a | [
"MIT"
] | 1 | 2021-12-01T12:18:54.000Z | 2021-12-01T12:18:54.000Z | import scrapy
from ..items import BasicHouseItem
from ..utils import generate_url
| 44.622222 | 125 | 0.580677 |
aea00ef15d75bbb6792d81d979d452ecd7795555 | 754 | py | Python | wfdb/__init__.py | melbourne-cdth/wfdb-python | a36c22e12f8417ff18e57dbe54b7180dd183ec66 | [
"MIT"
] | null | null | null | wfdb/__init__.py | melbourne-cdth/wfdb-python | a36c22e12f8417ff18e57dbe54b7180dd183ec66 | [
"MIT"
] | null | null | null | wfdb/__init__.py | melbourne-cdth/wfdb-python | a36c22e12f8417ff18e57dbe54b7180dd183ec66 | [
"MIT"
] | null | null | null | from wfdb.io.record import (Record, MultiRecord, rdheader, rdrecord, rdsamp,
wrsamp, dl_database, edf2mit, mit2edf, wav2mit,
mit2wav, wfdb2mat, csv2mit, sampfreq, signame,
wfdbdesc, wfdbtime, sigavg)
from wfdb.io.annotation import (Annotation, rdann, wrann, show_ann_labels,
show_ann_classes, ann2rr, rr2ann, csv2ann,
rdedfann, mrgann)
from wfdb.io.download import get_dbs, get_record_list, dl_files, set_db_index_url
from wfdb.plot.plot import plot_items, plot_wfdb, plot_all_records
from wfdb.plot.plot_plotly import plot_items_pl, plot_wfdb_pl, plot_all_records_pl
from wfdb.version import __version__
| 58 | 82 | 0.66313 |
aea011bdabd2351b652997696862bc1051c59d81 | 647 | py | Python | reports/migrations/0103_auto_20190224_1000.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 3 | 2020-04-26T06:28:50.000Z | 2021-04-05T08:02:26.000Z | reports/migrations/0103_auto_20190224_1000.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 10 | 2020-06-05T17:36:10.000Z | 2022-03-11T23:16:42.000Z | reports/migrations/0103_auto_20190224_1000.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 5 | 2021-04-08T08:43:49.000Z | 2021-11-27T06:36:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 23.962963 | 50 | 0.591963 |
aea0f505ac0ae8958de0fe2ef7a2f11f420d2940 | 936 | py | Python | CodeFights2.py | sassy224/challenges | 3e4c9c6fe19351f2e47408967bf1bd3f4ca63bb7 | [
"MIT"
] | null | null | null | CodeFights2.py | sassy224/challenges | 3e4c9c6fe19351f2e47408967bf1bd3f4ca63bb7 | [
"MIT"
] | null | null | null | CodeFights2.py | sassy224/challenges | 3e4c9c6fe19351f2e47408967bf1bd3f4ca63bb7 | [
"MIT"
] | null | null | null | """
You are given a digital number written down on a sheet of paper.
Your task is to figure out if you rotate the given sheet of paper by 180 degrees would the number still look exactly the same.
input: "1"
output: false
input: "29562"
output: true
input: "77"
output: false
"""
def digital_number(number):
"""
>>> digital_number('1')
False
>>> digital_number('29562')
True
>>> digital_number('77')
False
>>> digital_number('000')
True
The trick here is you need to understand what rotation is:
https://en.wikipedia.org/wiki/Seven-segment_display_character_representations
In short:
0 -> 0
2 -> 2
5 -> 5
6 -> 9
8 -> 8
9 -> 6
Other numbers can't be rotated into another number
"""
rule = {'0':'0', '2':'2', '5':'5', '6':'9', '8':'8', '9':'6'}
new_str = [rule.get(digit, "x") for digit in number ]
return number == "".join(new_str[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod() | 21.767442 | 126 | 0.654915 |
aea133a870e185a7ab120fb1bfa33b1fd3b07e6f | 5,196 | py | Python | AKSDataOpsDemo/scripts/evaluate.py | cloudmelon/aks-severless | ade3b6a110444c09bc4c0ff44cb232cb09ddebe7 | [
"MIT"
] | 6 | 2019-12-06T22:55:41.000Z | 2019-12-10T23:57:40.000Z | AKSDataOpsDemo/scripts/evaluate.py | cloudmelon/aks-severless | ade3b6a110444c09bc4c0ff44cb232cb09ddebe7 | [
"MIT"
] | null | null | null | AKSDataOpsDemo/scripts/evaluate.py | cloudmelon/aks-severless | ade3b6a110444c09bc4c0ff44cb232cb09ddebe7 | [
"MIT"
] | 2 | 2019-12-26T16:25:44.000Z | 2020-09-02T22:43:41.000Z | import argparse
import os, json, sys
import azureml.core
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core.model import Model
import azureml.core
from azureml.core import Run
from azureml.core.webservice import AciWebservice, Webservice
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.image import ContainerImage
from azureml.core import Image
print("In evaluate.py")
parser = argparse.ArgumentParser("evaluate")
parser.add_argument("--model_name", type=str, help="model name", dest="model_name", required=True)
parser.add_argument("--image_name", type=str, help="image name", dest="image_name", required=True)
parser.add_argument("--output", type=str, help="eval output directory", dest="output", required=True)
args = parser.parse_args()
print("Argument 1: %s" % args.model_name)
print("Argument 2: %s" % args.image_name)
print("Argument 3: %s" % args.output)
run = Run.get_context()
ws = run.experiment.workspace
print('Workspace configuration succeeded')
model_list = Model.list(ws, name = args.model_name)
latest_model = sorted(model_list, reverse=True, key = lambda x: x.created_time)[0]
latest_model_id = latest_model.id
latest_model_name = latest_model.name
latest_model_version = latest_model.version
latest_model_path = latest_model.get_model_path(latest_model_name, _workspace=ws)
print('Latest model id: ', latest_model_id)
print('Latest model name: ', latest_model_name)
print('Latest model version: ', latest_model_version)
print('Latest model path: ', latest_model_path)
latest_model_run_id = latest_model.tags.get("run_id")
print('Latest model run id: ', latest_model_run_id)
latest_model_run = Run(run.experiment, run_id = latest_model_run_id)
latest_model_accuracy = latest_model_run.get_metrics().get("acc")
print('Latest model accuracy: ', latest_model_accuracy)
ws_list = Webservice.list(ws, model_name = latest_model_name)
print('webservice list')
print(ws_list)
deploy_model = False
current_model = None
if(len(ws_list) > 0):
webservice = ws_list[0]
try:
image_id = webservice.tags['image_id']
image = Image(ws, id = image_id)
current_model = image.models[0]
print('Found current deployed model!')
except:
deploy_model = True
print('Image id tag not found!')
else:
deploy_model = True
print('No deployed webservice for model: ', latest_model_name)
current_model_accuracy = -1 # undefined
if current_model != None:
current_model_run = Run(run.experiment, run_id = current_model.tags.get("run_id"))
current_model_accuracy = current_model_run.get_metrics().get("acc")
print('accuracies')
print(latest_model_accuracy, current_model_accuracy)
if latest_model_accuracy > current_model_accuracy:
deploy_model = True
print('Current model performs better and will be deployed!')
else:
print('Current model does NOT perform better and thus will NOT be deployed!')
eval_info = {}
eval_info["model_name"] = latest_model_name
eval_info["model_version"] = latest_model_version
eval_info["model_path"] = latest_model_path
eval_info["model_acc"] = latest_model_accuracy
eval_info["deployed_model_acc"] = current_model_accuracy
eval_info["deploy_model"] = deploy_model
eval_info["image_name"] = args.image_name
eval_info["image_id"] = ""
os.makedirs(args.output, exist_ok=True)
eval_filepath = os.path.join(args.output, 'eval_info.json')
if deploy_model == False:
with open(eval_filepath, "w") as f:
json.dump(eval_info, f)
print('eval_info.json saved')
print('Model did not meet the accuracy criteria and will not be deployed!')
print('Exiting')
sys.exit(0)
# Continue to package Model and create image
print('Model accuracy has met the criteria!')
print('Proceeding to package model and create the image...')
print('Updating scoring file with the correct model name')
with open('score.py') as f:
data = f.read()
with open('score_fixed.py', "w") as f:
f.write(data.replace('MODEL-NAME', args.model_name)) #replace the placeholder MODEL-NAME
print('score_fixed.py saved')
# create a Conda dependencies environment file
print("Creating conda dependencies file locally...")
conda_packages = ['numpy']
pip_packages = ['tensorflow==1.12.2', 'keras==2.2.4', 'azureml-sdk', 'azureml-monitoring']
mycondaenv = CondaDependencies.create(conda_packages=conda_packages, pip_packages=pip_packages)
conda_file = 'scoring_dependencies.yml'
with open(conda_file, 'w') as f:
f.write(mycondaenv.serialize_to_string())
# create container image configuration
print("Creating container image configuration...")
image_config = ContainerImage.image_configuration(execution_script = 'score_fixed.py',
runtime = 'python', conda_file = conda_file)
print("Creating image...")
image = Image.create(name=args.image_name, models=[latest_model], image_config=image_config, workspace=ws)
# wait for image creation to finish
image.wait_for_creation(show_output=True)
eval_info["image_id"] = image.id
with open(eval_filepath, "w") as f:
json.dump(eval_info, f)
print('eval_info.json saved')
| 33.960784 | 106 | 0.746151 |
aea32992cb3ebb2d5d93b2c9d53e7b2172d6b5d0 | 2,048 | py | Python | tools/instruction.py | mzins/MIPS-CPU-SIM | 0f6723c668266447035c5010c67abdd041324d1a | [
"MIT"
] | null | null | null | tools/instruction.py | mzins/MIPS-CPU-SIM | 0f6723c668266447035c5010c67abdd041324d1a | [
"MIT"
] | null | null | null | tools/instruction.py | mzins/MIPS-CPU-SIM | 0f6723c668266447035c5010c67abdd041324d1a | [
"MIT"
] | null | null | null | from tools import instruction_helpers
from tools.errors import InstructionNotFound
from logs.logconfig import log_config
LOG = log_config()
| 37.236364 | 100 | 0.657715 |
aea38d32840ca9cfc7b545135134ea487ccba8a9 | 19,377 | py | Python | projects/views.py | msherman64/portal | e5399ef2ed3051d7c9a46c660f028c666ae22ca6 | [
"Apache-2.0"
] | null | null | null | projects/views.py | msherman64/portal | e5399ef2ed3051d7c9a46c660f028c666ae22ca6 | [
"Apache-2.0"
] | null | null | null | projects/views.py | msherman64/portal | e5399ef2ed3051d7c9a46c660f028c666ae22ca6 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from chameleon.decorators import terms_required
from django.contrib import messages
from django.http import (
Http404,
HttpResponseForbidden,
HttpResponse,
HttpResponseRedirect,
HttpResponseNotAllowed,
JsonResponse,
)
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django import forms
from datetime import datetime
from django.conf import settings
from .models import Project, ProjectExtras
from projects.serializer import ProjectExtrasJSONSerializer
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from .forms import (
ProjectCreateForm,
ProjectAddUserForm,
AllocationCreateForm,
EditNicknameForm,
AddBibtexPublicationForm,
)
from django.db import IntegrityError
import re
import logging
import json
from keystoneclient.v3 import client as ks_client
from keystoneauth1 import adapter
from django.conf import settings
import uuid
import sys
from chameleon.keystone_auth import admin_ks_client, sync_projects, get_user
from util.project_allocation_mapper import ProjectAllocationMapper
logger = logging.getLogger("projects")
def set_ks_project_nickname(chargeCode, nickname):
for region in list(settings.OPENSTACK_AUTH_REGIONS.keys()):
ks_admin = admin_ks_client(region=region)
project_list = ks_admin.projects.list(domain=ks_admin.user_domain_id)
project = [
this
for this in project_list
if getattr(this, "charge_code", None) == chargeCode
]
logger.info(
"Assigning nickname {0} to project with charge code {1} at {2}".format(
nickname, chargeCode, region
)
)
if project and project[0]:
project = project[0]
ks_admin.projects.update(project, name=nickname)
logger.info(
"Successfully assigned nickname {0} to project with charge code {1} at {2}".format(
nickname, chargeCode, region
)
)
def sync_project_memberships(request, username):
"""Re-sync a user's Keystone project memberships.
This calls utils.auth.keystone_auth.sync_projects under the hood, which
will dynamically create missing projects as well.
Args:
request (Request): the parent request; used for region detection.
username (str): the username to sync memberships for.
Return:
List[keystone.Project]: a list of Keystone projects the user is a
member of.
"""
mapper = ProjectAllocationMapper(request)
try:
ks_admin = admin_ks_client(request=request)
ks_user = get_user(ks_admin, username)
if not ks_user:
logger.error(
(
"Could not fetch Keystone user for {}, skipping membership syncing".format(
username
)
)
)
return
active_projects = mapper.get_user_projects(
username, alloc_status=["Active"], to_pytas_model=True
)
return sync_projects(ks_admin, ks_user, active_projects)
except Exception as e:
logger.error("Could not sync project memberships for %s: %s", username, e)
return []
def get_extras(request):
provided_token = request.GET.get("token") if request.GET.get("token") else None
stored_token = getattr(settings, "PROJECT_EXTRAS_API_TOKEN", None)
if not provided_token or not stored_token or provided_token != stored_token:
logger.error("Project Extras json api Access Token validation failed")
return HttpResponseForbidden()
logger.info("Get all project extras json endpoint requested")
response = {"status": "success"}
try:
serializer = ProjectExtrasJSONSerializer()
response["message"] = ""
extras = json.loads(serializer.serialize(ProjectExtras.objects.all()))
response["result"] = extras
except ProjectExtras.DoesNotExist:
response["message"] = "Does not exist."
response["result"] = None
return JsonResponse(response)
| 35.489011 | 95 | 0.588533 |
aea75e1eb83214dedb643e0e6cd5c7568f6bf1b4 | 2,193 | py | Python | listeners/pipe.py | ggilestro/majordomo | d111c1dd1a4c4b8d2cdaa9651b51ece60a1b648d | [
"MIT"
] | null | null | null | listeners/pipe.py | ggilestro/majordomo | d111c1dd1a4c4b8d2cdaa9651b51ece60a1b648d | [
"MIT"
] | null | null | null | listeners/pipe.py | ggilestro/majordomo | d111c1dd1a4c4b8d2cdaa9651b51ece60a1b648d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# pipe.py
#
# Copyright 2014 Giorgio Gilestro <gg@kozak>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Listen from pipefile
# e.g.: echo "TEST COMMAND" > /tmp/pipefile
import os, tempfile
import logging
import threading
if __name__ == '__main__':
p = pipe("pipefile", "none")
| 28.480519 | 96 | 0.617419 |
aea97cde74c5b76516a1287e08c111bed632fb99 | 1,608 | py | Python | scripts/android-patch.py | rasaha91/react-native-macos | 5da5441c1d98596683590bc076541560db61ff82 | [
"CC-BY-4.0",
"MIT"
] | 2,114 | 2020-05-06T10:05:45.000Z | 2022-03-31T23:19:28.000Z | scripts/android-patch.py | rasaha91/react-native-macos | 5da5441c1d98596683590bc076541560db61ff82 | [
"CC-BY-4.0",
"MIT"
] | 623 | 2020-05-05T21:24:26.000Z | 2022-03-30T21:00:31.000Z | scripts/android-patch.py | rasaha91/react-native-macos | 5da5441c1d98596683590bc076541560db61ff82 | [
"CC-BY-4.0",
"MIT"
] | 85 | 2020-05-05T23:09:40.000Z | 2022-03-29T10:12:42.000Z | import os
import sys
# A Python script that can be used to determine which files that require
# patching have been touched between two points in the repo.
if __name__ == '__main__':
if len(sys.argv) != 3:
sys.stderr.write('Usage: android-patch.py <commit> <commit>')
sys.exit(1)
patches = get_patches()
touched_files = set(get_touched_files(sys.argv[1], sys.argv[2]))
for patch_name in sorted(patches.keys()):
patched_and_touched = [file for file in patches[patch_name] \
if file in touched_files]
if len(patched_and_touched) > 0:
print('\033[4m{0}\033[0m'.format(patch_name))
for file in patched_and_touched:
print('* {0}'.format(file))
| 34.212766 | 77 | 0.625622 |
aeaa0a335119f48bebc88e35cf1698370503f3a0 | 259 | py | Python | pyautofinance/common/strategies/usable_strategies/live_trading_test_strategy.py | webclinic017/PyAutoFinance | 532cb1c5418dd9eeb07f2f08646170cde1fe0303 | [
"MIT"
] | null | null | null | pyautofinance/common/strategies/usable_strategies/live_trading_test_strategy.py | webclinic017/PyAutoFinance | 532cb1c5418dd9eeb07f2f08646170cde1fe0303 | [
"MIT"
] | null | null | null | pyautofinance/common/strategies/usable_strategies/live_trading_test_strategy.py | webclinic017/PyAutoFinance | 532cb1c5418dd9eeb07f2f08646170cde1fe0303 | [
"MIT"
] | 1 | 2022-02-24T09:18:13.000Z | 2022-02-24T09:18:13.000Z | from pyautofinance.common.strategies.bracket_strategy import BracketStrategy
| 23.545455 | 76 | 0.760618 |
aeaabc6f5b0d5b7cf3ded3141d91ff7d3817bb49 | 5,624 | py | Python | src/example_publish_pypi_medium/asm-enforce-ready-signatures.py | lrhazi/example-publish-pypi | 135bc75b0e37225b1879cb79d644f709977f1f3d | [
"MIT"
] | null | null | null | src/example_publish_pypi_medium/asm-enforce-ready-signatures.py | lrhazi/example-publish-pypi | 135bc75b0e37225b1879cb79d644f709977f1f3d | [
"MIT"
] | null | null | null | src/example_publish_pypi_medium/asm-enforce-ready-signatures.py | lrhazi/example-publish-pypi | 135bc75b0e37225b1879cb79d644f709977f1f3d | [
"MIT"
] | null | null | null | import json
from docopt import docopt
from bigip_utils.logger import logger
from bigip_utils.bigip import *
#
# This script enforces all attack signatures that are ready to be enforced:
# https://support.f5.com/csp/article/K60640453?utm_source=f5support&utm_medium=RSS
#
__doc__ = """
Usage:
enforce-ready-signatures.py [-hvndsb] [-p POLICY_NAME] -l LIST_FILE
Options:
-h --help Show this screen.
-v --version Show version.
-n --dry-run Show actions. Do not execute them.
-s --sync Sync devices after changes.
-b --backup-config Create and download a UCS file.
-d --dev-devices-only Skip non DEV devices.
-l LIST_FILE --list-file=LIST_FILE CSV file with list of bigips. Format: hostname,ip,username,password
-p POLICY_NAME --policy-name=POLICY_NAME Name of a policy to act on. [default: all]
"""
VERSION = "0.2"
if __name__ == "__main__":
    # Parse CLI options (see the docopt usage text in the module docstring).
    arguments = docopt(__doc__, version=VERSION)
    devices_file = arguments['--list-file']
    dry_run = arguments['--dry-run']
    dev_only = arguments['--dev-devices-only']
    policy_name = arguments['--policy-name']
    sync = arguments['--sync']
    backup_config = arguments['--backup-config']
    for (hostname, ip, username, password) in get_bigips(devices_file, dev_only=dev_only):
        b = BigIP(hostname, username, password, ip=ip, verify_ssl=False)
        logger.info(
            f"{b.hostname}: Started. Policy: {policy_name} Dry-Run: {dry_run}")
        proceed = True
        check_active(b)
        device_group = get_asm_sync_group(b)
        # A device must either belong to an ASM sync device group or be a
        # standalone unit; anything else is an unexpected configuration.
        if not device_group and not check_standalone(b):
            logger.error(
                f"{b.hostname}: Could not find ASM device group name. {device_group}")
            proceed = False
        elif device_group:
            logger.info(f"{b.hostname}: Sync Device Group: {device_group}")
        if not b.token:
            logger.warning(
                f'{b.hostname}: Unable to obtain authentication token')
            proceed = False
        if not check_active(b):
            logger.warning(f'{b.hostname}: Not active, skipping device.')
            proceed = False
        enforced_signatures_count = 0
        # BUGFIX: a stray unconditional get_ucs(b, overwrite=True) call here
        # used to create and download a UCS backup for every device, even with
        # --dry-run or without --backup-config; the guarded call below is the
        # intended one.
        if proceed:
            if backup_config and not dry_run:
                get_ucs(b, overwrite=True)
            enforced_signatures_count = process_device(
                b, dry_run=dry_run, policy=policy_name, sync_device_group=device_group)
        logger.info(
            f"{b.hostname}: Finished. enforced signatures count: {enforced_signatures_count}")
    logger.info("Done.")
| 43.9375 | 163 | 0.618243 |
aeabb21d13fb47406449b6804d29f4983877b33d | 13,358 | py | Python | farb/sysinstall.py | samskivert/farbot | d88f16dcbd23d7ca3b7fdcf341c9346c0ab21bb8 | [
"BSD-3-Clause"
] | null | null | null | farb/sysinstall.py | samskivert/farbot | d88f16dcbd23d7ca3b7fdcf341c9346c0ab21bb8 | [
"BSD-3-Clause"
] | null | null | null | farb/sysinstall.py | samskivert/farbot | d88f16dcbd23d7ca3b7fdcf341c9346c0ab21bb8 | [
"BSD-3-Clause"
] | null | null | null | # sysinstall.py vi:ts=4:sw=4:expandtab:
#
# Copyright (c) 2006-2008 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import os
import string
import farb
| 33.562814 | 96 | 0.643584 |
aeac7c6dc8a5f5e913ca2483b61751b8f3dca05f | 1,233 | py | Python | dynamo_consistency/dynamo/v1/siteinfo.py | dabercro/dynamo-consistency | f37dfebe781a833e9ae30869d8f57be79b4f583c | [
"MIT"
] | null | null | null | dynamo_consistency/dynamo/v1/siteinfo.py | dabercro/dynamo-consistency | f37dfebe781a833e9ae30869d8f57be79b4f583c | [
"MIT"
] | 1 | 2018-02-20T21:21:14.000Z | 2018-02-20T21:21:14.000Z | dynamo_consistency/dynamo/v1/siteinfo.py | dabercro/dynamo-consistency | f37dfebe781a833e9ae30869d8f57be79b4f583c | [
"MIT"
] | 2 | 2018-06-25T11:27:45.000Z | 2021-05-13T20:32:36.000Z | """
This module is for fetching information from dynamo about different sites
"""
from .inventory import _get_inventory
def _small_query(*args):
    """Open an inventory connection, run a single query and close it.

    :param args: positional arguments forwarded to ``query``; the first is
        the SQL statement, any remaining values are its parameters
    :returns: Result of the query
    :rtype: list
    """
    mysql_reg = _get_inventory()
    try:
        return mysql_reg.query(args[0], *args[1:])
    finally:
        # BUGFIX: close the connection even when the query raises, so a
        # failing query does not leak the inventory connection.
        mysql_reg.close()
def site_list():
    """
    :returns: The list of sites dynamo is storing
    :rtype: list
    """
    sites = _small_query('SELECT name FROM sites')
    return sites
_READY = None  # Cached set of ready sites; filled in on first lookup
def ready_sites():
    """
    :returns: Set of sites that are in ready status
    :rtype: set
    """
    global _READY  # pylint: disable=global-statement
    if _READY is not None:
        return _READY
    rows = _small_query('SELECT name FROM sites WHERE status = "ready"')
    _READY = set(rows)
    return _READY
def get_gfal_location(site):
"""
:param str site: A site that we want to list with GFAL
:returns: The host and path needed by the gfal-ls command
:rtype: str
"""
return _small_query('SELECT backend FROM sites WHERE name=%s', site)[0]
| 20.898305 | 83 | 0.652879 |
aeadb61a2cb83bca3659659ea3f7918d3b60f93f | 1,163 | py | Python | core/management/commands/onetime_fix_cover_paths.py | flaiming/bookfinder | 59154d106b62680668087e46eca9c0bf9cdaf336 | [
"MIT"
] | null | null | null | core/management/commands/onetime_fix_cover_paths.py | flaiming/bookfinder | 59154d106b62680668087e46eca9c0bf9cdaf336 | [
"MIT"
] | null | null | null | core/management/commands/onetime_fix_cover_paths.py | flaiming/bookfinder | 59154d106b62680668087e46eca9c0bf9cdaf336 | [
"MIT"
] | null | null | null | import shutil
import os
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from .models import BookCover, get_book_cover_image_path
| 31.432432 | 87 | 0.55632 |
aeafb1f7e55c1238f2645e1019c684aa0c10629d | 1,002 | py | Python | src/gafaelfawr/schema/admin_history.py | slaclab/gafaelfawr | 7a64b0f159003d4745531c89d5b0f7d9777f7bce | [
"MIT"
] | null | null | null | src/gafaelfawr/schema/admin_history.py | slaclab/gafaelfawr | 7a64b0f159003d4745531c89d5b0f7d9777f7bce | [
"MIT"
] | 5 | 2019-06-18T17:28:52.000Z | 2020-04-20T22:15:54.000Z | src/gafaelfawr/schema/admin_history.py | slaclab/gafaelfawr | 7a64b0f159003d4745531c89d5b0f7d9777f7bce | [
"MIT"
] | 1 | 2019-06-25T16:04:57.000Z | 2019-06-25T16:04:57.000Z | """The admin_history database table.
This is a stopgap representation of changes to the admin table until we have a
group system and a group-based authorization system up and running.
"""
from __future__ import annotations
from datetime import datetime
from sqlalchemy import Column, DateTime, Enum, Index, Integer, String
from sqlalchemy.dialects import postgresql
from ..models.history import AdminChange
from .base import Base
__all__ = ["AdminHistory"]
| 30.363636 | 78 | 0.746507 |
aeb200362d7e9aa9bfe424f4789c60841fc0fb5c | 316 | py | Python | app/utils/build_handled_query.py | msolorio/flask_world_api | a2d5394618b736aa7d5d5e75a422dbe9e5713533 | [
"MIT"
] | 1 | 2022-02-24T04:37:04.000Z | 2022-02-24T04:37:04.000Z | app/utils/build_handled_query.py | msolorio/flask_world_api | a2d5394618b736aa7d5d5e75a422dbe9e5713533 | [
"MIT"
] | null | null | null | app/utils/build_handled_query.py | msolorio/flask_world_api | a2d5394618b736aa7d5d5e75a422dbe9e5713533 | [
"MIT"
] | null | null | null | import traceback
from ..models import ServerError
| 21.066667 | 60 | 0.617089 |
aeb2c65fe32f5a74d10e3aa724b39a7ca6e18fa4 | 8,153 | py | Python | wpscan_out_parse/parser/_cli_parser.py | clivewalkden/wpscan_out_parse | dff46aa2e98390afc79b7bb622eb4c01d066fbb5 | [
"MIT"
] | 1 | 2021-06-24T08:35:15.000Z | 2021-06-24T08:35:15.000Z | wpscan_out_parse/parser/_cli_parser.py | clivewalkden/wpscan_out_parse | dff46aa2e98390afc79b7bb622eb4c01d066fbb5 | [
"MIT"
] | null | null | null | wpscan_out_parse/parser/_cli_parser.py | clivewalkden/wpscan_out_parse | dff46aa2e98390afc79b7bb622eb4c01d066fbb5 | [
"MIT"
] | null | null | null | import re
from typing import Any, Dict, Sequence, List, Optional, Tuple
from .base import Parser
from .components import InterestingFinding
from .results import WPScanResults
#################### CLI PARSER ######################
| 36.725225 | 135 | 0.531461 |
aeb3088e188323526fd71e3f97c23bb2c556a657 | 1,095 | py | Python | backend/backend/views.py | synw/django-mqueue-livefeed | d4e8cc2a4c1014f1d7322c297f8f50db9ab8540f | [
"MIT"
] | 9 | 2016-08-30T16:15:15.000Z | 2022-03-22T17:17:01.000Z | backend/backend/views.py | synw/django-mqueue-livefeed | d4e8cc2a4c1014f1d7322c297f8f50db9ab8540f | [
"MIT"
] | null | null | null | backend/backend/views.py | synw/django-mqueue-livefeed | d4e8cc2a4c1014f1d7322c297f8f50db9ab8540f | [
"MIT"
] | 2 | 2017-01-20T22:03:53.000Z | 2017-12-20T11:43:38.000Z | from typing import Any, Dict, Union
from django.conf import settings
from django.http.response import HttpResponseBase, HttpResponseRedirect
from django.views.generic import TemplateView
from django.contrib.auth.views import LoginView, redirect_to_login
| 36.5 | 84 | 0.689498 |
aeb3a40599bade079c5b02f2bf2cc33038b720fa | 3,220 | py | Python | tests/animation/generator/test_animation.py | OrangeUtan/MCMetagen | 0293ea14bf1c6b1bae58741f9876ba662930b43d | [
"MIT"
] | null | null | null | tests/animation/generator/test_animation.py | OrangeUtan/MCMetagen | 0293ea14bf1c6b1bae58741f9876ba662930b43d | [
"MIT"
] | null | null | null | tests/animation/generator/test_animation.py | OrangeUtan/MCMetagen | 0293ea14bf1c6b1bae58741f9876ba662930b43d | [
"MIT"
] | null | null | null | import pytest
from mcanitexgen.animation.generator import Animation, GeneratorError
| 29.009009 | 87 | 0.491304 |
aeb447aba93643942a7e8b9f82a86a22f927bf04 | 2,214 | py | Python | bin/mircx_polsplit.py | jdmonnier/mircx_mystic | 45bf8491117674157b39f49cfe0c92c5ec6da500 | [
"MIT"
] | 1 | 2022-01-13T19:32:51.000Z | 2022-01-13T19:32:51.000Z | bin/mircx_polsplit.py | jdmonnier/mircx_mystic | 45bf8491117674157b39f49cfe0c92c5ec6da500 | [
"MIT"
] | null | null | null | bin/mircx_polsplit.py | jdmonnier/mircx_mystic | 45bf8491117674157b39f49cfe0c92c5ec6da500 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
This is a "quick and dirty" solution to getting polarization data through the pipeline.
This script creates new fits files with independent polarization states.
Make sure you have plenty of diskspace.
"""
from __future__ import print_function
import argparse
import os
from time import sleep
from astropy.io import fits
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Process MIRC-X raw data files')
parser.add_argument("--no-warn", action="store_true")
parser.add_argument("--crop-bad", action="store_true")
parser.add_argument("files", nargs="+", help="File(s) to process")
args = parser.parse_args()
if not args.no_warn:
print("Warning: Make sure you have plenty of disk space; this is going to hurt.")
print("(Hint: ^C while you still can! Sleeping 10 seconds for your benefit.)")
sleep(10)
for dir in ["pol1", "pol2"]:
try:
os.mkdir(dir)
except FileExistsError:
if os.path.isdir(dir):
print("Warning: directory `" + dir + "` already exists")
else:
raise FileExistsError("Looks like you have a file named `" + dir + "`; please remove it.")
for file in tqdm(args.files):
fz = file[-3:] == ".fz"
if fz:
os.system("funpack " + file)
file = file[:-3]
polstate(file, 1)
polstate(file, 2)
if fz:
os.remove(file)
| 32.558824 | 103 | 0.62421 |
aeb56753078f68e7ebf914dfe3362d2ce395b9ab | 44 | py | Python | holoprot/models/__init__.py | vsomnath/holoprot | 9bd6c58491eec701db94ce12f8e15e2143e202b9 | [
"MIT"
] | 10 | 2022-01-19T19:01:35.000Z | 2022-03-21T13:04:59.000Z | holoprot/models/__init__.py | vsomnath/holoprot | 9bd6c58491eec701db94ce12f8e15e2143e202b9 | [
"MIT"
] | null | null | null | holoprot/models/__init__.py | vsomnath/holoprot | 9bd6c58491eec701db94ce12f8e15e2143e202b9 | [
"MIT"
] | 3 | 2022-01-11T16:21:32.000Z | 2022-03-11T15:33:57.000Z | from holoprot.models.trainer import Trainer
| 22 | 43 | 0.863636 |
aeb5bac5587511509d5243a09d8d7dc4620f3a3a | 3,233 | py | Python | mars/dataframe/datasource/core.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | [
"Apache-2.0"
] | 2 | 2019-03-29T04:11:10.000Z | 2020-07-08T10:19:54.000Z | mars/dataframe/datasource/core.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | [
"Apache-2.0"
] | null | null | null | mars/dataframe/datasource/core.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...context import get_context
from ...serialize import Int64Field, KeyField
from ...tiles import TilesError
from ..operands import DataFrameOperand, DataFrameOperandMixin
| 33.677083 | 82 | 0.631921 |
aeb5f63f02d815be3691dc63ea53c88a38fdabc3 | 1,759 | py | Python | scripts/chengyu.py | cfeibiao/chinese-xinhua | fc39a885e6bbd6d79576997bea53682af4e8f596 | [
"MIT"
] | 9,321 | 2018-02-10T09:17:33.000Z | 2022-03-31T11:39:38.000Z | scripts/chengyu.py | cfeibiao/chinese-xinhua | fc39a885e6bbd6d79576997bea53682af4e8f596 | [
"MIT"
] | 48 | 2018-07-30T12:35:49.000Z | 2022-03-11T03:49:24.000Z | scripts/chengyu.py | cfeibiao/chinese-xinhua | fc39a885e6bbd6d79576997bea53682af4e8f596 | [
"MIT"
] | 2,255 | 2018-03-12T09:54:37.000Z | 2022-03-31T10:17:47.000Z | # -*- coding: utf-8 -*-
"""
author: pwxcoo
date: 2018-02-05
description:
"""
import requests, json
from bs4 import BeautifulSoup
def downloader(url):
    """Scrape one index page of chengyu (idiom) links and their definitions.

    Fetches ``url``, extracts every idiom link from the second-to-last table,
    then fetches each idiom page and parses its pinyin, explanation,
    derivation and example.

    :param url: index page URL on www.zd9999.com
    :returns: list of dicts with keys ``word``, ``pinyin``, ``explanation``,
        ``derivation`` and ``example``; empty list when the index page
        cannot be fetched
    """
    response = requests.get(url)
    if response.status_code != 200:
        print(f'{url} is failed!')
        # BUGFIX: return an empty list (not None) so callers that do
        # ``res += downloader(...)`` do not crash on a failed page.
        return []
    print(f'{url} is parsing')
    # Pages are GBK-encoded; ignore undecodable bytes.
    html = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
    table = html.find_all('table')[-2]
    prefix = 'http://www.zd9999.com'
    words = [prefix + a.get('href') for a in table.find_all('a')]
    res = []
    for word in words:
        response = requests.get(word)
        print(f'{[word]} is parsing')
        if response.status_code != 200:
            print(f'{word} is failed!')
            continue
        wordhtml = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
        # The third-to-last table holds the definition rows: row 0 is the
        # headword; the nested table then lists pinyin / explanation /
        # derivation / example, one per row, value in the second cell.
        explanation = wordhtml.find_all('table')[-3].find_all('tr')
        res.append({'word': explanation[0].text.strip(),
                    'pinyin': explanation[1].find_all('tr')[0].find_all('td')[1].text.strip(),
                    'explanation': explanation[1].find_all('tr')[1].find_all('td')[1].text.strip(),
                    'derivation': explanation[1].find_all('tr')[2].find_all('td')[1].text.strip(),
                    'example': explanation[1].find_all('tr')[3].find_all('td')[1].text.strip()})
    return res
if __name__ == '__main__':
res = downloader('http://www.zd9999.com/cy/')
for i in range(2, 199):
res += downloader(f'http://www.zd9999.com/cy/index_{i}.htm')
print(len(res))
with open('chengyu.json', mode='w+', encoding='utf-8') as json_file:
json.dump(res, json_file, ensure_ascii=False) | 32.574074 | 100 | 0.582149 |
aeb856b22c775bb3a156957570f8b200275c77c9 | 21,563 | py | Python | data/prepare_data_3dhp.py | YxZhxn/Ray3D | cc19cf1a8471b8464c172808e77d6305a3218a3c | [
"Apache-2.0"
] | 23 | 2022-03-22T05:09:34.000Z | 2022-03-31T08:24:43.000Z | data/prepare_data_3dhp.py | YxZhxn/Ray3D | cc19cf1a8471b8464c172808e77d6305a3218a3c | [
"Apache-2.0"
] | 4 | 2022-03-22T05:11:48.000Z | 2022-03-30T03:27:10.000Z | data/prepare_data_3dhp.py | YxZhxn/Ray3D | cc19cf1a8471b8464c172808e77d6305a3218a3c | [
"Apache-2.0"
] | 2 | 2022-03-23T09:47:50.000Z | 2022-03-25T01:12:18.000Z | import os
import sys
import copy
import ipdb
import json
import mat73
import numpy as np
import scipy.io as sio
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.camera.camera import CameraInfoPacket, catesian2homogenous
rot = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) # rotate along the x axis 90 degrees
mpii_3dhp_cameras_intrinsic_params = [
{
'id': '0',
'center': [1024.704, 1051.394],
'focal_length': [1497.693, 1497.103],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '1',
'center': [1030.519, 1052.626],
'focal_length': [1495.217, 1495.52],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '2',
'center': [983.8873, 987.5902],
'focal_length': [1495.587, 1497.828],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '3',
'center': [1029.06, 1041.409],
'focal_length': [1495.886, 1496.033],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '4',
'center': [987.6075, 1019.069],
'focal_length': [1490.952, 1491.108],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '5',
'center': [1012.331, 998.5009],
'focal_length': [1500.414, 1499.971],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '6',
'center': [999.7319, 1010.251],
'focal_length': [1498.471, 1498.8],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '7',
'center': [987.2716, 976.8773],
'focal_length': [1498.831, 1499.674],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '8',
'center': [1017.387, 1043.032],
'focal_length': [1500.172, 1500.837],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '9',
'center': [1010.423, 1037.096],
'focal_length': [1501.554, 1501.9],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '10',
'center': [1041.614, 997.0433],
'focal_length': [1498.423, 1498.585],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '11',
'center': [1009.802, 999.9984],
'focal_length': [1495.779, 1493.703],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '12',
'center': [1000.56, 1014.975],
'focal_length': [1501.326, 1501.491],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '13',
'center': [1005.702, 1004.214],
'focal_length': [1496.961, 1497.378],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
}
]
mpii_3dhp_cameras_extrinsic_params = [
{
'translation': [-0.5628666, 1.3981379999999999, 3.852623],
'R':
[
[0.9650164, 0.00488022, 0.262144],
[-0.004488356, -0.9993728, 0.0351275],
[0.262151, -0.03507521, -0.9643893]
]
},
{
'translation': [-1.429856, 0.7381779, 4.897966],
'R':
[
[0.6050639, -0.02184232, 0.7958773],
[-0.22647, -0.9630526, 0.1457429],
[0.7632883, -0.2684261, -0.587655]
]
},
{
'translation': [0.05725702, 1.307287, 2.7998220000000003],
'R':
[
[-0.3608179, -0.009492658, 0.932588],
[-0.0585942, -0.9977421, -0.03282591],
[0.9307939, -0.06648842, 0.359447]
]
},
{
'translation': [-0.2848168, 0.8079184, 3.1771599999999998],
'R':
[
[-0.0721105, -0.04817664, 0.9962325],
[-0.4393254, -0.8951841, -0.07508985],
[0.895429, -0.443085, 0.04338695]
]
},
{
'translation': [-1.563911, 0.8019607999999999, 3.5173159999999997],
'R':
[
[0.3737275, 0.09688602, 0.9224646],
[-0.009716132, -0.9940662, 0.1083427],
[0.9274878, -0.04945343, -0.3705685]
]
},
{
'translation': [0.35841340000000005, 0.9945657999999999, 3.439832],
'R':
[
[-0.3521056, 0.01328985, -0.9358659],
[-0.04961938, -0.9987582, 0.004485628],
[-0.9346441, 0.0480165, 0.3523278]
]
},
{
'translation': [0.5694388, 0.528871, 3.6873690000000003],
'R':
[
[-0.9150326, -0.04843184, 0.4004618],
[-0.1804886, -0.8386868, -0.5138369],
[0.3607481, -0.5424563, 0.7586845]
]
},
{
'translation': [1.378866, 1.270781, 2.631567],
'R':
[
[-0.9995936, 0.02847456, 0.001368653],
[-0.02843213, -0.9992908, 0.0246889],
[0.002070688, 0.02463995, 0.9996943]
]
},
{
'translation': [0.2213543, 0.65987, 3.644688],
'R':
[
[0.000575281, 0.06160985, -0.9981001],
[0.2082146, -0.9762325, -0.06013997],
[-0.978083, -0.2077844, -0.01338968]
]
},
{
'translation': [0.38862169999999996, 0.1375452, 4.216635],
'R':
[
[0.04176839, 0.00780962, -0.9990969],
[0.5555364, -0.831324, 0.01672664],
[-0.8304425, -0.5557333, -0.03906159]
]
},
{
'translation': [1.167962, 0.6176362000000001, 4.472351],
'R':
[
[-0.8970265, 0.1361548, -0.4204822],
[0.09417118, -0.8706428, -0.4828178],
[-0.4318278, -0.4726976, 0.7681679]
]
},
{
'translation': [0.1348272, 0.2515094, 4.570244],
'R':
[
[0.9170455, 0.1972746, -0.3465695],
[0.1720879, 0.5882171, 0.7901813],
[0.3597408, -0.7842726, 0.5054733]
]
},
{
'translation': [0.4124695, 0.5327588, 4.887095],
'R':
[
[-0.7926738, 0.1323657, 0.5951031],
[-0.396246, 0.6299778, -0.66792],
[-0.4633114, -0.7652499, -0.4469175]
]
},
{
'translation': [0.8671278, 0.8274571999999999, 3.985159],
'R':
[
[-0.8701088, -0.09522671, -0.4835728],
[0.4120245, 0.3978655, -0.8197188],
[0.270456, -0.9124883, -0.3069505]
]
}
]
subjects = [
'S1_Seq1_0', 'S1_Seq1_1', 'S1_Seq1_2', 'S1_Seq1_3', 'S1_Seq1_4', 'S1_Seq1_5', 'S1_Seq1_6', 'S1_Seq1_7',
'S1_Seq1_8', 'S1_Seq1_9', 'S1_Seq1_10', 'S1_Seq1_11', 'S1_Seq1_12', 'S1_Seq1_13', 'S1_Seq2_0', 'S1_Seq2_1',
'S1_Seq2_2', 'S1_Seq2_3', 'S1_Seq2_4', 'S1_Seq2_5', 'S1_Seq2_6', 'S1_Seq2_7', 'S1_Seq2_8', 'S1_Seq2_9',
'S1_Seq2_10', 'S1_Seq2_11', 'S1_Seq2_12', 'S1_Seq2_13', 'S2_Seq1_0', 'S2_Seq1_1', 'S2_Seq1_2', 'S2_Seq1_3',
'S2_Seq1_4', 'S2_Seq1_5', 'S2_Seq1_6', 'S2_Seq1_7', 'S2_Seq1_8', 'S2_Seq1_9', 'S2_Seq1_10', 'S2_Seq1_11',
'S2_Seq1_12', 'S2_Seq1_13', 'S2_Seq2_0', 'S2_Seq2_1', 'S2_Seq2_2', 'S2_Seq2_3', 'S2_Seq2_4', 'S2_Seq2_5',
'S2_Seq2_6', 'S2_Seq2_7', 'S2_Seq2_8', 'S2_Seq2_9', 'S2_Seq2_10', 'S2_Seq2_11', 'S2_Seq2_12', 'S2_Seq2_13',
'S3_Seq1_0', 'S3_Seq1_1', 'S3_Seq1_2', 'S3_Seq1_3', 'S3_Seq1_4', 'S3_Seq1_5', 'S3_Seq1_6', 'S3_Seq1_7',
'S3_Seq1_8', 'S3_Seq1_9', 'S3_Seq1_10', 'S3_Seq1_11', 'S3_Seq1_12', 'S3_Seq1_13', 'S3_Seq2_0', 'S3_Seq2_1',
'S3_Seq2_2', 'S3_Seq2_3', 'S3_Seq2_4', 'S3_Seq2_5', 'S3_Seq2_6', 'S3_Seq2_7', 'S3_Seq2_8', 'S3_Seq2_9',
'S3_Seq2_10', 'S3_Seq2_11', 'S3_Seq2_12', 'S3_Seq2_13', 'S4_Seq1_0', 'S4_Seq1_1', 'S4_Seq1_2', 'S4_Seq1_3',
'S4_Seq1_4', 'S4_Seq1_5', 'S4_Seq1_6', 'S4_Seq1_7', 'S4_Seq1_8', 'S4_Seq1_9', 'S4_Seq1_10', 'S4_Seq1_11',
'S4_Seq1_12', 'S4_Seq1_13', 'S4_Seq2_0', 'S4_Seq2_1', 'S4_Seq2_2', 'S4_Seq2_3', 'S4_Seq2_4', 'S4_Seq2_5',
'S4_Seq2_6', 'S4_Seq2_7', 'S4_Seq2_8', 'S4_Seq2_9', 'S4_Seq2_10', 'S4_Seq2_11', 'S4_Seq2_12', 'S4_Seq2_13',
'S5_Seq1_0', 'S5_Seq1_1', 'S5_Seq1_2', 'S5_Seq1_3', 'S5_Seq1_4', 'S5_Seq1_5', 'S5_Seq1_6', 'S5_Seq1_7',
'S5_Seq1_8', 'S5_Seq1_9', 'S5_Seq1_10', 'S5_Seq1_11', 'S5_Seq1_12', 'S5_Seq1_13', 'S5_Seq2_0', 'S5_Seq2_1',
'S5_Seq2_2', 'S5_Seq2_3', 'S5_Seq2_4', 'S5_Seq2_5', 'S5_Seq2_6', 'S5_Seq2_7', 'S5_Seq2_8', 'S5_Seq2_9',
'S5_Seq2_10', 'S5_Seq2_11', 'S5_Seq2_12', 'S5_Seq2_13', 'S6_Seq1_0', 'S6_Seq1_1', 'S6_Seq1_2', 'S6_Seq1_3',
'S6_Seq1_4', 'S6_Seq1_5', 'S6_Seq1_6', 'S6_Seq1_7', 'S6_Seq1_8', 'S6_Seq1_9', 'S6_Seq1_10', 'S6_Seq1_11',
'S6_Seq1_12', 'S6_Seq1_13', 'S6_Seq2_0', 'S6_Seq2_1', 'S6_Seq2_2', 'S6_Seq2_3', 'S6_Seq2_4', 'S6_Seq2_5',
'S6_Seq2_6', 'S6_Seq2_7', 'S6_Seq2_8', 'S6_Seq2_9', 'S6_Seq2_10', 'S6_Seq2_11', 'S6_Seq2_12', 'S6_Seq2_13',
'S7_Seq1_0', 'S7_Seq1_1', 'S7_Seq1_2', 'S7_Seq1_3', 'S7_Seq1_4', 'S7_Seq1_5', 'S7_Seq1_6', 'S7_Seq1_7',
'S7_Seq1_8', 'S7_Seq1_9', 'S7_Seq1_10', 'S7_Seq1_11', 'S7_Seq1_12', 'S7_Seq1_13', 'S7_Seq2_0', 'S7_Seq2_1',
'S7_Seq2_2', 'S7_Seq2_3', 'S7_Seq2_4', 'S7_Seq2_5', 'S7_Seq2_6', 'S7_Seq2_7', 'S7_Seq2_8', 'S7_Seq2_9',
'S7_Seq2_10', 'S7_Seq2_11', 'S7_Seq2_12', 'S7_Seq2_13', 'S8_Seq1_0', 'S8_Seq1_1', 'S8_Seq1_2', 'S8_Seq1_3',
'S8_Seq1_4', 'S8_Seq1_5', 'S8_Seq1_6', 'S8_Seq1_7', 'S8_Seq1_8', 'S8_Seq1_9', 'S8_Seq1_10', 'S8_Seq1_11',
'S8_Seq1_12', 'S8_Seq1_13', 'S8_Seq2_0', 'S8_Seq2_1', 'S8_Seq2_2', 'S8_Seq2_3', 'S8_Seq2_4', 'S8_Seq2_5',
'S8_Seq2_6', 'S8_Seq2_7', 'S8_Seq2_8', 'S8_Seq2_9', 'S8_Seq2_10', 'S8_Seq2_11', 'S8_Seq2_12', 'S8_Seq2_13',
'TS1', 'TS3', 'TS4'
]
camera_params = dict()
for sbj in subjects:
if sbj.startswith('S'):
subject, seq, cid = sbj.split('_')
cid = int(cid)
camera_meta = dict()
camera_meta.update(mpii_3dhp_cameras_extrinsic_params[cid])
camera_meta.update(mpii_3dhp_cameras_intrinsic_params[cid])
camera_params[sbj] = [camera_meta]
if sbj.startswith('T'):
camera_meta = dict()
camera_meta.update(mpii_3dhp_cameras_extrinsic_params[8])
camera_meta.update(mpii_3dhp_cameras_intrinsic_params[8])
camera_params[sbj] = [camera_meta]
def read_ann(ann_file, mode):
    """Load a 3DHP annotation .mat file.

    Train-set annotations are classic MATLAB files readable by scipy, while
    the test-set annotations are MATLAB v7.3 (HDF5) files that need mat73.

    :param ann_file: path to the annotation .mat file
    :param mode: either 'train' or 'test'
    :return: the parsed annotation dictionary
    :raises ValueError: if ``mode`` is not 'train' or 'test'
    """
    if mode == 'train':
        return sio.loadmat(ann_file)
    if mode == 'test':
        return mat73.loadmat(ann_file)
    # BUGFIX: an unknown mode previously fell through and returned None.
    raise ValueError(f"unknown mode: {mode!r} (expected 'train' or 'test')")
def read_cali(cali_file, vid_idx, mode):
    """Parse per-camera intrinsics/extrinsics from a 3DHP calibration file.

    Each camera occupies 7 lines in the file; line 5 of the group holds the
    4x4 intrinsic matrix and line 6 the 4x4 [R|t] extrinsic matrix (numeric
    values start at column 11 of each line).

    :param cali_file: path to the camera.calibration file
    :param vid_idx: iterable of camera indices to read
    :param mode: 'train' is supported; 'test' raises NotImplementedError
    :return: tuple (Ks, Rs, Ts) of per-camera 3x3 intrinsics, rotations
        (with the module-level x-axis rotation ``rot`` factored out) and
        translations converted from millimeters to meters
    :raises NotImplementedError: when ``mode`` is 'test'
    """
    Ks, Rs, Ts = [], [], []
    if mode == 'train':
        # BUGFIX: use a context manager so the handle is always closed
        # (the previous open() was never closed, and shadowed ``file``).
        with open(cali_file, 'r') as fh:
            content = fh.readlines()
        for vid_i in vid_idx:
            K = np.array([float(s) for s in content[vid_i * 7 + 5][11:-2].split()])
            K = np.reshape(K, (4, 4))[:3, :3]
            RT = np.array([float(s) for s in content[vid_i * 7 + 6][11:-2].split()])
            RT = np.reshape(RT, (4, 4))
            R = RT[:3, :3]
            # Factor out the global 90-degree rotation about the x axis.
            R = R @ np.linalg.inv(rot)
            T = RT[:3, 3] / 1000  # mm to m
            Ks.append(K)
            Rs.append(R)
            Ts.append(T)
    if mode == 'test':
        raise NotImplementedError
    return Ks, Rs, Ts
if __name__ == '__main__':
# REFERENCE: https://github.com/nkolot/SPIN/blob/master/datasets/preprocess/mpi_inf_3dhp.py
data_root = '/ssd/yzhan/data/benchmark/3D/mpi_inf_3dhp'
res_w = 2048
res_h = 2048
# train
train_subjects = ['S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8']
sequences = ['Seq1', 'Seq2']
video_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
train_kpt_idx = [4, 23, 24, 25, 18, 19, 20, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]
# test
test_subjects = ['TS1', 'TS3', 'TS4'] # drop TS2, due to inaccurate extrinsic
test_kpt_idx = [14, 8, 9, 10, 11, 12, 13, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]
METADATA = {
'layout': '3dhp',
'num_joints': 17,
'keypoints_symmetry': [[4, 5, 6, 11, 12, 13], [1, 2, 3, 14, 15, 16]]
}
data_3d = {}
data_2d = {}
intrinsics = {}
extrinsics = {}
for sbj in train_subjects:
for seq in sequences:
ann_meta = read_ann(os.path.join(data_root, sbj, seq, 'annot.mat'), mode='train')
valid_cameras = ann_meta['cameras'].reshape(-1).tolist()
valid_frames = ann_meta['frames'].reshape(-1).tolist()
kpts_2d = ann_meta['annot2']
kpts_3d = ann_meta['annot3']
Ks, Rs, Ts = read_cali(os.path.join(data_root, sbj, seq, 'camera.calibration'), video_list, mode='train')
assert len(Ks) == len(Rs) == len(Ts) == len(valid_cameras), 'camera miss match'
for cam_idx in valid_cameras:
subject = '{}_{}_{}'.format(sbj, seq, cam_idx)
joints_2d = kpts_2d[cam_idx, 0][:len(valid_frames)].reshape(len(valid_frames), -1, 2)[:, train_kpt_idx]
joints_3d = kpts_3d[cam_idx, 0][:len(valid_frames)].reshape(len(valid_frames), -1, 3)[:, train_kpt_idx]
joints_3d /= 1000 # mm to m
valid_joints_2d = list()
valid_joints_3d = list()
valid_file_names = list()
num_invalid_frame = 0
for frame_idx in range(len(valid_frames)):
joint_2d = joints_2d[frame_idx]
joint_3d = joints_3d[frame_idx]
x_in = np.logical_and(joint_2d[:, 0] < res_w, joint_2d[:, 0] >= 0)
y_in = np.logical_and(joint_2d[:, 1] < res_h, joint_2d[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(train_kpt_idx):
num_invalid_frame += 1
continue
frame_name = os.path.join(data_root, sbj, seq, 'imageSequence',
'video_{}'.format(cam_idx), 'img_%06d.jpg' % (frame_idx + 1))
if not os.path.exists(frame_name):
num_invalid_frame += 1
continue
valid_joints_2d.append(joint_2d)
valid_joints_3d.append(joint_3d)
valid_file_names.append('img_%06d.jpg' % (frame_idx + 1))
print('sbj -> {}, seq -> {}, camera -> {}, total frames -> {}, invalid frames -> {}'.format(
sbj, seq, cam_idx, len(valid_frames), num_invalid_frame)
)
valid_joints_2d = np.array(valid_joints_2d)
valid_joints_3d = np.array(valid_joints_3d)
assert valid_joints_2d.shape[0] == valid_joints_3d.shape[0] == len(valid_frames) - num_invalid_frame
data_3d.setdefault(subject, dict())
data_3d[subject].setdefault('Action', list())
data_3d[subject]['Action'] = valid_joints_3d
data_2d.setdefault(subject, dict())
data_2d[subject].setdefault('Action', list())
data_2d[subject]['Action'].append(
{
'file_name': valid_file_names,
'positions_2d': valid_joints_2d
}
)
intrinsics.setdefault(subject, [Ks[cam_idx].tolist()])
extrinsics.setdefault(subject, [Rs[cam_idx].tolist(), Ts[cam_idx].tolist()])
for sbj in test_subjects:
ann_meta = read_ann(os.path.join(data_root, sbj, 'annot_data.mat'), mode='test')
valid_frames = ann_meta['valid_frame'].reshape(-1).tolist()
kpts_2d = ann_meta['annot2'].transpose(2, 1, 0)[:, test_kpt_idx]
kpts_3d = ann_meta['annot3'].transpose(2, 1, 0)[:, test_kpt_idx]
kpts_3d /= 1000 # mm to m
valid_joints_2d = list()
valid_joints_3d = list()
valid_file_names = list()
num_invalid_frame = 0
for frame_idx, flag in enumerate(valid_frames):
if flag == 0:
num_invalid_frame += 1
continue
joint_2d = kpts_2d[frame_idx]
joint_3d = kpts_3d[frame_idx]
x_in = np.logical_and(joint_2d[:, 0] < res_w, joint_2d[:, 0] >= 0)
y_in = np.logical_and(joint_2d[:, 1] < res_h, joint_2d[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(train_kpt_idx):
num_invalid_frame += 1
continue
frame_name = os.path.join(data_root, sbj, 'imageSequence', 'img_%06d.jpg' % (frame_idx + 1))
if not os.path.exists(frame_name):
num_invalid_frame += 1
continue
valid_joints_2d.append(joint_2d)
valid_joints_3d.append(joint_3d)
valid_file_names.append('img_%06d.jpg' % (frame_idx + 1))
print('sbj -> {}, total frames -> {}, invalid frames -> {}'.format(
sbj, len(valid_frames), num_invalid_frame)
)
valid_joints_2d = np.array(valid_joints_2d)
valid_joints_3d = np.array(valid_joints_3d)
try:
assert valid_joints_2d.shape[0] == valid_joints_3d.shape[0] == len(valid_frames) - num_invalid_frame
except:
ipdb.set_trace()
data_3d.setdefault(sbj, dict())
data_3d[sbj].setdefault('Action', list())
data_3d[sbj]['Action'] = valid_joints_3d
data_2d.setdefault(sbj, dict())
data_2d[sbj].setdefault('Action', list())
data_2d[sbj]['Action'].append(
{
'file_name': valid_file_names,
'positions_2d': valid_joints_2d
}
)
_cameras = copy.deepcopy(camera_params)
for cameras in _cameras.values():
for i, cam in enumerate(cameras):
for k, v in cam.items():
if k not in ['id', 'res_w', 'res_h']:
cam[k] = np.array(v, dtype='float32')
camera_info = dict()
for subject in _cameras:
camera_info.setdefault(subject, list())
for cam in _cameras[subject]:
if 'translation' not in cam:
continue
K = np.eye(3, dtype=np.float)
K[0, 0] = cam['focal_length'][0]
K[1, 1] = cam['focal_length'][1]
K[0, 2] = cam['center'][0]
K[1, 2] = cam['center'][1]
R = cam['R']
t = np.array(cam['translation'], dtype=np.float64).reshape(3, 1)
camera_info[subject].append(CameraInfoPacket(P=None, K=K, R=R, t=t,
res_w=cam['res_w'], res_h=cam['res_h'],
azimuth=cam['azimuth'],
dist_coeff=None, undistort=False))
new_camera_info = dict()
for subject in _cameras:
new_camera_info.setdefault(subject, list())
for cam in _cameras[subject]:
if 'translation' not in cam:
continue
K = np.eye(3, dtype=np.float)
K[0, 0] = cam['focal_length'][0]
K[1, 1] = cam['focal_length'][1]
K[0, 2] = cam['center'][0]
K[1, 2] = cam['center'][1]
R = cam['R']
R = R @ np.linalg.inv(rot)
t = np.array(cam['translation'], dtype=np.float64).reshape(3, 1)
if subject.startswith('S'):
cid = int(subject.split('_')[-1])
else:
cid = 8
try:
assert np.sum(K - Ks[cid]) < 1e-3
except:
ipdb.set_trace()
assert np.sum(R - Rs[cid]) < 1e-6
assert np.sum(t.reshape(3) - Ts[cid]) < 1e-6
new_camera_info[subject].append(CameraInfoPacket(P=None, K=K, R=R, t=t,
res_w=cam['res_w'], res_h=cam['res_h'],
azimuth=cam['azimuth'],
dist_coeff=None, undistort=False))
for ky in subjects:
joint_2d, file_names = data_2d[ky]['Action'][0]['positions_2d'], data_2d[ky]['Action'][0]['file_name']
joint_3d = data_3d[ky]['Action']
cam = camera_info[ky][0]
new_cam = new_camera_info[ky][0]
world_3d = cam.camera2world(joint_3d)
world_3d_update = world_3d.copy()
for idx in range(world_3d.shape[0]):
world_3d_update[idx] = (rot @ world_3d[idx].T).T
projected_2d = new_cam.project(catesian2homogenous(world_3d_update))
error = np.sum(joint_2d - projected_2d)
print('{} error: {}'.format(ky, error/world_3d_update.shape[0]))
data_3d[ky]['Action'] = world_3d_update
np.savez(os.path.join(data_root, 'data_2d_3dhp_gt.npz'), metadata=METADATA, positions_2d=data_2d)
np.savez(os.path.join(data_root, 'data_3d_3dhp.npz'), positions_3d=data_3d)
json.dump(intrinsics, open(os.path.join(data_root, 'intrinsic.json'), 'w'), indent=4)
json.dump(extrinsics, open(os.path.join(data_root, 'extrinsic.json'), 'w'), indent=4) | 37.177586 | 119 | 0.518202 |
aeb984e53fbe4f0422547372387cc06ffdabc996 | 1,512 | py | Python | alipay/aop/api/response/KoubeiServindustryLeadsRecordBatchqueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/KoubeiServindustryLeadsRecordBatchqueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/KoubeiServindustryLeadsRecordBatchqueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.LeadsOrderInfo import LeadsOrderInfo
| 35.162791 | 120 | 0.70172 |
aeba289fa36713ab8da25a19a9e51eb95799d065 | 6,125 | py | Python | iadmin/filters.py | saxix/django-iadmin | 675317e8f0b4142eaf351595da27c065637a83ba | [
"BSD-1-Clause"
] | 1 | 2015-06-23T09:24:12.000Z | 2015-06-23T09:24:12.000Z | iadmin/filters.py | saxix/django-iadmin | 675317e8f0b4142eaf351595da27c065637a83ba | [
"BSD-1-Clause"
] | null | null | null | iadmin/filters.py | saxix/django-iadmin | 675317e8f0b4142eaf351595da27c065637a83ba | [
"BSD-1-Clause"
] | null | null | null | from django.contrib.admin.filters import RelatedFieldListFilter, AllValuesFieldListFilter
from django.db import models
from django.db.models.query_utils import Q
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode
| 37.576687 | 112 | 0.629388 |
aeba984159365e20b71a8671ae068606c6f145f8 | 332 | py | Python | db.py | amkudr/roadbotproject | f3a0cd10b85bc43e8c823b8670c8b4801e9ed3ab | [
"MIT"
] | null | null | null | db.py | amkudr/roadbotproject | f3a0cd10b85bc43e8c823b8670c8b4801e9ed3ab | [
"MIT"
] | 1 | 2021-06-21T12:41:08.000Z | 2021-06-21T12:41:08.000Z | db.py | amkudr/roadbotproject | f3a0cd10b85bc43e8c823b8670c8b4801e9ed3ab | [
"MIT"
] | 1 | 2021-06-21T08:01:12.000Z | 2021-06-21T08:01:12.000Z | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
import settings
engine = create_engine(settings.API_DATA)
db_session = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
| 27.666667 | 55 | 0.843373 |
aebb903fbc8412bafe91beffd2ed946df7885f12 | 132 | py | Python | python/Numpy/np-arrays.py | cdrowley/hackerrank | cc5c925327cd3ce5b52c1614b814da75d42cca72 | [
"MIT"
] | null | null | null | python/Numpy/np-arrays.py | cdrowley/hackerrank | cc5c925327cd3ce5b52c1614b814da75d42cca72 | [
"MIT"
] | null | null | null | python/Numpy/np-arrays.py | cdrowley/hackerrank | cc5c925327cd3ce5b52c1614b814da75d42cca72 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/np-arrays/problem
import numpy
| 16.5 | 57 | 0.712121 |
aebbeb314ce75549bd49f12026710823e4b7c6f5 | 2,738 | py | Python | pycfg.py | rhum1s/scripting_tools | c354a8a3bd38ca8aeaaa91405aa1c4ce703ef36b | [
"MIT"
] | null | null | null | pycfg.py | rhum1s/scripting_tools | c354a8a3bd38ca8aeaaa91405aa1c4ce703ef36b | [
"MIT"
] | null | null | null | pycfg.py | rhum1s/scripting_tools | c354a8a3bd38ca8aeaaa91405aa1c4ce703ef36b | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
PyCfg - R. Souweine, 2016.
"""
import sys
import os
import ConfigParser
from datetime import datetime
if __name__ == "__main__":
c = Cfg("configs/global.cfg", ["inv", "general"])
print c.show_config() | 34.225 | 88 | 0.528853 |
aebcb1ae1aa0b22cd2b6d96651c21aa61504ef82 | 504 | py | Python | DGF/resolvers/change.py | LonguCodes/DGF | bd344eff34cbe6438f71631e2cc103f1c4584e09 | [
"MIT"
] | null | null | null | DGF/resolvers/change.py | LonguCodes/DGF | bd344eff34cbe6438f71631e2cc103f1c4584e09 | [
"MIT"
] | null | null | null | DGF/resolvers/change.py | LonguCodes/DGF | bd344eff34cbe6438f71631e2cc103f1c4584e09 | [
"MIT"
] | null | null | null | from .utils import get_filters, get_values, get_relations, set_values, set_relations
| 28 | 84 | 0.712302 |
aebe95d42dfd494b017b2be2cdcdbc8f022206bc | 212 | py | Python | 11 Februari 2021/7_Ceil and Floor.py | FarrelRamdhani/KelasInformatika | 4c43c0405f335447b10aab74afec627df23b919f | [
"MIT"
] | null | null | null | 11 Februari 2021/7_Ceil and Floor.py | FarrelRamdhani/KelasInformatika | 4c43c0405f335447b10aab74afec627df23b919f | [
"MIT"
] | null | null | null | 11 Februari 2021/7_Ceil and Floor.py | FarrelRamdhani/KelasInformatika | 4c43c0405f335447b10aab74afec627df23b919f | [
"MIT"
] | null | null | null | from math import ceil, floor, trunc
x = 1.4
y = 2.6
print(floor(x), floor(y))
print(floor(-x), floor(-y))
print(ceil(x), ceil(y))
print(ceil(-x), ceil(-y))
print(trunc(x), trunc(y))
print(trunc(-x), trunc(-y))
| 17.666667 | 35 | 0.622642 |
aebf8e2ff0886eb00e8aeefe34ba66669b5dca4c | 513 | py | Python | clients/python/rest.py | BernardNotarianni/barrel-aof17 | 02b959aa38751f9912aa1b0dc5d112b76b727585 | [
"Apache-2.0"
] | 1 | 2017-09-11T00:35:49.000Z | 2017-09-11T00:35:49.000Z | clients/python/rest.py | BernardNotarianni/barrel-aof17 | 02b959aa38751f9912aa1b0dc5d112b76b727585 | [
"Apache-2.0"
] | null | null | null | clients/python/rest.py | BernardNotarianni/barrel-aof17 | 02b959aa38751f9912aa1b0dc5d112b76b727585 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import requests
import json
store = 'http://localhost:8080/source'
print "post a document"
doc = {'name': 'tom'}
headers = {'Content-Type': 'application/json'}
r = requests.post(store, headers=headers, data=json.dumps(doc))
print r.status_code
docid = r.json()['id']
revid = r.json()['rev']
print "get the document"
r = requests.get(store + '/' + docid)
print r.status_code
print "delete the document"
r = requests.delete(store + '/' + docid + '?rev=' + revid)
print r.status_code
| 18.321429 | 63 | 0.678363 |
aec0c6a44b056aadc36e40e0073e2808b0d0bb55 | 10,463 | py | Python | mvg_distributions/sqrt_gamma_gaussian.py | Liang813/tf_mvg | 01bc681a8b3aac5dcf0837d481b963f4968eb777 | [
"MIT"
] | 21 | 2019-04-04T07:46:54.000Z | 2021-12-15T18:06:35.000Z | mvg_distributions/sqrt_gamma_gaussian.py | Liang813/tf_mvg | 01bc681a8b3aac5dcf0837d481b963f4968eb777 | [
"MIT"
] | 8 | 2019-03-01T10:08:30.000Z | 2021-10-04T13:00:11.000Z | mvg_distributions/sqrt_gamma_gaussian.py | Liang813/tf_mvg | 01bc681a8b3aac5dcf0837d481b963f4968eb777 | [
"MIT"
] | 7 | 2019-12-18T23:41:44.000Z | 2021-11-21T10:15:48.000Z | import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import seed_stream
import tensorflow_probability as tfp
import mvg_distributions.covariance_representations as cov_rep
from mvg_distributions.gamma import SqrtGamma
tfd = tfp.distributions
tfb = tfp.bijectors
class SparseSqrtGammaGaussian(SqrtGammaGaussian):
def __init__(self, df, log_diag_scale, add_mode_correction=False, validate_args=False, allow_nan_stats=True,
name="SparseSqrtGammaGaussian"):
"""
Sparse square root Gamma-Gaussian distribution, this is equivalent to a Cholesky-Wishart distribution with a
diagonal scale matrix and with a sparsity correction factor. Thus it has the same hyper-parameters, as a the
Cholesky-Wishart distribution.
Args:
The distribution is defined for batch (b) of M (pxp) matrices, forming a tensor of [b, p, p]
df: degrees of freedom, a tensor of [b], the values in it must be df > p - 1
log_diag_scale: a tensor of [b, p] with the log diagonal values of the matrix S
add_mode_correction: bool, if using the distribution as a prior, setting this to True will add
a correction factor to log_diag_scale, such that the log_prob will have the maximum in S
validate_args:
allow_nan_stats:
name:
"""
super().__init__(df, log_diag_scale, add_mode_correction=add_mode_correction, validate_args=validate_args,
allow_nan_stats=allow_nan_stats, name=name)
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name):
value, log_prob_shape = self._convert_to_cov_obj(value)
try:
log_prob = self._log_prob(value)
return tf.reshape(log_prob, log_prob_shape)
except NotImplementedError as original_exception:
try:
log_prob = tf.log(self._prob(value))
return tf.reshape(log_prob, log_prob_shape)
except NotImplementedError:
raise original_exception
def _log_prob_sqrt_gamma(self, x):
log_diag_prob = self.sqrt_gamma_dist.log_prob(x.log_diag_chol_precision)
return tf.reduce_sum(log_diag_prob, axis=1)
def _log_prob_normal(self, x):
if isinstance(x, cov_rep.PrecisionConvCholFilters):
nb = x.recons_filters_precision.shape[2].value
# Get the elements in matrix [b, n, n] after they've been aligned per row, this is a [b, n, nb] tensor
# that if it were reshaped to [b, n_w, n_h, n_b], the vector [b, i, j, :] contain the values of
# the kth row in the matrix, where k corresponds to the i,j pixel.
# For each row, we discard the leading zeros and the diagonal element
off_diag_elements_aligned = x.recons_filters_precision_aligned[:, :, nb // 2 + 1:]
log_off_diag_prob = self.normal_dist.log_prob(off_diag_elements_aligned)
# Some elements in recons_filters_precision get zeroed out due to the zero padding for elements out of the
# image in the convolution operator, thus they are not part of the Cholesky matrix.
# Do not take into account those elements for the log probability computation
off_diag_mask_aligned = x.off_diag_mask_compact_aligned()
# log_off_diag_prob is [b, n, nb // 2 + 1], off_diag_mask is [n, nb]
log_off_diag_prob *= off_diag_mask_aligned[tf.newaxis, :, nb // 2 + 1:]
log_off_diag_prob = tf.reduce_sum(log_off_diag_prob, axis=[1, 2])
else:
log_off_diag_prob = super()._log_prob_normal(x.chol_precision)
return log_off_diag_prob
| 43.057613 | 119 | 0.640925 |
aec3ddbaacba1c772ca5cc048314681c7330681e | 3,606 | py | Python | anymail/webhooks/sendinblue.py | alee/django-anymail | acca6a46e17143caadb0445d7bdeb2f1eb00b71b | [
"BSD-3-Clause"
] | 1,324 | 2016-03-10T04:57:52.000Z | 2022-03-31T15:14:58.000Z | anymail/webhooks/sendinblue.py | alee/django-anymail | acca6a46e17143caadb0445d7bdeb2f1eb00b71b | [
"BSD-3-Clause"
] | 208 | 2016-03-10T03:40:59.000Z | 2022-03-22T23:16:08.000Z | anymail/webhooks/sendinblue.py | alee/django-anymail | acca6a46e17143caadb0445d7bdeb2f1eb00b71b | [
"BSD-3-Clause"
] | 129 | 2016-03-10T09:24:52.000Z | 2022-02-07T05:37:24.000Z | import json
from datetime import datetime
from django.utils.timezone import utc
from .base import AnymailBaseWebhookView
from ..signals import AnymailTrackingEvent, EventType, RejectReason, tracking
| 42.928571 | 111 | 0.640044 |
aec6b13b4c7b4c30dffc5f72de017a641a160409 | 2,232 | py | Python | authentication/strategy.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 2 | 2018-06-20T19:37:03.000Z | 2021-01-06T09:51:40.000Z | authentication/strategy.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 1,226 | 2017-02-23T14:52:28.000Z | 2022-03-29T13:19:54.000Z | authentication/strategy.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 3 | 2017-03-20T03:51:27.000Z | 2021-03-19T15:54:31.000Z | """Custom strategy"""
from urllib.parse import urlencode
from django.db import transaction
from social_django.strategy import DjangoStrategy
from main import features
from profiles.models import LegalAddress, Profile
| 35.428571 | 96 | 0.696237 |
aec73f93f4747f28f93931758f77133311e3036f | 83 | py | Python | dynasty/__init__.py | LeMinaw/Dynasty | 458685df8051cd11f497222e0cd7b672515cd6aa | [
"MIT"
] | 2 | 2021-04-04T19:31:32.000Z | 2022-02-06T13:38:09.000Z | dynasty/__init__.py | LeMinaw/Dynasty | 458685df8051cd11f497222e0cd7b672515cd6aa | [
"MIT"
] | null | null | null | dynasty/__init__.py | LeMinaw/Dynasty | 458685df8051cd11f497222e0cd7b672515cd6aa | [
"MIT"
] | null | null | null | __version__ = '0.0.2'
from pathlib import Path
APP_DIR = Path(__file__).parent
| 10.375 | 31 | 0.722892 |
aec8d771cb10d5ba72ec5fd7b9d6f0e927a8f333 | 580 | py | Python | addons/swift/tests/utils.py | tsukaeru/RDM-osf.io | 2dc3e539322b6110e51772f8bd25ebdeb8e12d0e | [
"Apache-2.0"
] | 11 | 2018-12-11T16:39:40.000Z | 2022-02-26T09:51:32.000Z | addons/swift/tests/utils.py | tsukaeru/RDM-osf.io | 2dc3e539322b6110e51772f8bd25ebdeb8e12d0e | [
"Apache-2.0"
] | 52 | 2018-04-13T05:03:21.000Z | 2022-03-22T02:56:19.000Z | addons/swift/tests/utils.py | tsukaeru/RDM-osf.io | 2dc3e539322b6110e51772f8bd25ebdeb8e12d0e | [
"Apache-2.0"
] | 16 | 2018-07-09T01:44:51.000Z | 2021-06-30T01:57:16.000Z | # -*- coding: utf-8 -*-
from addons.base.tests.base import OAuthAddonTestCaseMixin, AddonTestCase
from addons.swift.provider import SwiftProvider
from addons.swift.serializer import SwiftSerializer
from addons.swift.tests.factories import SwiftAccountFactory
| 30.526316 | 73 | 0.753448 |
aec9ee64277ff4b007aad939e4ac17372b6cdf19 | 1,311 | py | Python | _grains/mac_sp.py | mosen/salt-osx | 818d4ae89bb2853b28999a8ddb883c0fe1b1a657 | [
"MIT"
] | 68 | 2015-02-11T00:53:54.000Z | 2021-11-06T16:07:17.000Z | _grains/mac_sp.py | Jaharmi/salt-osx | f6db606f04846d45935f3ed729e6243441cee360 | [
"MIT"
] | 13 | 2016-01-05T00:01:34.000Z | 2022-03-18T23:44:21.000Z | _grains/mac_sp.py | Jaharmi/salt-osx | f6db606f04846d45935f3ed729e6243441cee360 | [
"MIT"
] | 19 | 2015-04-09T20:58:55.000Z | 2020-11-04T06:39:06.000Z | # -*- coding: utf-8 -*-
'''
Mac hardware information, generated by system_profiler.
This is a separate grains module because it has a dependency on plistlib.
'''
import logging
import salt.utils
import salt.modules.cmdmod
log = logging.getLogger(__name__)
__virtualname__ = 'mac_sp'
try:
import plistlib
has_libs = True
except ImportError:
has_libs = False
# Chicken and egg problem, SaltStack style
# __salt__ is already populated with grains by this stage.
cmdmod = {
'cmd.run': salt.modules.cmdmod._run_quiet,
# 'cmd.retcode': salt.modules.cmdmod._retcode_quiet,
'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}
def _get_spdatatype(sp_data_type):
'''
Run system_profiler with a specific data type.
Running with all types slows down execution a bit, so be picky about what you need.
'''
output_plist = cmdmod['cmd.run']('system_profiler {0} -xml'.format(sp_data_type))
return output_plist
def hardware():
'''
Get general hardware information.
Provided by SPHardwareDataType (/System/Library/SystemProfiler/SPPlatformReporter.spreporter)
'''
sp_hardware = _get_spdatatype('SPHardwareDataType')
| 25.211538 | 97 | 0.723112 |