hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6dde1212ba406b7fb0a629963b918fa2448f2579 | 2,533 | py | Python | pcwg/reporting/colour.py | lcameron05/PCWG | 8ae8ea7d644aa5bec0d1651101d83d8f17994f4b | [
"MIT"
] | 14 | 2015-01-15T12:40:51.000Z | 2019-06-14T16:10:08.000Z | pcwg/reporting/colour.py | lzhiwen3090/PCWG | 795e3ea267c7b87187dce04721c91a9d9c7999a7 | [
"MIT"
] | 121 | 2015-01-06T11:31:25.000Z | 2018-05-29T21:13:23.000Z | pcwg/reporting/colour.py | lzhiwen3090/PCWG | 795e3ea267c7b87187dce04721c91a9d9c7999a7 | [
"MIT"
] | 26 | 2015-01-15T12:41:09.000Z | 2019-04-11T14:45:32.000Z | import xlwt
| 32.474359 | 107 | 0.446901 |
6dde7cdccad8c45c7dc62586357016b68fc1ea28 | 2,044 | py | Python | mwlib/serve.py | h4ck3rm1k3/mwlib | 11475c6ad7480e35a4a59f276c47f6a5203435cc | [
"Unlicense"
] | 1 | 2019-04-27T20:14:53.000Z | 2019-04-27T20:14:53.000Z | mwlib/serve.py | h4ck3rm1k3/mwlib | 11475c6ad7480e35a4a59f276c47f6a5203435cc | [
"Unlicense"
] | null | null | null | mwlib/serve.py | h4ck3rm1k3/mwlib | 11475c6ad7480e35a4a59f276c47f6a5203435cc | [
"Unlicense"
] | null | null | null | #! /usr/bin/env python
"""WSGI server interface to mw-render and mw-zip/mw-post"""
import sys, os, time, re, shutil, StringIO
from hashlib import md5
from mwlib import myjson as json
from mwlib import log, _version
from mwlib.metabook import calc_checksum
log = log.Log('mwlib.serve')
collection_id_rex = re.compile(r'^[a-z0-9]{16}$')
def purge_cache(max_age, cache_dir):
"""Remove all subdirectories of cache_dir whose mtime is before now-max_age
@param max_age: max age of directories in seconds
@type max_age: int
@param cache_dir: cache directory
@type cache_dir: basestring
"""
now = time.time()
for path in get_collection_dirs(cache_dir):
for fn in os.listdir(path):
if now - os.stat(os.path.join(path, fn)).st_mtime > max_age:
break
else:
continue
try:
shutil.rmtree(path)
except Exception, exc:
log.ERROR('could not remove directory %r: %s' % (path, exc))
| 28.788732 | 114 | 0.629159 |
6ddf22f66b957c8246b1badd12845d6735c4bf0d | 118 | py | Python | src/passport/use_cases/__init__.py | clayman-micro/passport | 37aeb548560458cd4ba9bf9db551e360ad219b9c | [
"MIT"
] | null | null | null | src/passport/use_cases/__init__.py | clayman-micro/passport | 37aeb548560458cd4ba9bf9db551e360ad219b9c | [
"MIT"
] | 5 | 2020-09-15T15:03:33.000Z | 2021-11-26T08:35:10.000Z | src/passport/use_cases/__init__.py | clayman74/passport | 37aeb548560458cd4ba9bf9db551e360ad219b9c | [
"MIT"
] | null | null | null | from aiohttp import web
| 16.857143 | 53 | 0.661017 |
6ddf36ca544a3c8ccbf3d16260d57ec9db94a87c | 2,021 | py | Python | amap_distance_matrix/schemas/amap.py | Euraxluo/distance_matrix | 680e3147c263ea5f1abb26998aeb0b1985442a4b | [
"MIT"
] | 1 | 2022-03-15T06:47:36.000Z | 2022-03-15T06:47:36.000Z | amap_distance_matrix/schemas/amap.py | Euraxluo/distance_matrix | 680e3147c263ea5f1abb26998aeb0b1985442a4b | [
"MIT"
] | null | null | null | amap_distance_matrix/schemas/amap.py | Euraxluo/distance_matrix | 680e3147c263ea5f1abb26998aeb0b1985442a4b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Time: 2022-03-01 15:43
# Copyright (c) 2022
# author: Euraxluo
from typing import *
from amap_distance_matrix.helper import haversine,format_loc
| 29.720588 | 94 | 0.573973 |
6ddf53b03344bb1f12510f5ef0a187bb6b91724f | 1,589 | py | Python | linear_regression.py | michael-golfi/comp551_assignment1 | a4eb73bae4280d5934165ee8b8400dd1c24e9758 | [
"MIT"
] | null | null | null | linear_regression.py | michael-golfi/comp551_assignment1 | a4eb73bae4280d5934165ee8b8400dd1c24e9758 | [
"MIT"
] | null | null | null | linear_regression.py | michael-golfi/comp551_assignment1 | a4eb73bae4280d5934165ee8b8400dd1c24e9758 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import csv
import datetime
AGE = "Age Category"
SEX = "Sex"
TIME = "Time"
OUTPUT_BASE = "output/"
COLS_X = [AGE, SEX, TIME, "TotalRaces"]
COLS_Y = [TIME]
#training data
TRAIN_X = "output/Y2X_train.csv"
TRAIN_Y = "output/Y2Y_train.csv"
#testing data
TEST_X = "output/Y2X_test.csv"
TEST_Y = "output/Y2Y_test.csv"
#Prediction data
PREDICTION = "output/PREDICTION.csv"
#Read preprocessed data
df_X_train = pd.read_csv(TRAIN_X, usecols=COLS_X)
df_Y_train = pd.read_csv(TRAIN_Y, usecols=COLS_Y)
#Read test data
df_X_test = pd.read_csv(TEST_X, usecols=COLS_X)
df_Y_test = pd.read_csv(TEST_Y, usecols=COLS_Y)
df_2017 = pd.read_csv(PREDICTION, usecols=COLS_X)
#Closed form solution: w = (XTX)^(-1)XTY
a = np.linalg.inv(np.dot(df_X_train.as_matrix().transpose(),df_X_train.as_matrix()))
b = np.dot(df_X_train.as_matrix().transpose(),df_Y_train.as_matrix())
w = np.dot(a,b)
y = np.dot(df_X_test, w)
print w
print abs((y-df_Y_test.as_matrix())).mean()
print (y-df_Y_test.as_matrix()).max()
a = abs(((y-df_Y_test.as_matrix())/df_Y_test.as_matrix())*100)
print a.max()
print a.mean()
c=0
for threshold in range(0,21):
for i in range(len(a)):
if a[i] < threshold:
c = c + 1
d = (float(c)/float(len(a)))*100
print threshold,d
c = 0
y_2017 = np.dot(df_2017,w)
new_time = list()
for i in range(len(y_2017)):
m, s = divmod(y_2017[i], 60)
h, m = divmod(m, 60)
new_time.append("%d:%02d:%02d" % (h, m, s))
pd.DataFrame(new_time).to_csv(OUTPUT_BASE + "LR_Results.csv")
| 17.086022 | 84 | 0.69163 |
6de2331479d616c60c982b16b354a172879db20e | 393 | py | Python | at_learner_core/at_learner_core/models/init_model.py | hieuvecto/CASIA-SURF_CeFA | 71dfd846ce968b3ed26974392a6e0c9b40aa12ae | [
"MIT"
] | 133 | 2020-03-03T03:58:04.000Z | 2022-03-28T21:42:36.000Z | at_learner_core/at_learner_core/models/init_model.py | lucaslu1987/CASIA-SURF_CeFA | 205d3d976523ed0c15d1e709ed7f21d50d7cf19b | [
"MIT"
] | 24 | 2020-03-13T09:30:09.000Z | 2022-03-22T07:47:15.000Z | at_learner_core/at_learner_core/models/init_model.py | lucaslu1987/CASIA-SURF_CeFA | 205d3d976523ed0c15d1e709ed7f21d50d7cf19b | [
"MIT"
] | 29 | 2020-03-10T06:46:45.000Z | 2022-01-29T15:35:21.000Z | from .wrappers import SimpleClassifierWrapper
| 32.75 | 73 | 0.750636 |
6de2eb54f1f884015cd25862ba629bbde92b8312 | 11,739 | py | Python | register.py | khvmaths/Register_UM_Crawl | 2741bfe9267e9ad068b438b27141cfc664f140f2 | [
"MIT"
] | null | null | null | register.py | khvmaths/Register_UM_Crawl | 2741bfe9267e9ad068b438b27141cfc664f140f2 | [
"MIT"
] | null | null | null | register.py | khvmaths/Register_UM_Crawl | 2741bfe9267e9ad068b438b27141cfc664f140f2 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from urllib.request import Request,urlopen
from urllib.error import HTTPError
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
import sys
import threading
import datetime
import win32con
import os
import struct
import time
import pyttsx3
from win32api import *
from win32gui import *
w=WindowsBalloonTip()
engine = pyttsx3.init()
if __name__ == '__main__':
main()
| 43.639405 | 164 | 0.597155 |
6de329be760fa541cce9f8961d309f42264a1df3 | 1,604 | py | Python | tests/utils.py | ofek/hatch-containers | dd57acc812db8e62994f2b00160a05292d5f35c1 | [
"MIT"
] | 3 | 2021-12-29T06:44:41.000Z | 2022-02-28T09:27:20.000Z | tests/utils.py | ofek/hatch-containers | dd57acc812db8e62994f2b00160a05292d5f35c1 | [
"MIT"
] | null | null | null | tests/utils.py | ofek/hatch-containers | dd57acc812db8e62994f2b00160a05292d5f35c1 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021-present Ofek Lev <oss@ofek.dev>
#
# SPDX-License-Identifier: MIT
import subprocess
from textwrap import dedent as _dedent
import tomli
import tomli_w
| 30.846154 | 119 | 0.667082 |
6de4785de957dcd93698e538204a1309d3d31d03 | 881 | py | Python | Question Set 3 - (Functions)/Version 1/main.py | Randula98/Python-For-Beginners | e41a6014be882f01c6ccdcbe2167e2b581646eee | [
"MIT"
] | 6 | 2021-12-14T17:52:11.000Z | 2021-12-19T20:22:44.000Z | Question Set 3 - (Functions)/Version 1/main.py | GIHAA/Python-For-Beginners | e41a6014be882f01c6ccdcbe2167e2b581646eee | [
"MIT"
] | null | null | null | Question Set 3 - (Functions)/Version 1/main.py | GIHAA/Python-For-Beginners | e41a6014be882f01c6ccdcbe2167e2b581646eee | [
"MIT"
] | 2 | 2021-12-19T18:50:30.000Z | 2022-01-01T23:05:18.000Z |
#define calcIncrement function
#define calcTotalSalary function
#get user inputs for salary
salary = input("Enter Salary : ")
salary = float(salary)
#get user inputs for number of years worked
years = input("Enter no of years worked : ")
years = int(years)
#calculate the increment by passing the given values to the function
increment = calcIncrement(salary , years)
#calculate the total salary by passing the given values to the function
totalSalary = calcTotalSalary(salary , increment)
#display the increment and the total salary
print("Increment : " + str(increment))
print("Total Salary : " + str(totalSalary))
| 26.69697 | 71 | 0.732123 |
6de4cb3c7c7e948bd05ee2500418fd79816b080a | 438 | py | Python | rubin_sim/maf/mafContrib/LSSObsStrategy/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/maf/mafContrib/LSSObsStrategy/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/maf/mafContrib/LSSObsStrategy/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | from .newDitherStackers import *
from .newDitherStackers import *
from .maskingAlgorithmGeneralized import *
from .saveBundleData_npzFormat import *
from .numObsMetric import *
from .galaxyCountsMetric_extended import *
from .galaxyCounts_withPixelCalibration import *
from .artificialStructureCalculation import *
from .almPlots import *
from .coaddM5Analysis import *
from .constantsForPipeline import *
from .os_bias_analysis import *
| 33.692308 | 48 | 0.835616 |
6de56b62032d39a0f8c492c5d736fc6926aeb427 | 2,576 | py | Python | setup.py | vomaufgang/publish | 6e610c055118f9761d49962a12d9095cf2936386 | [
"MIT"
] | 1 | 2019-08-19T01:45:29.000Z | 2019-08-19T01:45:29.000Z | setup.py | vomaufgang/publish | 6e610c055118f9761d49962a12d9095cf2936386 | [
"MIT"
] | 11 | 2019-08-18T09:31:10.000Z | 2021-01-27T19:02:53.000Z | setup.py | vomaufgang/publish | 6e610c055118f9761d49962a12d9095cf2936386 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# anited. publish - Python package with cli to turn markdown files into ebooks
# Copyright (c) 2014 Christopher Knrndel
#
# Distributed under the MIT License
# (license terms are at http://opensource.org/licenses/MIT).
"""Setup script for easy_install and pip."""
import sys
import codecs
import os.path
MIN_SUPPORTED_PYTHON_VERSION = (3, 6)
if sys.version_info < MIN_SUPPORTED_PYTHON_VERSION:
sys.exit('Sorry, Python < {} is not supported.'.format(
'.'.join(map(str, MIN_SUPPORTED_PYTHON_VERSION))
))
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(rel_path):
"""Reads the contents of the file atthe relative path `rel_path`.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as file_:
return file_.read()
def get_version(rel_path):
"""Gets the version number declared in the `__version__` constant of
the Python file at `rel_path`.
"""
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
README = open('README.md').read()
VERSION = get_version('publish/__init__.py')
REQUIREMENTS = open('requirements.txt').readlines()
DEV_REQUIREMENTS = open('dev-requirements.txt').readlines()[1:]
setup(
name='anited-publish',
version=VERSION,
description='Python package with command line interface to turn markdown '
'files into ebooks.',
long_description=README,
long_description_content_type='text/markdown',
author='Christopher Knrndel',
author_email='cknoerndel@anited.de',
url='https://gitlab.com/anited/publish',
packages=[
'publish',
],
package_data={
'publish': ['template.jinja', 'VERSION']
},
entry_points={
'console_scripts': [
'publish = publish.cli:main'
]
},
python_requires=">=3.6",
install_requires=REQUIREMENTS,
tests_require=DEV_REQUIREMENTS,
extras_require={
'dev': DEV_REQUIREMENTS
},
license="MIT",
zip_safe=False,
keywords='publish',
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| 28 | 78 | 0.65295 |
6de715d666507e3f3849eece5131f4cb5a8c80e7 | 1,187 | py | Python | main.py | brunofornazari/tcc | 57990d68ca196b4da7791faab717d67cfe5497d3 | [
"Unlicense"
] | null | null | null | main.py | brunofornazari/tcc | 57990d68ca196b4da7791faab717d67cfe5497d3 | [
"Unlicense"
] | null | null | null | main.py | brunofornazari/tcc | 57990d68ca196b4da7791faab717d67cfe5497d3 | [
"Unlicense"
] | null | null | null | """
main.py
Main.py responsvel por iniciar o processo o programa completamente, atravs de duas threads, uma para manter o
servidor de aplicao via flask para poder receber requisies e comunicar com o cliente, seus processos esto
detalhados em server.py e outra thread para manter o fluxo da aplicao, baseado no processo descrito em app.py.
Entre a inicializao de uma thread e outra, foi alocado um tempo de 3s de espera para que haja tempo hbil do
servidor de aplicao ativar seus servios antes do restante do processo comear a enviar requisies.
Para casos onde for iniciado atravs de uma mquina diferente de um raspberry pi, necessrio inserir uma varivel
de ambiente ENV_TYPE=DEV, para que as bilbiotecas exclusivas do microcomputador no sejam carregadas e causem erros
de importao, podendo ser ento iniciado e testado em outros tipos de computadores e sistemas operacionais em geral.
"""
import threading
import time
import server
import app
if __name__ == '__main__' :
threadMain = threading.Thread(target=app.main)
threadServer = threading.Thread(target=server.startServer)
threadServer.start()
time.sleep(3)
threadMain.start() | 40.931034 | 117 | 0.795282 |
6de7ad1350d5b902468a609df5d16498912264b6 | 1,347 | py | Python | examples/DGL/alagnn.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | 1 | 2020-07-29T08:00:32.000Z | 2020-07-29T08:00:32.000Z | examples/DGL/alagnn.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | null | null | null | examples/DGL/alagnn.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import random
import math
import torch
import dgl
import graphgallery
from graphgallery.datasets import Planetoid
print("GraphGallery version: ", graphgallery.__version__)
print("PyTorch version: ", torch.__version__)
print("DGL version: ", dgl.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# splits = data.split_nodes()
graphgallery.set_backend("dgl")
# experimental setup in
# `When Do GNNs Work: Understanding and Improving Neighborhood Aggregation
# <https://www.ijcai.org/Proceedings/2020/0181.pdf>`
random.seed(2020)
split = 0.01
n_nodes = graph.num_nodes
sample_size = math.ceil(n_nodes * split)
train_idx = random.sample(range(n_nodes - 1000), sample_size)
train_nodes = [idx if idx < 500 else idx + 1000 for idx in train_idx]
test_nodes = list(range(500, 1500))
from graphgallery.gallery.nodeclas import ALaGCN, ALaGAT
# trainer = ALaGAT(device=device, seed=123).setup_graph(graph).build()
trainer = ALaGCN(device=device, seed=123).setup_graph(graph).build()
trainer.fit(train_nodes, verbose=1)
results = trainer.evaluate(test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| 32.071429 | 83 | 0.76095 |
6de7d2a8bf2473e87c45ac2498b9443dac0b4f4e | 125 | py | Python | packit_service/service/api/errors.py | majamassarini/packit-service | 12baf67799412c8fa56e2a821cd9d584e2437141 | [
"MIT"
] | 20 | 2019-05-24T12:33:05.000Z | 2020-07-28T06:03:57.000Z | packit_service/service/api/errors.py | majamassarini/packit-service | 12baf67799412c8fa56e2a821cd9d584e2437141 | [
"MIT"
] | 735 | 2019-05-15T11:52:36.000Z | 2020-08-02T23:21:44.000Z | packit_service/service/api/errors.py | majamassarini/packit-service | 12baf67799412c8fa56e2a821cd9d584e2437141 | [
"MIT"
] | 28 | 2019-05-16T13:32:03.000Z | 2020-07-29T10:23:54.000Z | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
| 17.857143 | 47 | 0.776 |
6dea6c9087b914af198d695841147682ba1f18e7 | 1,453 | py | Python | garden_test/setup.py | jad-b/garden | 44169c57fdaa08e0edd751d7459da99334e97323 | [
"MIT"
] | null | null | null | garden_test/setup.py | jad-b/garden | 44169c57fdaa08e0edd751d7459da99334e97323 | [
"MIT"
] | null | null | null | garden_test/setup.py | jad-b/garden | 44169c57fdaa08e0edd751d7459da99334e97323 | [
"MIT"
] | null | null | null | import subprocess
import os
from setuptools import setup, find_packages
setup(
name='garden_test',
version=latest_git_tag(),
long_description=readme(),
description='Python package for testing garden',
author='Jeremy Dobbins-Bucklad',
author_email='j.american.db@gmail.com',
url='https://github.com/jad-b/garden',
install_requires=requirements(),
packages = find_packages(),
package_dir = {'garden': 'garden_test'},
py_modules=['testfile'],
entry_points={
'garden.bump': ['garden_test = garden_test.bump:Bumper.bump'],
},
zip_safe=False,
include_package_data=True,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
),
)
| 26.907407 | 70 | 0.618032 |
6dea9c8146d56fa2b4a9cdd2da0bad21ab08f14c | 1,078 | py | Python | test/solutions/test_checkout.py | DPNT-Sourcecode/CHK-mxxq01 | 85be1ef3e87a6490d1490b64596de25d7e3cb60e | [
"Apache-2.0"
] | null | null | null | test/solutions/test_checkout.py | DPNT-Sourcecode/CHK-mxxq01 | 85be1ef3e87a6490d1490b64596de25d7e3cb60e | [
"Apache-2.0"
] | null | null | null | test/solutions/test_checkout.py | DPNT-Sourcecode/CHK-mxxq01 | 85be1ef3e87a6490d1490b64596de25d7e3cb60e | [
"Apache-2.0"
] | null | null | null | import unittest
from lib.solutions.checkout import checkout
if __name__ == '__main__':
unittest.main()
| 35.933333 | 59 | 0.646568 |
6deb927562ea3e9179ad55bccae3b4caee1a3331 | 338 | py | Python | libs/imagemetadata.py | epinux/labelImg | a4002eb771f9c82b703a3fcd0b554e0030bf24e7 | [
"MIT"
] | null | null | null | libs/imagemetadata.py | epinux/labelImg | a4002eb771f9c82b703a3fcd0b554e0030bf24e7 | [
"MIT"
] | null | null | null | libs/imagemetadata.py | epinux/labelImg | a4002eb771f9c82b703a3fcd0b554e0030bf24e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from libs.imagemetadata_ui import Ui_imagemetadata
| 18.777778 | 51 | 0.745562 |
6debe89876c11c370db73006de84c2358493d8ef | 19,992 | py | Python | test/coco_save.py | ZCDu/CenternessNet | 03f5d01999a4e1595eaceef9f62b4450ed017843 | [
"MIT"
] | null | null | null | test/coco_save.py | ZCDu/CenternessNet | 03f5d01999a4e1595eaceef9f62b4450ed017843 | [
"MIT"
] | null | null | null | test/coco_save.py | ZCDu/CenternessNet | 03f5d01999a4e1595eaceef9f62b4450ed017843 | [
"MIT"
] | null | null | null | import os
import cv2
import pdb
import json
import copy
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import matplotlib
import math
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms, soft_nms_merge
import pdb
colours = np.random.rand(80, 3)
| 42.626866 | 89 | 0.456633 |
6dec0625c10c417a63b087f29b4b429210299d04 | 752 | py | Python | lcd/interrupts/interrupt2.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | 6 | 2016-08-31T16:46:54.000Z | 2017-09-15T19:34:30.000Z | lcd/interrupts/interrupt2.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | 4 | 2016-09-02T09:18:41.000Z | 2016-09-02T09:24:08.000Z | lcd/interrupts/interrupt2.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#now we'll define the threaded callback function
#this will run in another threadwhen our event is detected
raw_input("Press Enter when ready\n>")
GPIO.add_event_detect(24, GPIO.RISING, callback=my_callback, bouncetime=300)
try:
print "Waiting for falling edge on port 23"
GPIO.wait_for_edge(23, GPIO.FALLING)
print "Falling edge detected. Here endeth the second lesson."
except KetboardInterrupt:
GPIO.cleanup()
GPIO.cleanup()
| 30.08 | 76 | 0.772606 |
6dee89b3a35ae4ccdd9553002d458259723951b4 | 31,184 | py | Python | Apps/polls/Views/CourseArrangement.py | shadowofgost/WebEngineering | 693af827e3458806cdace959262cf393d29f6504 | [
"Apache-2.0"
] | 1 | 2021-04-05T05:40:17.000Z | 2021-04-05T05:40:17.000Z | Apps/polls/Views/CourseArrangement.py | shadowofgost/WebEngineering | 693af827e3458806cdace959262cf393d29f6504 | [
"Apache-2.0"
] | null | null | null | Apps/polls/Views/CourseArrangement.py | shadowofgost/WebEngineering | 693af827e3458806cdace959262cf393d29f6504 | [
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse
from django.db.models import Q
from drf_yasg.utils import swagger_auto_schema
from drf_yasg.openapi import Parameter, Schema, Response, TYPE_INTEGER, TYPE_OBJECT, TYPE_STRING, IN_QUERY
from json import dumps
from .. import models
from .Public import responses_success, responses_fail, get_request_args, data_page_response, content_type_tmp, post_search, put_success, put_error, post_error, data_base_error_specific, patch_success, patch_error, id_error, delete_schema
from rest_framework.views import APIView
from django.views.decorators.csrf import csrf_exempt
| 38.594059 | 455 | 0.536942 |
6def8fbc025a4ae631780ed754a16d15160b7b0b | 6,514 | py | Python | knx_stack/client/knxnet_ip_discovery.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 2 | 2021-07-28T07:42:28.000Z | 2022-01-25T18:56:05.000Z | knx_stack/client/knxnet_ip_discovery.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 6 | 2021-07-25T21:36:01.000Z | 2022-02-20T21:11:31.000Z | knx_stack/client/knxnet_ip_discovery.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | null | null | null | import struct
import socket
import asyncio
import logging
import knx_stack
if __name__ == "__main__":
import sys
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
root.addHandler(handler)
loop = asyncio.get_event_loop()
if len(sys.argv):
transport1, _ = loop.run_until_complete(
loop.create_task(listen_discovery_responses(sys.argv[0], 5544))
)
transport2, _ = loop.run_until_complete(
loop.create_task(send_discovery_request(sys.argv[0], 5544))
)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
print("Closing transport...")
transport1.close()
transport2.close()
loop.close()
| 34.648936 | 120 | 0.642462 |
6defac8f1015dd7e947197dcda96dec8473101d6 | 199 | py | Python | autodisc/representations/static/pytorchnnrepresentation/models/__init__.py | flowersteam/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | 6 | 2020-12-19T00:16:16.000Z | 2022-01-28T14:59:21.000Z | autodisc/representations/static/pytorchnnrepresentation/models/__init__.py | Evolutionary-Intelligence/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | null | null | null | autodisc/representations/static/pytorchnnrepresentation/models/__init__.py | Evolutionary-Intelligence/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | 1 | 2021-05-24T14:58:26.000Z | 2021-05-24T14:58:26.000Z | from autodisc.representations.static.pytorchnnrepresentation.models.encoders import EncoderBurgess
from autodisc.representations.static.pytorchnnrepresentation.models.decoders import DecoderBurgess
| 49.75 | 98 | 0.904523 |
6defdfc013df6a621f25fd5ffba934ad58dd3acd | 3,312 | py | Python | lights.py | team-7108/computer-vision-tutorials | cfb7e455b5d8bba8779c440907344d9763573f57 | [
"MIT"
] | 3 | 2018-09-12T02:56:46.000Z | 2020-11-13T13:48:44.000Z | lights.py | team-7108/computer-vision-tutorials | cfb7e455b5d8bba8779c440907344d9763573f57 | [
"MIT"
] | null | null | null | lights.py | team-7108/computer-vision-tutorials | cfb7e455b5d8bba8779c440907344d9763573f57 | [
"MIT"
] | 1 | 2020-11-13T13:48:45.000Z | 2020-11-13T13:48:45.000Z | # Import OpenCV module
import cv2
# Import numpy for array operations
import numpy as np
image = cv2.imread('images/five_cubes.jpeg')
# Show the image
cv2.imshow('Image',image)
# Resize the image if it is too big, also helps to speed up the processing
image = cv2.resize(image, (600, 600))
cv2.imshow('Resized Image',image)
# Equalizing histograms, we try to reduce the effect of light here
image = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
channel = cv2.split(image)
cv2.equalizeHist(channel[0], channel[0])
cv2.merge(channel,image)
image = cv2.cvtColor(image,cv2.COLOR_YUV2BGR)
cv2.imshow('Normalized Image',image)
# This is a dummy function needed for creating trackbars
# Create a window named 'Colorbars'
cv2.namedWindow('Colorbars')
# Assign strings for ease of coding
bh='Blue High'
bl='Blue Low'
gh='Green High'
gl='Green Low'
rh='Red High'
rl='Red Low'
wnd = 'Colorbars'
# Begin Creating trackbars for each BGR value
cv2.createTrackbar(bl, wnd, 0, 255, nothing)
cv2.createTrackbar(bh, wnd, 149, 255, nothing)
cv2.createTrackbar(gl, wnd, 156, 255, nothing)
cv2.createTrackbar(gh, wnd, 255, 255, nothing)
cv2.createTrackbar(rl, wnd, 199, 255, nothing)
cv2.createTrackbar(rh, wnd, 255, 255, nothing)
while True:
mergedImage = np.zeros((600,150,3), np.uint8)
# Split image into four pieces and merge again
for i in range(0,4):
resizedImage = image[0:600, i*150:(i+1)*150]
cv2.imshow("cropped", resizedImage)
bLow = cv2.getTrackbarPos(bl, wnd)
bHigh = cv2.getTrackbarPos(bh, wnd)
gLow = cv2.getTrackbarPos(gl, wnd)
gHigh = cv2.getTrackbarPos(gh, wnd)
rLow = cv2.getTrackbarPos(rl, wnd)
rHigh = cv2.getTrackbarPos(rh, wnd)
rgbLow=np.array([bLow,gLow,rLow])
rgbHigh=np.array([bHigh,gHigh,rHigh])
maskedImage = cv2.inRange(resizedImage, rgbLow, rgbHigh)
cv2.imshow('Masked Image', maskedImage)
kernel = np.ones((15,15),np.uint8)
# the first morphological transformation is called opening, it will sweep out extra lone pixels around the image
openedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
cv2.imshow("Open Image", openedImage)
outImage = resizedImage.copy()
try:
contourImage, contours, hierarchy = cv2.findContours(openedImage.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
print(cnt) # contours are the points on the outline of the image
# bounding rectangle is the minimum rectangle that includes all the contours
# this bounding rectangle is perpendicular to image
x,y,w,h = cv2.boundingRect(cnt)
# We mark that bounding rectangle with green
cv2.rectangle(outImage,(x,y),(x+w,y+h),(255,0,0),4)
except:
pass
cv2.imshow("Bboxed",outImage)
mergedImage = np.concatenate((mergedImage,outImage), axis=1)
mergedImage = mergedImage[0:600, 150:750]
cv2.imshow("Merged",mergedImage)
keyPressed = cv2.waitKey(1) # Look for keys to be pressed
if keyPressed == 27: # if the key is ESC, check the ASCII table, 27 = ESC
break # Exit the loop
cv2.destroyAllWindows() # Destroy the windows and close the program
| 35.234043 | 128 | 0.679348 |
6df0403cfe638d0fa7c9fc0942bb17cdd38113df | 569 | py | Python | exastics/publish_github_api_releases.py | exastro-suite/exastics | de6193159943319333abc2688f543e7424810823 | [
"Apache-2.0"
] | null | null | null | exastics/publish_github_api_releases.py | exastro-suite/exastics | de6193159943319333abc2688f543e7424810823 | [
"Apache-2.0"
] | 1 | 2020-10-25T08:30:59.000Z | 2020-10-25T08:30:59.000Z | exastics/publish_github_api_releases.py | exastro-suite/exastics | de6193159943319333abc2688f543e7424810823 | [
"Apache-2.0"
] | 8 | 2020-10-09T13:11:08.000Z | 2021-11-04T06:26:27.000Z | import exastics.collect
import pathlib
import sys
import urllib.parse
if __name__ == '__main__':
github_account = sys.argv[1]
github_repository = sys.argv[2]
url_parts = (
'https',
'api.github.com',
urllib.parse.quote(f'/repos/{github_account}/{github_repository}/releases'),
'',
'',
''
)
headers = {
'Accept': 'application/vnd.github.v3+json'
}
output_dir = pathlib.PurePath(github_repository, 'github-releases')
exastics.collect.publish_api(url_parts, headers, output_dir)
| 21.074074 | 84 | 0.630931 |
6df0ee5285eb665d18e287fcf75e62d896c148dd | 1,471 | py | Python | cohesity_management_sdk/models/rpo_schedule.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/rpo_schedule.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/rpo_schedule.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
| 26.745455 | 81 | 0.633583 |
6df1040ee952e6ce1e567165568234bfbe1f725c | 5,834 | py | Python | Gh compilation files/text.py | ibois-epfl/Manis-timber-plate-joinery-solver | fecdb1dfe23348de261f034f85baf24ac396e8cc | [
"MIT"
] | 3 | 2021-10-19T11:55:59.000Z | 2022-02-04T15:29:04.000Z | Gh compilation files/text.py | ibois-epfl/Manis-timber-plate-joinery-solver | fecdb1dfe23348de261f034f85baf24ac396e8cc | [
"MIT"
] | null | null | null | Gh compilation files/text.py | ibois-epfl/Manis-timber-plate-joinery-solver | fecdb1dfe23348de261f034f85baf24ac396e8cc | [
"MIT"
] | null | null | null | """Export a text file."""
from ghpythonlib.componentbase import dotnetcompiledcomponent as component
import Grasshopper, GhPython
import System
import os
import datetime
__author__ = "Nicolas Rogeau"
__laboratory__ = "IBOIS, Laboratory for Timber Construction"
__university__ = "EPFL, Ecole Polytechnique Federale de Lausanne"
__funding__ = "NCCR Digital Fabrication, ETH Zurich"
__version__ = "2021.09"
| 38.130719 | 343 | 0.594961 |
6df14879b950933abacc38dea90c26ad6c6515c2 | 97 | py | Python | component_tests/tests/features/steps/__init__.py | Cobalt0s/sommelier | d64943a5d7be4ecdf08aa18e9f184b757e408425 | [
"MIT"
] | null | null | null | component_tests/tests/features/steps/__init__.py | Cobalt0s/sommelier | d64943a5d7be4ecdf08aa18e9f184b757e408425 | [
"MIT"
] | null | null | null | component_tests/tests/features/steps/__init__.py | Cobalt0s/sommelier | d64943a5d7be4ecdf08aa18e9f184b757e408425 | [
"MIT"
] | null | null | null | from sommelier.steps.response_processing import *
from sommelier.steps.event_processing import *
| 32.333333 | 49 | 0.85567 |
6df14ec0665b31a613e368f74d43196adfd0df56 | 877 | py | Python | setup.py | anthonytw/dutyroll | 489dd452ba614a2214756eba0831b33111187225 | [
"MIT"
] | 2 | 2019-01-22T20:44:03.000Z | 2019-11-30T07:59:32.000Z | setup.py | anthonytw/dutyroll | 489dd452ba614a2214756eba0831b33111187225 | [
"MIT"
] | null | null | null | setup.py | anthonytw/dutyroll | 489dd452ba614a2214756eba0831b33111187225 | [
"MIT"
] | null | null | null | import sys
from packaging.version import LegacyVersion
from skbuild.exceptions import SKBuildError
from skbuild.cmaker import get_cmake_version
from skbuild import setup
# Build-time requirements are assembled dynamically below.
setup_requires = []
# Require pytest-runner only when running tests.
if any(arg in sys.argv for arg in ('pytest', 'test')):
    setup_requires.append('pytest-runner>=2.0')
# Add CMake as a build requirement if cmake is not installed or is too low a version.
try:
    # get_cmake_version() raises SKBuildError when no cmake binary can be
    # located (handled below); otherwise compare against the 3.10 minimum.
    if LegacyVersion(get_cmake_version()) < LegacyVersion('3.10'):
        setup_requires.append('cmake')
except SKBuildError:
    # No usable system cmake: pull the PyPI 'cmake' package at build time.
    setup_requires.append('cmake')
setup(
    name='dutyroll',
    version='1.0.1',
    description='Parallel implementation of rolling window duty cycle.',
    author='"Anthony Wertz"<awertz@cmu.edu>',
    license='MIT',
    packages=['dutyroll'],
    tests_require=['pytest'],
    setup_requires=setup_requires
)
| 28.290323 | 85 | 0.733181 |
6df1e73647be403745ebe4c69e672889f9a73f91 | 162 | py | Python | obdlive/obd/urls.py | hoke-t/OBDLive | 524fb53fad5924b8371d2fce8d7a482bd8112362 | [
"MIT"
] | 8 | 2018-12-15T16:41:21.000Z | 2021-10-03T21:19:11.000Z | obdlive/obd/urls.py | hoke-t/OBDLive | 524fb53fad5924b8371d2fce8d7a482bd8112362 | [
"MIT"
] | null | null | null | obdlive/obd/urls.py | hoke-t/OBDLive | 524fb53fad5924b8371d2fce8d7a482bd8112362 | [
"MIT"
] | 1 | 2020-07-27T18:15:58.000Z | 2020-07-27T18:15:58.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.dashboard, name='dashboard'),
path('dtcs/', views.dtcs, name='dtcs'),
]
| 18 | 48 | 0.654321 |
6df2b3c71a785d6478f03f2023bb542307a17b8f | 1,195 | py | Python | crits/locations/forms.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 738 | 2015-01-02T12:39:55.000Z | 2022-03-23T11:05:51.000Z | crits/locations/forms.py | deadbits/crits | 154097a1892e9d3960d6faaed4bd2e912a196a47 | [
"MIT"
] | 605 | 2015-01-01T01:03:39.000Z | 2021-11-17T18:51:07.000Z | crits/locations/forms.py | deadbits/crits | 154097a1892e9d3960d6faaed4bd2e912a196a47 | [
"MIT"
] | 316 | 2015-01-07T12:35:01.000Z | 2022-03-30T04:44:30.000Z | from django import forms
from crits.locations.location import Location
from crits.core.handlers import get_item_names
| 34.142857 | 73 | 0.650209 |
6df4ba8add8eb7e8c911008f72f03e4dab32f5ab | 3,641 | py | Python | utils/utils_preprocess_v3.py | microsoft/normalized_trend_filtering | eb73f124243dfc3dc610abba35a3ad1a6303a227 | [
"MIT"
] | 2 | 2021-09-06T14:04:17.000Z | 2021-11-09T11:55:10.000Z | utils/utils_preprocess_v3.py | microsoft/normalized_trend_filtering | eb73f124243dfc3dc610abba35a3ad1a6303a227 | [
"MIT"
] | null | null | null | utils/utils_preprocess_v3.py | microsoft/normalized_trend_filtering | eb73f124243dfc3dc610abba35a3ad1a6303a227 | [
"MIT"
] | 1 | 2021-11-10T11:44:36.000Z | 2021-11-10T11:44:36.000Z | import pandas as pd
import numpy as np
import sys
import os
import itertools
import pandas as pd
import os
from tqdm import tqdm_notebook, tnrange
import numpy as np
import networkx as nx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import scipy
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
import cvxpy as cp
from scipy.sparse import csr_matrix, vstack, hstack
from copy import deepcopy
module_path = os.path.abspath(os.path.join('..'))
def getReducedGraph(sample_nodes, graph_nodes,
                    interactome):
    '''
    Reduce the interactome edge list to edges whose endpoints both lie in
    the intersection of ``sample_nodes`` and ``graph_nodes``.

    Parameters
    ----------
    sample_nodes : iterable
        Node identifiers observed in the sample.
    graph_nodes : iterable
        Node identifiers present in the interactome graph.
    interactome : pandas.DataFrame
        Edge list with (at least) 'node1' and 'node2' columns.

    Returns
    -------
    pandas.DataFrame
        The rows of ``interactome`` whose 'node1' and 'node2' are both in
        the intersection; the original index and dtypes are preserved.
    '''
    #find intersection between sample nodes and graph nodes
    intersection_nodes = set(sample_nodes) & set(graph_nodes)
    print('Number of Intersection Nodes : ', len(intersection_nodes))
    # Vectorized boolean filter instead of the original per-row .iloc loop:
    # one C-level pass over the frame rather than O(rows) Python lookups.
    keep = (interactome['node1'].isin(intersection_nodes)
            & interactome['node2'].isin(intersection_nodes))
    return interactome[keep]
def getNodeCharacterization(g, sample_nodes):
    '''
    Split ``sample_nodes`` into nodes that appear in graph ``g`` (connected)
    and nodes absent from it (orphans). Returns ``(connected, orphans)``,
    where ``connected`` is the full node set of ``g``.
    '''
    in_graph = set(g.nodes())
    return in_graph, set(sample_nodes).difference(in_graph)
def getDataSorting(connected_nodes, sample_df):
    '''
    Reorder the covariate table so that rows whose node is in the
    interactome come first, followed by all remaining (orphan) rows.

    Returns a tuple of
      - the reordered table with the 'node' bookkeeping columns dropped,
      - the list of in-interactome nodes in their new order,
      - a dict mapping new row position -> node name.
    '''
    flagged = sample_df.copy()
    flagged['IN_INTERACTOME'] = flagged['node'].isin(list(connected_nodes)).tolist()
    # Connected rows (True) first; renumber rows 0..n-1 afterwards.
    flagged = flagged.sort_values(by="IN_INTERACTOME", ascending=False).reset_index(drop=True)
    # Row position -> node name, after the reordering above.
    num_to_node = dict(enumerate(flagged['node'].tolist()))
    # Nodes that are part of the interactome, in sorted order.
    ordered_nodelist = flagged.loc[flagged['IN_INTERACTOME']]['node'].tolist()
    return flagged.drop(columns=['IN_INTERACTOME', 'node']), ordered_nodelist, num_to_node
def getLaplacian(g, ordered_nodelist, orphan_nodes):
    '''
    Calculates laplacian matrix with respect to ordering of
    covariant matrix

    Returns ``(L_padded, L_norm_padded)``: the plain and the normalized
    graph Laplacians of ``g`` with rows/columns ordered as
    ``ordered_nodelist``, each padded with an identity block of size
    ``len(orphan_nodes)`` so orphan nodes map to themselves.
    '''
    # Both Laplacians weight edges by the 'confidence' edge attribute.
    L_norm = nx.normalized_laplacian_matrix(g, nodelist = ordered_nodelist, weight = 'confidence')
    L = nx.laplacian_matrix(g, nodelist = ordered_nodelist, weight = 'confidence')
    # scipy.linalg.block_diag works on dense arrays, hence .todense() here
    # and the re-sparsification to CSR around the result.
    return csr_matrix(scipy.linalg.block_diag(L.todense(),np.eye(len(orphan_nodes)))), \
        csr_matrix(scipy.linalg.block_diag(L_norm.todense(),np.eye(len(orphan_nodes))))
6df5916ec657908f3c7be4eae54758a97075100c | 791 | py | Python | server.py | marwano/remoterobot | 80409bde8e20de2b9fe97a8f214295aa5290decd | [
"BSD-3-Clause"
] | 1 | 2019-05-26T10:41:07.000Z | 2019-05-26T10:41:07.000Z | server.py | marwano/remoterobot | 80409bde8e20de2b9fe97a8f214295aa5290decd | [
"BSD-3-Clause"
] | 1 | 2018-02-28T23:47:23.000Z | 2018-02-28T23:47:23.000Z | server.py | marwano/remoterobot | 80409bde8e20de2b9fe97a8f214295aa5290decd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import tornado.ioloop
import tornado.web
import json
import logging
from uf.wrapper.swift_api import SwiftAPI
if __name__ == '__main__':
main()
| 25.516129 | 77 | 0.667509 |
6df62870aa4daf08157f0c702682542e2f8979fe | 2,765 | py | Python | open/core/betterself/serializers/daily_productivity_log_serializers.py | lawrendran/open | d136f694bafab647722c78be6f39ec79d589f774 | [
"MIT"
] | 105 | 2019-06-01T08:34:47.000Z | 2022-03-15T11:48:36.000Z | open/core/betterself/serializers/daily_productivity_log_serializers.py | lawrendran/open | d136f694bafab647722c78be6f39ec79d589f774 | [
"MIT"
] | 111 | 2019-06-04T15:34:14.000Z | 2022-03-12T21:03:20.000Z | open/core/betterself/serializers/daily_productivity_log_serializers.py | lawrendran/open | d136f694bafab647722c78be6f39ec79d589f774 | [
"MIT"
] | 26 | 2019-09-04T06:06:12.000Z | 2022-01-03T03:40:11.000Z | from rest_framework.exceptions import ValidationError
from rest_framework.fields import DateField, ChoiceField, CharField
from open.core.betterself.constants import (
BETTERSELF_LOG_INPUT_SOURCES,
WEB_INPUT_SOURCE,
)
from open.core.betterself.models.daily_productivity_log import DailyProductivityLog
from open.core.betterself.serializers.mixins import (
BaseCreateUpdateSerializer,
BaseModelReadSerializer,
)
from open.core.betterself.serializers.validators import ModelValidatorsMixin
from open.utilities.date_and_time import (
format_datetime_to_human_readable,
yyyy_mm_dd_format_1,
)
| 31.420455 | 88 | 0.654973 |
6df6536077ead8b5315b04728d254740d08d1ea8 | 5,423 | py | Python | twitch_bot.py | VDK45/Arkanoid_audio_command_for_twitch | 178a30fe85d3db5b4da127ee3de0c60dc8b0873d | [
"MIT"
] | null | null | null | twitch_bot.py | VDK45/Arkanoid_audio_command_for_twitch | 178a30fe85d3db5b4da127ee3de0c60dc8b0873d | [
"MIT"
] | null | null | null | twitch_bot.py | VDK45/Arkanoid_audio_command_for_twitch | 178a30fe85d3db5b4da127ee3de0c60dc8b0873d | [
"MIT"
] | null | null | null | import random
import cfg
import utils
import socket
import re
import time
from time import sleep
import sys
try:
file_c = open('chanel.txt', 'r', encoding='utf-8')
CHANEL = file_c.read()
print(f'chanel = {CHANEL}')
file_c.close()
except IOError as err:
print('Please enter your CHANEL!')
print(err)
try:
file_P = open('password.txt', 'r', encoding='utf-8')
PASSWORD = file_P.read()
print(f'password = {PASSWORD}')
file_P.close()
except IOError as err:
print('Please enter your PASSWORD!')
print(err)
command = ''
message = 'Hello world!'
chater = 'VDK_45'
loop_true = True
lst_chat = ['VDK45', 'Hello world!', 'VDK45', 'ARKANOID', 'VDK45', 'This is my first project', 'VDK45', 'Python']
sound = False
| 32.088757 | 116 | 0.491425 |
6df6c6b55d592803eb12aa9541c59df303c76397 | 155 | py | Python | app/core/urls.py | kmnkit/web-todo | e06e42f5b68b2b9473fad820857634a9c5c0dadf | [
"MIT"
] | null | null | null | app/core/urls.py | kmnkit/web-todo | e06e42f5b68b2b9473fad820857634a9c5c0dadf | [
"MIT"
] | null | null | null | app/core/urls.py | kmnkit/web-todo | e06e42f5b68b2b9473fad820857634a9c5c0dadf | [
"MIT"
] | null | null | null | from django.urls import path
from teams.views import TeamListView
app_name = "core"
urlpatterns = [
path("", TeamListView.as_view(), name="home"),
]
| 17.222222 | 50 | 0.709677 |
6df927163bf069ad2144fb5439fa950c5da79469 | 1,409 | py | Python | Sources/Mavsdk/proto/pb_plugins/setup.py | obe711/MAVSDK-Swift | 3ed35bbb57754824f8235f9acf828c73cc10b72b | [
"BSD-3-Clause"
] | null | null | null | Sources/Mavsdk/proto/pb_plugins/setup.py | obe711/MAVSDK-Swift | 3ed35bbb57754824f8235f9acf828c73cc10b72b | [
"BSD-3-Clause"
] | null | null | null | Sources/Mavsdk/proto/pb_plugins/setup.py | obe711/MAVSDK-Swift | 3ed35bbb57754824f8235f9acf828c73cc10b72b | [
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
import sys
from distutils.command.build import build
from distutils.spawn import find_executable
from setuptools import setup
def parse_requirements(filename):
    """
    Parse a pip requirements file into a list of requirement strings.

    Blank lines and ``#`` comment lines are skipped (previously they were
    returned as empty/comment entries, which are not valid requirements).

    :param filename: relative path, e.g. `./requirements.txt`
    :returns: List of requirements (empty when the file does not exist)
    """
    # Get absolute filepath (an already-absolute filename wins in join).
    filepath = os.path.join(os.getcwd(), filename)
    # Check if file exists
    if not os.path.exists(filepath):
        print("[!] File {} not found".format(filename))
        return []
    # Parse install requirements, dropping blanks and comment lines.
    with open(filepath, encoding="utf-8") as f:
        stripped = (line.strip() for line in f)
        return [line for line in stripped if line and not line.startswith("#")]
setup(
    name="protoc-gen-mavsdk",
    version="1.0.1",
    description="Protoc plugin used to generate MAVSDK bindings",
    url="https://github.com/mavlink/MAVSDK-Proto",
    maintainer="Jonas Vautherin, Julian Oes",
    maintainer_email="jonas.vautherin@gmail.com, julian@oes.ch",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
    ],
    packages=["protoc_gen_mavsdk"],
    # Runtime dependencies are read from requirements.txt at build time.
    install_requires=parse_requirements("requirements.txt"),
    # Console script named exactly "protoc-gen-mavsdk" so protoc discovers
    # it as a plugin on PATH. NOTE(review): there is a stray space after '='
    # in the spec below; setuptools appears to tolerate it — confirm intended.
    entry_points={
        "console_scripts": [
            "protoc-gen-mavsdk= protoc_gen_mavsdk.__main__:main"
        ]
    }
)
| 27.627451 | 65 | 0.66785 |
6df9c4e9ea68c87b17ca398c46b4ce7c864d776d | 109 | py | Python | src/osms/tts_modules/synthesizer/data/__init__.py | adasegroup/OSM---one-shot-multispeaker | 90c1bbea4db1d49667fcfecb51676ee3281f9458 | [
"MIT"
] | 12 | 2021-05-31T21:09:23.000Z | 2022-01-30T03:48:10.000Z | src/osms/tts_modules/synthesizer/data/__init__.py | adasegroup/OSM---one-shot-multispeaker | 90c1bbea4db1d49667fcfecb51676ee3281f9458 | [
"MIT"
] | null | null | null | src/osms/tts_modules/synthesizer/data/__init__.py | adasegroup/OSM---one-shot-multispeaker | 90c1bbea4db1d49667fcfecb51676ee3281f9458 | [
"MIT"
] | 6 | 2021-05-13T20:28:19.000Z | 2021-09-28T10:24:31.000Z | from .preprocess import SynthesizerPreprocessor
from .dataset import SynthesizerDataset, collate_synthesizer
| 36.333333 | 60 | 0.889908 |
6df9c88395294e4e4698984383fb2662d33b928e | 2,617 | py | Python | qtum_bridge/R8Blockchain/ethereumblockchain.py | Robin8Put/pmes | 338bec94162098f05b75bad035417317e1252fd2 | [
"Apache-2.0"
] | 5 | 2018-07-31T07:37:09.000Z | 2019-05-27T04:40:38.000Z | eth_bridge/R8Blockchain/ethereumblockchain.py | Robin8Put/pmes | 338bec94162098f05b75bad035417317e1252fd2 | [
"Apache-2.0"
] | 4 | 2018-08-01T11:11:54.000Z | 2022-03-11T23:20:53.000Z | qtum_bridge/R8Blockchain/ethereumblockchain.py | Robin8Put/pmes | 338bec94162098f05b75bad035417317e1252fd2 | [
"Apache-2.0"
] | 5 | 2018-06-09T07:42:04.000Z | 2018-12-28T21:15:52.000Z | from web3 import Web3, IPCProvider, HTTPProvider
from web3.middleware import geth_poa_middleware
from R8Blockchain.blockchain_handler import BlockchainHandler
from hashlib import sha256
import codecs
from web3.contract import ConciseContract
if __name__ == '__main__':
    # Manual smoke test against a local Rinkeby geth node over IPC.
    eth_blockchain = EthereumBlockchain.from_ipc_path(ipc_path='/home/artem/.ethereum/rinkeby/geth.ipc')
    res = eth_blockchain.get_last_block_hash()  # NOTE(review): value unused — overwritten below
    print(eth_blockchain.get_block_count())
    res = eth_blockchain.get_block_hash(1200)
    print(res)
    print(eth_blockchain.get_balance())
    print(eth_blockchain.get_unspent())
| 30.788235 | 104 | 0.704241 |
6dfaa0aa70efb8bfb33be6b88963dc80e3e7eb1f | 73 | py | Python | vis/test.py | Nathansong/OpenPCDdet-annotated | 2ca2239f3cd5cba8308b1be0744d541be4ff5093 | [
"Apache-2.0"
] | 4 | 2022-02-22T01:18:25.000Z | 2022-03-28T13:30:11.000Z | vis/test.py | Nathansong/OpenPCDdet-annotated | 2ca2239f3cd5cba8308b1be0744d541be4ff5093 | [
"Apache-2.0"
] | 1 | 2022-03-02T03:42:52.000Z | 2022-03-02T03:42:52.000Z | vis/test.py | Nathansong/OpenPCDdet-annotated | 2ca2239f3cd5cba8308b1be0744d541be4ff5093 | [
"Apache-2.0"
] | null | null | null |
# Path to a KITTI velodyne point-cloud sample (.bin) — presumably consumed
# by visualization code elsewhere in this script; TODO confirm.
path = "/home/nathan/OpenPCDet/data/kitti/training/velodyne/000000.bin"
6dfb865d03b79b2e933642d474e469577e44cc93 | 690 | py | Python | count.py | sunray97/countrows_excel | 7a95e0f6901051942615c6c16d15fee8e6fd4ded | [
"MIT"
] | null | null | null | count.py | sunray97/countrows_excel | 7a95e0f6901051942615c6c16d15fee8e6fd4ded | [
"MIT"
] | null | null | null | count.py | sunray97/countrows_excel | 7a95e0f6901051942615c6c16d15fee8e6fd4ded | [
"MIT"
] | null | null | null | import xlrd
import os
import sys
# rootdir = 'D://code/electric/'
rootdir = sys.argv[1]
xlrd.Book.encoding = "gbk"
sumnum=0
filenum = 0
list = os.listdir(rootdir) #
for i in range(0,len(list)):
path = os.path.join(rootdir,list[i])
if os.path.isfile(path):
print(''+path)
data = xlrd.open_workbook(path)
table = data.sheet_by_index(0)
# table = data.sheet_by_name(u'Sheet1')
nrows = table.nrows
data.release_resources()
sumnum=sumnum+nrows
filenum=filenum+1
print('-------------------------------------------------------------------------')
print('%d'%filenum)
print('%d'%sumnum)
| 28.75 | 90 | 0.571014 |
6dfbe53d06b44a62a35e17a43905a5b258b2a411 | 1,442 | py | Python | 0_mesh2html/preprocess_segments.py | ygCoconut/volume2stl | bd95fc39620afd21ce08c8c805ac213583d9daaa | [
"MIT"
] | null | null | null | 0_mesh2html/preprocess_segments.py | ygCoconut/volume2stl | bd95fc39620afd21ce08c8c805ac213583d9daaa | [
"MIT"
] | null | null | null | 0_mesh2html/preprocess_segments.py | ygCoconut/volume2stl | bd95fc39620afd21ce08c8c805ac213583d9daaa | [
"MIT"
] | null | null | null | '''
0 Preprocess segments:
-
- specify segments you want to process
- dilate slightly the segments
- create mask for dilation.
- np.unique(my_masked_id) --> select only part with biggest uc
- eliminates ouliers too disconnected/far from main structure
'''
import numpy as np
import h5py
from scipy.ndimage import binary_dilation, label
from tqdm import tqdm
def writeh5_file(data, path, dataset='main'):
    """Write ``data`` to an HDF5 file under the given dataset name.

    Defined here because the original script called writeh5_file() without
    ever defining it, so the run crashed with NameError after all the
    per-dendrite work was done.
    """
    with h5py.File(path, 'w') as fh:
        fh.create_dataset(dataset, data=data, compression='gzip')


if __name__=='__main__':
    print('start')
    segpath = '/n/pfister_lab2/Lab/donglai/mito/db/30um_human/seg_64nm.h5'
    savepath = '/n/pfister_lab2/Lab/nils/snowproject/seg_64nm_maindendrite.h5'
    # Load the whole segmentation volume into memory as uint32 (x y z);
    # the 'with' closes the file handle the original script leaked.
    with h5py.File(segpath, 'r') as fh:
        seg = np.array(fh['main'], np.uint32)
    dendrite_ids = np.loadtxt('seg_spiny_v2.txt', int)
    # 3x3x3 all-ones structuring element (26-connectivity), hoisted out of
    # the loop since it never changes.
    s = np.ones((3, 3, 3), int)
    for did in tqdm(dendrite_ids):
        # find all components of the dendrite, tolerate tiny gaps
        dil, nf = label((seg==did)*did, structure=s)
        # find main component (largest connected component by voxel count)
        ui, uc = np.unique(dil, return_counts=True)
        uc = uc[ui>0]; ui = ui[ui>0]
        if ui.size == 0:
            # id not present in the volume; the original crashed on argmax
            # of an empty array here.
            continue
        max_id = ui[np.argmax(uc)]
        # remove non-main components from segmentation
        seg[seg==did] = 0
        seg[dil==max_id] = did
    writeh5_file(seg, savepath)
    print('done')  # was a duplicated 'start' in the original
6dfdee78a36f76a22a8222a5f71ca90b9c824b58 | 2,665 | py | Python | branch/runner.py | sahibsin/Pruning | acc1db31c19c8b23599950cec4fe6399513ed306 | [
"MIT"
] | null | null | null | branch/runner.py | sahibsin/Pruning | acc1db31c19c8b23599950cec4fe6399513ed306 | [
"MIT"
] | null | null | null | branch/runner.py | sahibsin/Pruning | acc1db31c19c8b23599950cec4fe6399513ed306 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from dataclasses import dataclass
import sys
from cli import arg_utils
from foundations.runner import Runner
from branch import registry
| 36.013514 | 119 | 0.643152 |
6dff005711decae58a77ac5da887759206c11424 | 946 | py | Python | wulinfeng/L3/WordDic/AQICity.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 2 | 2018-03-29T08:26:17.000Z | 2019-06-17T10:56:19.000Z | wulinfeng/L3/WordDic/AQICity.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 1 | 2022-03-22T20:26:08.000Z | 2022-03-22T20:26:08.000Z | wulinfeng/L3/WordDic/AQICity.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 1 | 2019-02-18T10:44:20.000Z | 2019-02-18T10:44:20.000Z | import requests # requests
from bs4 import BeautifulSoup
import urllib.error
import re | 32.62069 | 64 | 0.524313 |
6dff00eda6e7e13b33088c5ae46ed97a3a4cc3ce | 1,464 | py | Python | setup.py | hiradyazdan/nginx-amplify-agent-health-check | 7aa0fa2aba082491b1b47c2b6189a9266245f647 | [
"MIT"
] | 2 | 2018-05-23T17:34:28.000Z | 2018-07-09T21:55:53.000Z | setup.py | hiradyazdan/nginx-amplify-agent-health-check | 7aa0fa2aba082491b1b47c2b6189a9266245f647 | [
"MIT"
] | null | null | null | setup.py | hiradyazdan/nginx-amplify-agent-health-check | 7aa0fa2aba082491b1b47c2b6189a9266245f647 | [
"MIT"
] | null | null | null | from setuptools import setup
# Trove classifiers; the comprehension appends one 'Programming Language'
# entry per space-separated version in the string (currently just 2.7).
classifiers = [
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX'
] + [
    ('Programming Language :: Python :: %s' % x)
    for x in '2.7'.split()
]
test_requirements = [
    'pytest',
    'pytest-cov',
    'coveralls',
    'mock',
    'numpy',
    # Only their Exceptions
    'setuptools',
    'psutil',
    'requests'
]
# Long description for PyPI comes straight from the README.
with open('README.rst', 'r') as f:
    long_description = f.read()
setup(
    name='nginx-amplify-agent-health-check',
    version='0.1.6',
    description='Static and Dynamic Analysis for nginx-amplify-agent Health Status',
    long_description=long_description,
    url='https://github.com/hiradyazdan/nginx-amplify-agent-health-check',
    author='Hirad Yazdanpanah',
    author_email='hirad.y@gmail.com',
    license='MIT',
    platforms=["linux"],
    packages=['amplifyhealthcheck'],
    # Installs the `amphc` command-line tool.
    entry_points={
        'console_scripts': [
            'amphc=amplifyhealthcheck.cli:init_cli'
        ]
    },
    classifiers=classifiers,
    keywords="nginx amplify nginx-amplify nginx-configuration health-check metrics",
    install_requires=[
        'psutil',
        'setuptools',
        'ntplib',
        'crossplane',
        'requests'
    ],
    setup_requires=['pytest-runner'],
    tests_require=test_requirements,
    extras_require={
        'test': test_requirements,
    },
    python_requires='==2.7.*',
    zip_safe=False
)
| 24 | 84 | 0.623634 |
6dff73cffaed94db5cb5e75fb80b3961ad0f9146 | 1,005 | py | Python | pybatfish/datamodel/aspath.py | li-ch/pybatfish | d406f87a2bdd8d3beb92dc1baa9a5c8d63391879 | [
"Apache-2.0"
] | 1 | 2019-05-09T13:00:39.000Z | 2019-05-09T13:00:39.000Z | pybatfish/datamodel/aspath.py | li-ch/pybatfish | d406f87a2bdd8d3beb92dc1baa9a5c8d63391879 | [
"Apache-2.0"
] | null | null | null | pybatfish/datamodel/aspath.py | li-ch/pybatfish | d406f87a2bdd8d3beb92dc1baa9a5c8d63391879 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 35.892857 | 76 | 0.664677 |
6dffaf2548225e608c4b2975db6390a9dca03d10 | 2,849 | py | Python | sherlock_scripts/pythonhops/sherlock_combine_restarts.py | apoletayev/anomalous_ion_conduction | badb91e971e4a5263a433cfa9fcbf914d53ed2a1 | [
"MIT"
] | 2 | 2021-05-20T03:49:51.000Z | 2021-06-21T08:41:10.000Z | sherlock_scripts/pythonhops/sherlock_combine_restarts.py | apoletayev/anomalous_ion_conduction | badb91e971e4a5263a433cfa9fcbf914d53ed2a1 | [
"MIT"
] | null | null | null | sherlock_scripts/pythonhops/sherlock_combine_restarts.py | apoletayev/anomalous_ion_conduction | badb91e971e4a5263a433cfa9fcbf914d53ed2a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 01:41:52 2020
Combines LAMMPS output files coming from a series of restarts with a * wildcard.
This works on expanded (mode scalar) fixes from LAMMPS where each line is a time.
The overlapping values of times due to restarts are averaged, but they should be identical.
Required command-line args : filenames= ,
Optional command-line args : file_out= ,
@author: andreypoletaev
"""
# =============================================================================
# %% Imports and constants
# =============================================================================
import pandas as pd
import sys
from glob import glob
# =============================================================================
# %% parse input and combine
# =============================================================================
## Parse inputs. Format: key=value
# Each CLI argument must look like key=value; later keys overwrite earlier ones.
options = dict([ (x.split('=')[0],x.split('=')[1]) for x in sys.argv[1:] ])
keys = list(options.keys())
# print(options)
assert 'filenames' in keys, 'please pass filenames=... [path] as command-line option'
# template = f'/*_vacf_{int(options["duration"])}ps.csv' if 'template' not in keys else options['template']
# Default output name: the input glob pattern with the '*' wildcard removed.
file_out = options['filenames'].replace('*','') if 'file_out' not in keys else options['file_out']
print('looking for files that look like this: '+options['filenames'], flush=True)
output = pd.DataFrame()
counter = 0
files_to_combine = sorted(glob(options['filenames']))
assert len(files_to_combine) > 1, 'Only one file fits the bill, skipping combining.'
print(files_to_combine, flush=True)
for fin in files_to_combine:
    try:
        ## read the header for column names
        # Line 1 is the LAMMPS fix banner; line 2 holds '# col1 col2 ...'.
        fp = open(fin, 'r')
        line1 = fp.readline()
        line2 = fp.readline()
        fp.close()
        colnames = line2[:-1].split(' ')[1:]
        ## read the actual numbers
        df = pd.read_csv(fin, skiprows=1, sep=' ')
        # colnames = df.iloc[0,1:-1].tolist()
        # Drop the last column (artifact of the leading '# ' shifting the
        # header by one field — presumably; verify against a sample file).
        df = df.iloc[:, :-1]
        df.columns = colnames
        df = df.apply(pd.to_numeric)
        # print(df.columns)
        # print(df.head(5))
        # print(df.dtypes)
        # print(df.head())
        if len(df) > 0:
            output = output.append(df, ignore_index=True)
            counter += 1
            print(f'appended data from file #{counter} : {fin}', flush=True)
    except: print(f'could not load / add {fin}', flush=True)
## ensemble-average in all cases - but not always the first thing
# Rows with the same TimeStep (the restart overlaps) are averaged — they
# should be identical — and the banner line is spliced back into the header.
output = output.groupby('TimeStep').agg('mean').reset_index().rename(columns={'TimeStep':line1[:-1]+'\n# '+'TimeStep'})
# output.TimeStep = output.TimeStep.astype(int)
## write file normally
output.to_csv(file_out, index=False, float_format='%.6g', sep=' ')
a30096d1e64ea75464caa5c47e07ed034748bbc2 | 3,680 | py | Python | src/imephu/utils.py | saltastroops/imephu | 0c302a73d01fe3ad018e7adf4b91e0beaecc6709 | [
"MIT"
] | null | null | null | src/imephu/utils.py | saltastroops/imephu | 0c302a73d01fe3ad018e7adf4b91e0beaecc6709 | [
"MIT"
] | 3 | 2022-02-02T20:51:05.000Z | 2022-02-03T21:13:27.000Z | src/imephu/utils.py | saltastroops/imephu | 0c302a73d01fe3ad018e7adf4b91e0beaecc6709 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional
from astropy.coordinates import SkyCoord
def mid_position(start: SkyCoord, end: SkyCoord) -> SkyCoord:
    """Return the midpoint of the great-circle arc from *start* to *end*.

    The midpoint is obtained by offsetting *start* along the great circle
    towards *end* by half their angular separation.

    Taken from https://github.com/astropy/astropy/issues/5766.

    Parameters
    ----------
    start: `~astropy.coordinates.SkyCoord`
        Start position on the sky.
    end: `~astropy.coordinates.SkyCoord`
        End position on the sky.
    """
    half_separation = start.separation(end) / 2
    return start.directional_offset_by(start.position_angle(end), half_separation)
def ephemerides_magnitude_range(
    ephemerides: List[Ephemeris],
) -> Optional[MagnitudeRange]:
    """Return the overall magnitude range covered by a list of ephemerides.

    The result spans from the smallest minimum magnitude to the largest
    maximum magnitude over all ephemerides that carry a magnitude range.
    If none of the ephemerides has a magnitude range, ``None`` is returned.

    Parameters
    ----------
    ephemerides: list of `~imephu.utils.Ephemeris`
        The list of ephemerides.

    Returns
    -------
    `~imephu.utils.MagnitudeRange`, optional
        The magnitude range for the list of ephemerides.

    Raises
    ------
    ValueError
        If two ephemerides carry magnitude ranges with different bandpasses.
    """
    minima: List[float] = []
    maxima: List[float] = []
    bandpass: Optional[str] = None
    for eph in ephemerides:
        rng = eph.magnitude_range
        if not rng:
            continue
        minima.append(rng.min_magnitude)
        maxima.append(rng.max_magnitude)
        # The first non-None bandpass wins; any later mismatch is an error.
        if bandpass is None:
            bandpass = rng.bandpass
        elif rng.bandpass != bandpass:
            raise ValueError("The bandpass must be the same for all ephemerides.")
    if minima and maxima and bandpass is not None:
        return MagnitudeRange(
            min_magnitude=min(minima), max_magnitude=max(maxima), bandpass=bandpass
        )
    return None
| 31.186441 | 88 | 0.664402 |
a3009f5a1a8c11a46a1920015fba53e4cf3ae345 | 1,875 | py | Python | app.py | zorro1992/task-app-devops | 48312e53ce5711ce0d9508b481e73f78df411dd2 | [
"MIT"
] | 1 | 2021-08-19T11:54:08.000Z | 2021-08-19T11:54:08.000Z | app.py | zorro1992/task-app-devops | 48312e53ce5711ce0d9508b481e73f78df411dd2 | [
"MIT"
] | null | null | null | app.py | zorro1992/task-app-devops | 48312e53ce5711ce0d9508b481e73f78df411dd2 | [
"MIT"
] | null | null | null | """
app
"""
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# /// = relative path, //// = absolute path
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
# Disable SQLAlchemy's modification-tracking signals (saves overhead).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Edit endpoint
# Default home endpoint
# Add endpoint
# Update endpoint
# Delete endpoint
# Main function
if __name__ == "__main__":
    # Create all tables on startup, then serve on every interface.
    # NOTE(review): debug=True must not be enabled in production.
    db.create_all()
    app.run(host="0.0.0.0", debug=True)
| 25 | 68 | 0.6544 |
a304245df7598c6937f92e93f9b38b346d5b4c9a | 2,009 | py | Python | app/models/version.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | 3 | 2019-09-05T05:28:49.000Z | 2020-06-10T09:03:37.000Z | app/models/version.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | null | null | null | app/models/version.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | null | null | null | from sqlalchemy.orm import backref
from app.models import db
| 35.245614 | 84 | 0.60677 |
a304b175c7cbd222a910af9551f6a774a35e4ab2 | 4,642 | py | Python | pydatajson/catalog_readme.py | datosgobar/pydatajson | f26e3d5928ce9d455485e03fa63a8d8741588b7a | [
"MIT"
] | 13 | 2017-05-17T13:33:43.000Z | 2021-08-10T18:42:59.000Z | pydatajson/catalog_readme.py | datosgobar/pydatajson | f26e3d5928ce9d455485e03fa63a8d8741588b7a | [
"MIT"
] | 296 | 2016-11-29T14:01:09.000Z | 2020-10-27T22:42:26.000Z | pydatajson/catalog_readme.py | datosgobar/pydatajson | f26e3d5928ce9d455485e03fa63a8d8741588b7a | [
"MIT"
] | 11 | 2017-07-06T17:02:31.000Z | 2021-07-19T14:46:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import io
import logging
import os
from six import string_types
from pydatajson.helpers import traverse_dict
from pydatajson.indicators import generate_catalogs_indicators
from pydatajson.readers import read_catalog
from pydatajson.validation import validate_catalog
logger = logging.getLogger('pydatajson')
# Argentina's central data catalog, used as the federation reference below.
CENTRAL_CATALOG = "http://datos.gob.ar/data.json"
ABSOLUTE_PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
# Text templates bundled with the package (e.g. catalog_readme.txt).
TEMPLATES_PATH = os.path.join(ABSOLUTE_PROJECT_DIR, "templates")
def generate_catalog_readme(_datajson, catalog,
                            export_path=None, verify_ssl=True):
    """Backward-compatibility wrapper around :func:`generate_readme`.

    Kept so callers written against earlier versions keep working; the
    ``_datajson`` argument is ignored.
    """
    return generate_readme(catalog, export_path, verify_ssl=verify_ssl)
def generate_readme(catalog, export_path=None, verify_ssl=True):
"""Genera una descripcin textual en formato Markdown sobre los
metadatos generales de un catlogo (ttulo, editor, fecha de
publicacin, et cetera), junto con:
- estado de los metadatos a nivel catlogo,
- estado global de los metadatos,
- cantidad de datasets federados y no federados,
- detalles de los datasets no federados
- cantidad de datasets y distribuciones incluidas
Es utilizada por la rutina diaria de `libreria-catalogos` para generar
un README con informacin bsica sobre los catlogos mantenidos.
Args:
catalog (str o dict): Path a un catlogo en cualquier formato,
JSON, XLSX, o diccionario de python.
export_path (str): Path donde exportar el texto generado (en
formato Markdown). Si se especifica, el mtodo no devolver
nada.
Returns:
str: Texto de la descripcin generada.
"""
# Si se paso una ruta, guardarla
if isinstance(catalog, string_types):
catalog_path_or_url = catalog
else:
catalog_path_or_url = None
catalog = read_catalog(catalog)
validation = validate_catalog(catalog, verify_ssl=verify_ssl)
# Solo necesito indicadores para un catalogo
indicators = generate_catalogs_indicators(
catalog, CENTRAL_CATALOG)[0][0]
with io.open(os.path.join(TEMPLATES_PATH, 'catalog_readme.txt'), 'r',
encoding='utf-8') as template_file:
readme_template = template_file.read()
not_federated_datasets_list = "\n".join([
"- [{}]({})".format(dataset[0], dataset[1])
for dataset in indicators["datasets_no_federados"]
])
federated_removed_datasets_list = "\n".join([
"- [{}]({})".format(dataset[0], dataset[1])
for dataset in indicators["datasets_federados_eliminados"]
])
federated_datasets_list = "\n".join([
"- [{}]({})".format(dataset[0], dataset[1])
for dataset in indicators["datasets_federados"]
])
non_federated_pct = 1.0 - indicators["datasets_federados_pct"] if \
indicators["datasets_federados_pct"] is not None else \
indicators["datasets_federados_pct"]
content = {
"title": catalog.get("title"),
"publisher_name": traverse_dict(
catalog, ["publisher", "name"]),
"publisher_mbox": traverse_dict(
catalog, ["publisher", "mbox"]),
"catalog_path_or_url": catalog_path_or_url,
"description": catalog.get("description"),
"global_status": validation["status"],
"catalog_status": validation["error"]["catalog"]["status"],
"no_of_datasets": len(catalog["dataset"]),
"no_of_distributions": sum([len(dataset["distribution"]) for
dataset in catalog["dataset"]]),
"federated_datasets": indicators["datasets_federados_cant"],
"not_federated_datasets": indicators["datasets_no_federados_cant"],
"not_federated_datasets_pct": non_federated_pct,
"not_federated_datasets_list": not_federated_datasets_list,
"federated_removed_datasets_list": federated_removed_datasets_list,
"federated_datasets_list": federated_datasets_list,
}
catalog_readme = readme_template.format(**content)
if export_path:
with io.open(export_path, 'w+', encoding='utf-8') as target:
target.write(catalog_readme)
else:
return catalog_readme
| 40.365217 | 79 | 0.667816 |
a304eeaa7c9f4ed5704a6d6deba75d5ddfdbb3d1 | 346 | py | Python | code-tk/scrollbar.py | shilpasayura/bk | 2b0a1aa9300da80e201264bcf80226b3c5ff4ad6 | [
"MIT"
] | 4 | 2018-09-08T10:30:27.000Z | 2021-07-23T07:59:24.000Z | code-tk/scrollbar.py | shilpasayura/bk | 2b0a1aa9300da80e201264bcf80226b3c5ff4ad6 | [
"MIT"
] | null | null | null | code-tk/scrollbar.py | shilpasayura/bk | 2b0a1aa9300da80e201264bcf80226b3c5ff4ad6 | [
"MIT"
] | 6 | 2018-09-07T05:54:17.000Z | 2021-07-23T07:59:25.000Z | from tkinter import *
import tkinter
root = Tk()
scrollbar = Scrollbar(root)
scrollbar.pack( side = RIGHT, fill=Y )
mylist = Listbox(root, yscrollcommand = scrollbar.set )
for line in range(100):
mylist.insert(END, "Line number : " + str(line))
mylist.pack( side = LEFT, fill = BOTH )
scrollbar.config( command = mylist.yview )
mainloop()
| 21.625 | 55 | 0.705202 |
a305196df6eec0820f9171fbedc0fc320734bad3 | 8,354 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/sisf/get.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | 1 | 2022-01-16T10:00:24.000Z | 2022-01-16T10:00:24.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/sisf/get.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/sisf/get.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | null | null | null | """Common get functions for sisf"""
# Python
import logging
import re
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def get_device_tracking_policy_name_configurations(device, policy):
""" Get device-tracking policy configurations
Args:
device ('obj'): device object
policy ('str'): policy name
Returns:
Dictionary
None
Raises:
None
"""
try:
out = device.parse('show device-tracking policy {policy}'.format(policy=policy))
return out.get('configuration', None)
except SchemaEmptyParserError:
log.info("Command has not returned any results")
return None
def get_device_tracking_database_details_binding_table_configurations(device):
""" Get device-tracking policy configurations
Args:
device ('obj'): device object
Returns:
Dictionary
None
Raises:
None
"""
try:
out = device.parse('show device-tracking database details')
return out.get('binding_table_configuration', None)
except SchemaEmptyParserError:
log.info("Command has not returned any results")
return None
def get_device_tracking_database_details_binding_table_count(device, state=False):
""" Get device-tracking policy configurations
Args:
device ('obj'): device object
state('bool', optional): get state count if True. Defaults to False
Returns:
Dictionary
None
Raises:
None
"""
if state:
key = 'binding_table_state_count'
else:
key = 'binding_table_count'
try:
out = device.parse('show device-tracking database details')
return out.get(key, None)
except SchemaEmptyParserError:
log.info("Command has not returned any results")
return None
def get_ipv6_nd_raguard_policy_configurations(device, policy):
""" Get ipv6 nd raguard policy configurations
Args:
device ('obj'): device object
policy ('str'): policy name
Returns:
Dictionary
None
Raises:
None
"""
try:
out = device.parse('show ipv6 nd raguard policy {policy}'.format(policy=policy))
return out.get('configuration', None)
except SchemaEmptyParserError:
log.info("Command has not returned any results")
return None
def get_ipv6_source_guard_policy_configurations(device, policy):
""" Get ipv6 source guard policy configurations
Args:
device ('obj'): device object
policy ('str'): policy name
Returns:
Dictionary
None
Raises:
None
"""
try:
out = device.parse('show ipv6 source-guard policy {policy}'.format(policy=policy))
return out.get('configuration', None)
except SchemaEmptyParserError:
log.info("Command has not returned any results")
return None
def get_device_tracking_counters_vlan_message_type(device, vlanid, message_type="received"):
""" Get device_tracking vlan count message type
Args:
device ('obj'): device object
vlanid ('str'): vlan
message_type ('str', optional): message type. Defaults to "received"
Returns:
Dictionary
None
Raises:
None
"""
try:
out = device.parse('show device-tracking counters vlan {vlanid}'.format(vlanid=vlanid))
except SchemaEmptyParserError:
log.info("Command has not returned any results")
message_dict = out.get("vlanid", {}).get(int(vlanid), {})
if not message_dict:
log.info("There is no activity corresponding to the message type {type}"
.format(type=message_type))
return None
return message_dict
def get_device_tracking_counters_vlan_faults(device, vlanid):
""" Get device_tracking vlan count message type
Args:
device ('obj'): device object
vlanid ('str'): vlan
Returns:
List
None
Raises:
None
"""
try:
out = device.parse('show device-tracking counters vlan {vlanid}'.format(vlanid=vlanid))
except SchemaEmptyParserError:
log.info("Command has not returned any results")
fault_list = out.get("vlanid", {}).get(int(vlanid), {}).get("faults", [])
if not fault_list:
log.info("There are no faults on vlan {vlanid}".format(vlanid=vlanid))
return None
return fault_list
def get_ip_theft_syslogs(device):
"""Gets IP Theft syslog
Args:
device (obj): device object
Returns:
Dictionary
None
Raises:
None
"""
try:
out = device.parse('show logging | include %SISF-4-IP_THEFT')
except SchemaEmptyParserError:
return {}
# Need to perform additional parsing to extract IP Theft specific data
# *Sep 15 12:53:06.383 EST:
timematch = r'.(?P<timestamp>[A-Za-z]{3}\s+\d+ \d+:\d+:\d+\.\d+( [A-Z]+)?:)'
# *Sep 15 12:53:06.383 EST: %SISF-4-IP_THEFT: IP Theft IP=2001:DB8::101 VLAN=20 MAC=dead.beef.0001 IF=Twe1/0/1 New MAC=dead.beef.0002 New I/F=Twe1/0/1
theft1 = re.compile(
timematch +
r'\s+%SISF-4-IP_THEFT: IP Theft' +
r'\s+IP=(?P<ip>([a-fA-F\d\:]+)|(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))' +
r'\s+VLAN=(?P<vlan>\d+)' +
r'\s+MAC=(?P<mac>([a-fA-F\d]{4}\.){2}[a-fA-F\d]{4})' +
r'\s+IF=(?P<interface>[\w\/\.\-\:]+)' +
r'\s+New Mac=(?P<new_mac>([a-fA-F\d]{4}\.){2}[a-fA-F\d]{4})' +
r'\s+New I/F=(?P<new_if>[\w\/\.\-\:]+)'
)
# *Sep 16 19:22:29.392 EST: %SISF-4-IP_THEFT: IP Theft IP=2001:DB8::105 VLAN=20 Cand-MAC=dead.beef.0002 Cand-I/F=Twe1/0/1 Known MAC over-fabric Known I/F over-fabric
theft2 = re.compile(
timematch +
r'\s+%SISF-4-IP_THEFT: IP Theft' +
r'\s+IP=(?P<ip>([a-fA-F\d\:]+)|(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))' +
r'\s+VLAN=(?P<vlan>\d+)' +
r'\s+Cand-MAC=(?P<cand_mac>([a-fA-F\d]{4}\.){2}[a-fA-F\d]{4})' +
r'\s+Cand-I/F=(?P<cand_if>[\w\/\.\-\:]+)'
)
# *Oct 20 16:58:24.807 EST: %SISF-4-IP_THEFT: IP Theft IP=2001:DB8::105 VLAN=20 MAC=dead.beef.0001 IF=Twe1/0/1 New I/F over fabric
theft3 = re.compile(
timematch +
r'\s+%SISF-4-IP_THEFT: IP Theft' +
r'\s+IP=(?P<ip>([a-fA-F\d\:]+)|(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))' +
r'\s+VLAN=(?P<vlan>\d+)' +
r'\s+MAC=(?P<mac>([a-fA-F\d]{4}\.){2}[a-fA-F\d]{4})' +
r'\s+IF=(?P<if>[\w\/\.\-\:]+)'
)
log_dict = {}
for log_entry in out['logs']:
m = theft1.match(log_entry)
if m:
entry = {}
group = m.groupdict()
ip = group['ip']
vlan = group['vlan']
mac = group['mac']
interface = group['interface']
new_mac = group['new_mac']
new_interface = group['new_if']
entry['ip'] = ip
entry['vlan'] = vlan
entry['mac'] = mac
entry['interface'] = interface
entry['new_mac'] = new_mac
entry['new_interface'] = new_interface
log_dict.setdefault('entries', []).append(entry)
m = theft2.match(log_entry)
if m:
entry = {}
group = m.groupdict()
ip = group['ip']
vlan = group['vlan']
new_mac = group['cand_mac']
new_if = group['cand_if']
entry['ip'] = ip
entry['vlan'] = vlan
entry['new_mac'] = new_mac
entry['new_interface'] = new_if
log_dict.setdefault('entries', []).append(entry)
m = theft3.match(log_entry)
if m:
entry = {}
group = m.groupdict()
ip = group['ip']
vlan = group['vlan']
mac = group['mac']
new_if = group['if']
entry['ip'] = ip
entry['vlan'] = vlan
entry['mac'] = mac
entry['new_interface'] = new_if
log_dict.setdefault('entries', []).append(entry)
return log_dict | 30.268116 | 169 | 0.556739 |
a3052e2c0e4e4d32b495f5d940bc6dff09090dc4 | 1,742 | py | Python | Solutions/2021/13.py | Azurealistic/Winter | 4ef5d1fde10f9ba769c33597e1269f161068f18b | [
"Unlicense"
] | 1 | 2021-12-18T20:02:57.000Z | 2021-12-18T20:02:57.000Z | Solutions/2021/13.py | Azurealistic/Winter | 4ef5d1fde10f9ba769c33597e1269f161068f18b | [
"Unlicense"
] | null | null | null | Solutions/2021/13.py | Azurealistic/Winter | 4ef5d1fde10f9ba769c33597e1269f161068f18b | [
"Unlicense"
] | null | null | null | # Advent of Code 2021 - Day: 13
# Imports (Always imports data based on the folder and file name)
from aocd import data, submit
# Solution
# Call the main function.
if __name__ == '__main__':
main() | 29.525424 | 150 | 0.647532 |
a3053ef8acac68ff1083564fcebe2d53409309ba | 13,598 | py | Python | hax/test/test_work_planner.py | ajaykumarptl/cortx-hare | 6eada402c3f90f2f56743efb959ea308b9e171e5 | [
"Apache-2.0"
] | 16 | 2020-09-25T09:34:07.000Z | 2022-03-29T17:26:39.000Z | hax/test/test_work_planner.py | ajaykumarptl/cortx-hare | 6eada402c3f90f2f56743efb959ea308b9e171e5 | [
"Apache-2.0"
] | 536 | 2020-09-24T14:59:10.000Z | 2022-03-31T15:44:52.000Z | hax/test/test_work_planner.py | ajaykumarptl/cortx-hare | 6eada402c3f90f2f56743efb959ea308b9e171e5 | [
"Apache-2.0"
] | 108 | 2020-09-24T15:09:29.000Z | 2022-03-25T10:13:19.000Z | # Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
# flake8: noqa
import logging
import time
import unittest
from threading import Condition, Thread
from time import sleep
from typing import List
from unittest.mock import Mock
from hax.log import TRACE
from hax.message import (BaseMessage, BroadcastHAStates, Die,
EntrypointRequest,
HaNvecGetEvent)
from hax.motr.planner import WorkPlanner, State
from hax.motr.util import LinkedList
from hax.types import Fid, Uint128
LOG = logging.getLogger('hax')
| 33.492611 | 78 | 0.538241 |
a3056d3c0122f76cea061b47c7cebd71302eec5c | 248 | py | Python | packages/standard.py | jurikolo/la-intro-to-python | d15299662f71d7defe6ca178a8344e3c4d605654 | [
"Apache-2.0"
] | null | null | null | packages/standard.py | jurikolo/la-intro-to-python | d15299662f71d7defe6ca178a8344e3c4d605654 | [
"Apache-2.0"
] | null | null | null | packages/standard.py | jurikolo/la-intro-to-python | d15299662f71d7defe6ca178a8344e3c4d605654 | [
"Apache-2.0"
] | null | null | null | print("Modules documentation: https://docs.python.org/3/tutorial/modules.html")
print("Standard modules list: https://docs.python.org/3/py-modindex.html")
import math
print(math.pi)
from math import pi
print(pi)
from math import pi as p
print(p) | 22.545455 | 79 | 0.762097 |
a308348c02f7d05a6bdcec5e102eab0f328f25f9 | 1,329 | py | Python | biosimulators_utils/archive/utils.py | virtualcell/Biosimulators_utils | 1b34e1e0a9ace706d245e9d515d0fae1e55a248d | [
"MIT"
] | 2 | 2021-06-02T13:26:34.000Z | 2021-12-27T23:12:47.000Z | biosimulators_utils/archive/utils.py | virtualcell/Biosimulators_utils | 1b34e1e0a9ace706d245e9d515d0fae1e55a248d | [
"MIT"
] | 102 | 2020-12-06T19:47:43.000Z | 2022-03-31T12:56:17.000Z | biosimulators_utils/archive/utils.py | virtualcell/Biosimulators_utils | 1b34e1e0a9ace706d245e9d515d0fae1e55a248d | [
"MIT"
] | 4 | 2021-01-27T19:56:34.000Z | 2022-02-03T21:08:20.000Z | """ Utilities for creating archives
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2020-12-06
:Copyright: 2020, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from .data_model import Archive, ArchiveFile
import glob
import os
__all__ = ['build_archive_from_paths']
def build_archive_from_paths(path_patterns, rel_path=None, recursive=True):
""" Build an archive from a list of glob path patterns
Args:
path_patterns (:obj:`list` of :obj:`str`): glob path patterns for files to bundle into an archive
rel_path (:obj:`str`, optional): if provided, set the archive file names to their path relative to this path
recursive (:obj:`bool`, optional): if :obj:`True`, match the path patterns recursively
Returns:
:obj:`Archive`: archive
"""
archive = Archive()
for path_pattern in path_patterns:
for local_path in glob.glob(path_pattern, recursive=recursive):
if os.path.isfile(local_path):
if rel_path:
archive_path = os.path.relpath(local_path, rel_path)
else:
archive_path = local_path
archive.files.append(ArchiveFile(
local_path=local_path,
archive_path=archive_path,
))
return archive
| 32.414634 | 116 | 0.645598 |
096461150c75c546d91d335a2584ba96fe70e040 | 845 | py | Python | v1/Commit.py | gzc/gitstats | d6e41c4f7ad5c3d754ef872fa9e615b88df0ccb8 | [
"MIT"
] | 26 | 2017-06-11T05:44:25.000Z | 2021-02-20T12:21:22.000Z | v1/Commit.py | gzc/gitstats | d6e41c4f7ad5c3d754ef872fa9e615b88df0ccb8 | [
"MIT"
] | 1 | 2020-04-22T15:48:19.000Z | 2020-04-22T15:52:51.000Z | v1/Commit.py | gzc/gitstats | d6e41c4f7ad5c3d754ef872fa9e615b88df0ccb8 | [
"MIT"
] | 1 | 2020-10-20T04:46:11.000Z | 2020-10-20T04:46:11.000Z | """
This class represents the info of one commit
"""
from Change import *;
| 31.296296 | 79 | 0.622485 |
0966490b7f876064ed7777de569aec9aeed5aa61 | 3,758 | py | Python | htdocs/plotting/auto/scripts100/p172.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | htdocs/plotting/auto/scripts100/p172.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | htdocs/plotting/auto/scripts100/p172.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """YTD precip"""
import calendar
import datetime
from pandas.io.sql import read_sql
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.network import Table as NetworkTable
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['description'] = """This chart presents year to date accumulated
precipitation for a station of your choice. The year with the highest and
lowest accumulation is shown along with the envelop of observations and
long term average. You can optionally plot up to three additional years
of your choice.
"""
thisyear = datetime.date.today().year
desc['arguments'] = [
dict(type='station', name='station', default='IA2203',
label='Select Station:', network='IACLIMATE'),
dict(type='year', name='year1', default=thisyear,
label='Additional Year to Plot:'),
dict(type='year', name='year2', optional=True, default=(thisyear - 1),
label='Additional Year to Plot: (optional)'),
dict(type='year', name='year3', optional=True, default=(thisyear - 2),
label='Additional Year to Plot: (optional)'),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
network = ctx['network']
year1 = ctx.get('year1')
year2 = ctx.get('year2')
year3 = ctx.get('year3')
nt = NetworkTable(network)
table = "alldata_%s" % (station[:2],)
df = read_sql("""
WITH years as (SELECT distinct year from """ + table + """
WHERE station = %s and sday = '0101')
SELECT day, sday, year, precip,
sum(precip) OVER (PARTITION by year ORDER by day ASC) as accum from
""" + table + """ WHERE station = %s and year in (select year from years)
ORDER by day ASC
""", pgconn, params=(station, station), index_col='day')
if df.empty:
raise ValueError("No data found!")
(fig, ax) = plt.subplots(1, 1)
# Average
jday = df[['sday', 'accum']].groupby('sday').mean()
ax.plot(range(1, len(jday.index)+1), jday['accum'], lw=2, zorder=5,
color='k', label='Average - %.2f' % (jday['accum'].iloc[-1],))
# Min and Max
jmin = df[['sday', 'accum']].groupby('sday').min()
jmax = df[['sday', 'accum']].groupby('sday').max()
ax.fill_between(range(1, len(jday.index)+1), jmin['accum'],
jmax['accum'], zorder=2, color='tan')
# find max year
plotted = []
for year, color in zip([df['accum'].idxmax().year,
df[df['sday'] == '1231']['accum'].idxmin().year,
year1, year2, year3],
['b', 'brown', 'r', 'g', 'purple']):
if year is None or year in plotted:
continue
plotted.append(year)
df2 = df[df['year'] == year]
ax.plot(range(1, len(df2.index)+1), df2['accum'],
label='%s - %.2f' % (year, df2['accum'].iloc[-1]),
color=color, lw=2)
ax.set_title(("Year to Date Accumulated Precipitation\n"
"[%s] %s (%s-%s)"
) % (station, nt.sts[station]['name'],
nt.sts[station]['archive_begin'].year,
datetime.date.today().year))
ax.set_ylabel("Precipitation [inch]")
ax.grid(True)
ax.legend(loc=2)
ax.set_xlim(1, 366)
ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274,
305, 335, 365))
ax.set_xticklabels(calendar.month_abbr[1:])
return fig, df
if __name__ == '__main__':
plotter(dict())
| 37.207921 | 78 | 0.575838 |
096771220ae65d76d59e3b33fcd89cf0b500185d | 21,536 | py | Python | python/istio_api/networking/v1beta1/gateway_pb2.py | luckyxiaoqiang/api | f296986a5b6512d8d5da7b3f16f01f5733f5f32a | [
"Apache-2.0"
] | 1 | 2021-07-19T14:51:15.000Z | 2021-07-19T14:51:15.000Z | python/istio_api/networking/v1beta1/gateway_pb2.py | luckyxiaoqiang/api | f296986a5b6512d8d5da7b3f16f01f5733f5f32a | [
"Apache-2.0"
] | 11 | 2019-10-15T23:03:57.000Z | 2020-06-14T16:10:12.000Z | python/istio_api/networking/v1beta1/gateway_pb2.py | luckyxiaoqiang/api | f296986a5b6512d8d5da7b3f16f01f5733f5f32a | [
"Apache-2.0"
] | 7 | 2019-07-04T14:23:54.000Z | 2020-04-27T08:52:51.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networking/v1beta1/gateway.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='networking/v1beta1/gateway.proto',
package='istio.networking.v1beta1',
syntax='proto3',
serialized_options=_b('Z\037istio.io/api/networking/v1beta1'),
serialized_pb=_b('\n networking/v1beta1/gateway.proto\x12\x18istio.networking.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\"\xdb\x01\n\x07Gateway\x12@\n\x07servers\x18\x01 \x03(\x0b\x32 .istio.networking.v1beta1.ServerB\x04\xe2\x41\x01\x02R\x07servers\x12Q\n\x08selector\x18\x02 \x03(\x0b\x32/.istio.networking.v1beta1.Gateway.SelectorEntryB\x04\xe2\x41\x01\x02R\x08selector\x1a;\n\rSelectorEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\xf0\x01\n\x06Server\x12\x38\n\x04port\x18\x01 \x01(\x0b\x32\x1e.istio.networking.v1beta1.PortB\x04\xe2\x41\x01\x02R\x04port\x12\x12\n\x04\x62ind\x18\x04 \x01(\tR\x04\x62ind\x12\x1a\n\x05hosts\x18\x02 \x03(\tB\x04\xe2\x41\x01\x02R\x05hosts\x12=\n\x03tls\x18\x03 \x01(\x0b\x32+.istio.networking.v1beta1.ServerTLSSettingsR\x03tls\x12)\n\x10\x64\x65\x66\x61ult_endpoint\x18\x05 \x01(\tR\x0f\x64\x65\x66\x61ultEndpoint\x12\x12\n\x04name\x18\x06 \x01(\tR\x04name\"\x81\x01\n\x04Port\x12\x1c\n\x06number\x18\x01 \x01(\rB\x04\xe2\x41\x01\x02R\x06number\x12 \n\x08protocol\x18\x02 \x01(\tB\x04\xe2\x41\x01\x02R\x08protocol\x12\x18\n\x04name\x18\x03 \x01(\tB\x04\xe2\x41\x01\x02R\x04name\x12\x1f\n\x0btarget_port\x18\x04 \x01(\rR\ntargetPort\"\xe9\x06\n\x11ServerTLSSettings\x12%\n\x0ehttps_redirect\x18\x01 \x01(\x08R\rhttpsRedirect\x12G\n\x04mode\x18\x02 \x01(\x0e\x32\x33.istio.networking.v1beta1.ServerTLSSettings.TLSmodeR\x04mode\x12-\n\x12server_certificate\x18\x03 \x01(\tR\x11serverCertificate\x12\x1f\n\x0bprivate_key\x18\x04 \x01(\tR\nprivateKey\x12\'\n\x0f\x63\x61_certificates\x18\x05 \x01(\tR\x0e\x63\x61\x43\x65rtificates\x12\'\n\x0f\x63redential_name\x18\n \x01(\tR\x0e\x63redentialName\x12*\n\x11subject_alt_names\x18\x06 \x03(\tR\x0fsubjectAltNames\x12\x36\n\x17verify_certificate_spki\x18\x0b \x03(\tR\x15verifyCertificateSpki\x12\x36\n\x17verify_certificate_hash\x18\x0c \x03(\tR\x15verifyCertificateHash\x12i\n\x14min_protocol_version\x18\x07 
\x01(\x0e\x32\x37.istio.networking.v1beta1.ServerTLSSettings.TLSProtocolR\x12minProtocolVersion\x12i\n\x14max_protocol_version\x18\x08 \x01(\x0e\x32\x37.istio.networking.v1beta1.ServerTLSSettings.TLSProtocolR\x12maxProtocolVersion\x12#\n\rcipher_suites\x18\t \x03(\tR\x0c\x63ipherSuites\"Z\n\x07TLSmode\x12\x0f\n\x0bPASSTHROUGH\x10\x00\x12\n\n\x06SIMPLE\x10\x01\x12\n\n\x06MUTUAL\x10\x02\x12\x14\n\x10\x41UTO_PASSTHROUGH\x10\x03\x12\x10\n\x0cISTIO_MUTUAL\x10\x04\"O\n\x0bTLSProtocol\x12\x0c\n\x08TLS_AUTO\x10\x00\x12\x0b\n\x07TLSV1_0\x10\x01\x12\x0b\n\x07TLSV1_1\x10\x02\x12\x0b\n\x07TLSV1_2\x10\x03\x12\x0b\n\x07TLSV1_3\x10\x04\x42!Z\x1fistio.io/api/networking/v1beta1b\x06proto3')
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,])
_SERVERTLSSETTINGS_TLSMODE = _descriptor.EnumDescriptor(
name='TLSmode',
full_name='istio.networking.v1beta1.ServerTLSSettings.TLSmode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PASSTHROUGH', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIMPLE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MUTUAL', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTO_PASSTHROUGH', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISTIO_MUTUAL', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1395,
serialized_end=1485,
)
_sym_db.RegisterEnumDescriptor(_SERVERTLSSETTINGS_TLSMODE)
_SERVERTLSSETTINGS_TLSPROTOCOL = _descriptor.EnumDescriptor(
name='TLSProtocol',
full_name='istio.networking.v1beta1.ServerTLSSettings.TLSProtocol',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TLS_AUTO', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_0', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_1', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_2', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_3', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1487,
serialized_end=1566,
)
_sym_db.RegisterEnumDescriptor(_SERVERTLSSETTINGS_TLSPROTOCOL)
_GATEWAY_SELECTORENTRY = _descriptor.Descriptor(
name='SelectorEntry',
full_name='istio.networking.v1beta1.Gateway.SelectorEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1beta1.Gateway.SelectorEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1beta1.Gateway.SelectorEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=315,
)
_GATEWAY = _descriptor.Descriptor(
name='Gateway',
full_name='istio.networking.v1beta1.Gateway',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='servers', full_name='istio.networking.v1beta1.Gateway.servers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\342A\001\002'), json_name='servers', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selector', full_name='istio.networking.v1beta1.Gateway.selector', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\342A\001\002'), json_name='selector', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GATEWAY_SELECTORENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=315,
)
_SERVER = _descriptor.Descriptor(
name='Server',
full_name='istio.networking.v1beta1.Server',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='port', full_name='istio.networking.v1beta1.Server.port', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\342A\001\002'), json_name='port', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bind', full_name='istio.networking.v1beta1.Server.bind', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bind', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hosts', full_name='istio.networking.v1beta1.Server.hosts', index=2,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\342A\001\002'), json_name='hosts', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tls', full_name='istio.networking.v1beta1.Server.tls', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tls', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_endpoint', full_name='istio.networking.v1beta1.Server.default_endpoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='defaultEndpoint', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='istio.networking.v1beta1.Server.name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='name', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=558,
)
# Generated Descriptor for the istio.networking.v1beta1.Port message.
# Machine-generated by protoc from gateway.proto -- do not edit by hand;
# regenerate from the .proto source instead.
_PORT = _descriptor.Descriptor(
  name='Port',
  full_name='istio.networking.v1beta1.Port',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='number', full_name='istio.networking.v1beta1.Port.number', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\342A\001\002'), json_name='number', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='protocol', full_name='istio.networking.v1beta1.Port.protocol', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\342A\001\002'), json_name='protocol', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='istio.networking.v1beta1.Port.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\342A\001\002'), json_name='name', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='target_port', full_name='istio.networking.v1beta1.Port.target_port', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='targetPort', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=561,
  serialized_end=690,
)
# Generated Descriptor for the istio.networking.v1beta1.ServerTLSSettings
# message. Machine-generated by protoc from gateway.proto -- do not edit by
# hand; regenerate from the .proto source instead.
_SERVERTLSSETTINGS = _descriptor.Descriptor(
  name='ServerTLSSettings',
  full_name='istio.networking.v1beta1.ServerTLSSettings',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='https_redirect', full_name='istio.networking.v1beta1.ServerTLSSettings.https_redirect', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='httpsRedirect', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mode', full_name='istio.networking.v1beta1.ServerTLSSettings.mode', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mode', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='server_certificate', full_name='istio.networking.v1beta1.ServerTLSSettings.server_certificate', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='serverCertificate', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='private_key', full_name='istio.networking.v1beta1.ServerTLSSettings.private_key', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='privateKey', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ca_certificates', full_name='istio.networking.v1beta1.ServerTLSSettings.ca_certificates', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='caCertificates', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='credential_name', full_name='istio.networking.v1beta1.ServerTLSSettings.credential_name', index=5,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='credentialName', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='subject_alt_names', full_name='istio.networking.v1beta1.ServerTLSSettings.subject_alt_names', index=6,
      number=6, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='subjectAltNames', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='verify_certificate_spki', full_name='istio.networking.v1beta1.ServerTLSSettings.verify_certificate_spki', index=7,
      number=11, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='verifyCertificateSpki', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='verify_certificate_hash', full_name='istio.networking.v1beta1.ServerTLSSettings.verify_certificate_hash', index=8,
      number=12, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='verifyCertificateHash', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='min_protocol_version', full_name='istio.networking.v1beta1.ServerTLSSettings.min_protocol_version', index=9,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='minProtocolVersion', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_protocol_version', full_name='istio.networking.v1beta1.ServerTLSSettings.max_protocol_version', index=10,
      number=8, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='maxProtocolVersion', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='cipher_suites', full_name='istio.networking.v1beta1.ServerTLSSettings.cipher_suites', index=11,
      number=9, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='cipherSuites', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SERVERTLSSETTINGS_TLSMODE,
    _SERVERTLSSETTINGS_TLSPROTOCOL,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=693,
  serialized_end=1566,
)
# --- Post-construction wiring (generated by protoc; do not edit by hand) ---
# Link field message/enum types and containing types, register descriptors
# with the symbol database, and build the concrete message classes.
_GATEWAY_SELECTORENTRY.containing_type = _GATEWAY
_GATEWAY.fields_by_name['servers'].message_type = _SERVER
_GATEWAY.fields_by_name['selector'].message_type = _GATEWAY_SELECTORENTRY
_SERVER.fields_by_name['port'].message_type = _PORT
_SERVER.fields_by_name['tls'].message_type = _SERVERTLSSETTINGS
_SERVERTLSSETTINGS.fields_by_name['mode'].enum_type = _SERVERTLSSETTINGS_TLSMODE
_SERVERTLSSETTINGS.fields_by_name['min_protocol_version'].enum_type = _SERVERTLSSETTINGS_TLSPROTOCOL
_SERVERTLSSETTINGS.fields_by_name['max_protocol_version'].enum_type = _SERVERTLSSETTINGS_TLSPROTOCOL
_SERVERTLSSETTINGS_TLSMODE.containing_type = _SERVERTLSSETTINGS
_SERVERTLSSETTINGS_TLSPROTOCOL.containing_type = _SERVERTLSSETTINGS
DESCRIPTOR.message_types_by_name['Gateway'] = _GATEWAY
DESCRIPTOR.message_types_by_name['Server'] = _SERVER
DESCRIPTOR.message_types_by_name['Port'] = _PORT
DESCRIPTOR.message_types_by_name['ServerTLSSettings'] = _SERVERTLSSETTINGS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes created via the reflection metaclass.
Gateway = _reflection.GeneratedProtocolMessageType('Gateway', (_message.Message,), {
  'SelectorEntry' : _reflection.GeneratedProtocolMessageType('SelectorEntry', (_message.Message,), {
    'DESCRIPTOR' : _GATEWAY_SELECTORENTRY,
    '__module__' : 'networking.v1beta1.gateway_pb2'
    # @@protoc_insertion_point(class_scope:istio.networking.v1beta1.Gateway.SelectorEntry)
    })
  ,
  'DESCRIPTOR' : _GATEWAY,
  '__module__' : 'networking.v1beta1.gateway_pb2'
  # @@protoc_insertion_point(class_scope:istio.networking.v1beta1.Gateway)
  })
_sym_db.RegisterMessage(Gateway)
_sym_db.RegisterMessage(Gateway.SelectorEntry)
Server = _reflection.GeneratedProtocolMessageType('Server', (_message.Message,), {
  'DESCRIPTOR' : _SERVER,
  '__module__' : 'networking.v1beta1.gateway_pb2'
  # @@protoc_insertion_point(class_scope:istio.networking.v1beta1.Server)
  })
_sym_db.RegisterMessage(Server)
Port = _reflection.GeneratedProtocolMessageType('Port', (_message.Message,), {
  'DESCRIPTOR' : _PORT,
  '__module__' : 'networking.v1beta1.gateway_pb2'
  # @@protoc_insertion_point(class_scope:istio.networking.v1beta1.Port)
  })
_sym_db.RegisterMessage(Port)
ServerTLSSettings = _reflection.GeneratedProtocolMessageType('ServerTLSSettings', (_message.Message,), {
  'DESCRIPTOR' : _SERVERTLSSETTINGS,
  '__module__' : 'networking.v1beta1.gateway_pb2'
  # @@protoc_insertion_point(class_scope:istio.networking.v1beta1.ServerTLSSettings)
  })
_sym_db.RegisterMessage(ServerTLSSettings)
# Clear per-descriptor options (standard protoc output epilogue).
DESCRIPTOR._options = None
_GATEWAY_SELECTORENTRY._options = None
_GATEWAY.fields_by_name['servers']._options = None
_GATEWAY.fields_by_name['selector']._options = None
_SERVER.fields_by_name['port']._options = None
_SERVER.fields_by_name['hosts']._options = None
_PORT.fields_by_name['number']._options = None
_PORT.fields_by_name['protocol']._options = None
_PORT.fields_by_name['name']._options = None
# @@protoc_insertion_point(module_scope)
| 46.413793 | 2,637 | 0.751997 |
096a5172854a6f7ee1cbbe59f19ac4a86d87ac0c | 1,684 | py | Python | Steganalysis-CNN/dataload.py | 1129ljc/video-interpolation-detection | eb2931269b2ac19af28de750f0b719fb0d66aaef | [
"Apache-2.0"
] | 2 | 2022-03-29T06:46:21.000Z | 2022-03-30T09:13:10.000Z | Steganalysis-CNN/dataload.py | 1129ljc/video-interpolation-detection | eb2931269b2ac19af28de750f0b719fb0d66aaef | [
"Apache-2.0"
] | null | null | null | Steganalysis-CNN/dataload.py | 1129ljc/video-interpolation-detection | eb2931269b2ac19af28de750f0b719fb0d66aaef | [
"Apache-2.0"
] | null | null | null | '''
@Time : 2021/9/3 9:42
@Author : ljc
@FileName: dataload.py
@Software: PyCharm
'''
import os
import json
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
# Preprocessing pipeline applied to each sample: converts a PIL image (or
# ndarray) to a float tensor scaled to [0, 1]. The Resize and Normalize
# steps below were experimented with previously and are left disabled.
transform = transforms.Compose([
    # transforms.Resize(size=(224, 224)),
    transforms.ToTensor(),
    # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
| 30.618182 | 92 | 0.647862 |
096a75b1219f50f0c996c46826203e3429895949 | 15,833 | py | Python | model.py | lei-wang-github/unet | 1dcf41a2956b58358f14e00c0df4daf366d272b8 | [
"MIT"
] | null | null | null | model.py | lei-wang-github/unet | 1dcf41a2956b58358f14e00c0df4daf366d272b8 | [
"MIT"
] | null | null | null | model.py | lei-wang-github/unet | 1dcf41a2956b58358f14e00c0df4daf366d272b8 | [
"MIT"
] | null | null | null | import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
import math
import configparser
# Module-level hyper-parameters, read once at import time from
# configuration.txt in the working directory. A missing section/key raises
# KeyError; a missing file leaves `config` empty (config.read is silent).
config = configparser.ConfigParser()
config.read('configuration.txt')
# Input image geometry.
img_height = int(config['data attributes']['image_height'])
img_width = int(config['data attributes']['image_width'])
# NOTE(review): presumably divides the channel counts for a slimmer model
# variant -- confirm where model_reduction_ratio is applied below.
model_reduction_ratio = int(config['model type']['modelReductionRatio'])
# NOTE(review): presumably the optimizer learning rate -- confirm usage.
learningRate = float(config['training settings']['learningRate'])
# https://github.com/orobix/retina-unet/blob/master/src/retinaNN_training.py
| 68.541126 | 169 | 0.718373 |
096b3b878c08f6ba21432355cfef1328654cf1dc | 23,998 | py | Python | run.py | kampta/PatchVAE | 816f4b49fd8b836641d7e1068c1e802ae0453742 | [
"MIT"
] | 9 | 2020-10-29T11:56:53.000Z | 2021-11-21T14:34:38.000Z | run.py | kampta/PatchVAE | 816f4b49fd8b836641d7e1068c1e802ae0453742 | [
"MIT"
] | null | null | null | run.py | kampta/PatchVAE | 816f4b49fd8b836641d7e1068c1e802ae0453742 | [
"MIT"
] | 2 | 2020-10-29T03:40:31.000Z | 2021-01-31T20:04:49.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" run.py
Code to run the PatchVAE on different datasets
Usage:
    # Run with default arguments on cifar100 (the default dataset)
python run.py
Basic VAE borrowed from
https://github.com/pytorch/examples/tree/master/vae
"""
__author__ = "Kamal Gupta"
__email__ = "kampta@cs.umd.edu"
__version__ = "0.1"
import sys
from collections import OrderedDict
import shutil
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from utils import Timer
from utils.torchsummary import summary
from utils.commons import data_loaders, load_vae_model, count_parameters, EdgeWeights
from loss import BetaVaeLoss, VaeConcreteLoss, BetaVaeConcreteLoss,\
BetaVaeConcretePartsLoss, BetaVaeConcretePartsEntropyLoss, DiscLoss
from model import Discriminator
import utils.commons as commons
from torch.utils.tensorboard import SummaryWriter
if __name__ == '__main__':
    import argparse
    import os

    # Command-line interface for training/evaluating PatchVAE. The argument
    # set below is the public CLI contract and is kept unchanged.
    parser = argparse.ArgumentParser(description='Patchy VAE')

    # Dataset
    parser.add_argument('--dataset', type=str, default='cifar100',
                        help='name of the dataset (default: cifar100)')
    parser.add_argument('--data-folder', type=str, default='./data',
                        help='name of the data folder (default: ./data)')
    parser.add_argument('--workers', type=int, default=4,
                        help='number of threads (default: 4)')
    parser.add_argument('--pretrained', default=None,
                        help='path of pre-trained model')
    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='just sample no training (default: False)')
    parser.add_argument('--size', type=int, default=64,
                        help='size of image (default: 64)')
    parser.add_argument('--inet', default=False, action='store_true',
                        help='Whether or not to do imagenet normalization')

    # Model
    parser.add_argument('--arch', type=str, default='patchy',
                        help='model architecture (default: patchy)')
    parser.add_argument('--encoder-arch', type=str, default='resnet',
                        help='encoder architecture (default: resnet)')
    parser.add_argument('--decoder-arch', type=str, default='pyramid',
                        help='decoder architecture (default: pyramid)')
    parser.add_argument('--independent', action='store_true', default=False,
                        help='independent decoders (default: False)')
    parser.add_argument('--ngf', type=int, default=64,
                        help='depth of first layer of encoder (default: 64)')

    # Optimization
    parser.add_argument('--recon-mask', type=str, default=None,
                        help="Use 'edge' mask for improved reconstruction (default: None.)")
    parser.add_argument('--batch-size', type=int, default=128,
                        help='batch size (default: 128)')
    parser.add_argument('--img-per-epoch', type=int, default=50000,
                        help='images per epoch (default: 50000)')
    parser.add_argument('--num-epochs', type=int, default=30,
                        help='number of epochs (default: 30)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='enables CUDA training (default: False)')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='learning rate for Adam optimizer (default: 1e-4)')
    parser.add_argument('--beta-a', type=float, default=1.0,
                        help='contribution of KLD App loss (default: 1.0)')
    parser.add_argument('--beta-v', type=float, default=10.,
                        help='contribution of KLD Vis loss (default: 10.)')
    parser.add_argument('--beta-p', type=float, default=0.,
                        help='contribution of MSE Parts loss (default: 0.)')
    parser.add_argument('--beta-ea', type=float, default=0.,
                        help='contribution of Entropy Across loss (default: 0.)')
    parser.add_argument('--beta-ew', type=float, default=0.,
                        help='contribution of Entropy Within loss (default: 0.)')

    # GAN
    parser.add_argument('--gan', action='store_true', default=False,
                        help='enable gan (default: False)')
    parser.add_argument('--ndf', type=int, default=64,
                        help='depth of first layer of discrimnator (default: 64)')
    parser.add_argument('--beta-g', type=float, default=1.0,
                        help='contribution of GAN loss (default: 0.)')

    # Latent space
    parser.add_argument('--scale', type=int, default=8,
                        help='scale down by (default: 8)')
    parser.add_argument('--num-parts', type=int, default=16,
                        help='number of parts (default: 16)')
    parser.add_argument('--hidden-size', type=int, default=6,
                        help='size of the latent vectors (default: 6)')
    parser.add_argument('--py', type=float, default=None,
                        help='part visibility prior (default: 1 / num_parts)')
    parser.add_argument('--categorical', action='store_true', default=False,
                        help='take only 1 part per location (default: False)')

    # Annealing
    parser.add_argument('--hard', action='store_true', default=False,
                        help='hard samples from bernoulli (default: False)')
    parser.add_argument('--temp', type=float, default=1.0,
                        help='Initial temperature (default: 1.0)')
    parser.add_argument('--anneal', type=float, default=0.00003,
                        help='Anneal rate (default: 00003)')
    parser.add_argument('--min-temp', type=float, default=0.1,
                        help='minimum temperature')

    # Miscellaneous
    parser.add_argument('--debug-grad', action='store_true', default=False,
                        help='debug gradients (default: False)')
    parser.add_argument('--output-folder', type=str, default='./scratch',
                        help='name of the output folder (default: ./scratch)')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=50,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-interval', type=int, default=1,
                        help='how many batches to wait before logging training status')

    args = parser.parse_args()

    print("All arguments")
    print(args)
    print("PID: ", os.getpid())

    # CUDA is used only when requested *and* actually available; args.cuda
    # already folds availability in, so the device choice needs no re-check.
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.device = torch.device("cuda:0" if args.cuda else "cpu")

    # Slurm: when launched via sbatch (job name is not the interactive
    # 'bash'), name the run directory after the job id; otherwise (srun,
    # interactive, or no Slurm at all) fall back to the process id.
    slurm_id = os.environ.get('SLURM_JOB_ID')
    if slurm_id is not None and os.environ.get('SLURM_JOB_NAME', 'bash') != 'bash':
        args.output_folder = os.path.join(args.output_folder, slurm_id)
        print("SLURM_JOB_ID: ", slurm_id)
    else:
        args.output_folder = os.path.join(args.output_folder, str(os.getpid()))

    # Create output/log/model folders. exist_ok avoids the check-then-create
    # race of the previous exists()+makedirs() pattern while keeping the
    # informational messages identical.
    if not os.path.exists(args.output_folder):
        print("Creating output directory: %s" % args.output_folder)
    os.makedirs(args.output_folder, exist_ok=True)

    log_dir = os.path.join(args.output_folder, 'logs')
    if not os.path.exists(log_dir):
        print("Creating log directory: %s" % log_dir)
    os.makedirs(log_dir, exist_ok=True)

    model_dir = os.path.join(args.output_folder, 'models')
    if not os.path.exists(model_dir):
        print("Creating model directory: %s" % model_dir)
    os.makedirs(model_dir, exist_ok=True)

    args.log_dir = log_dir
    args.model_dir = model_dir

    main()
| 40.063439 | 121 | 0.580548 |
096bb429411e96f2d66472cdc1c72183b691a5bc | 3,521 | py | Python | backend/routes/todolist.py | BurnySc2/sanic-react-typescript-template | 02b1722c9230018402e4c5ffbb11204a0343e73b | [
"MIT"
] | 1 | 2020-12-20T16:09:46.000Z | 2020-12-20T16:09:46.000Z | backend/routes/todolist.py | BurnySc2/sanic-react-typescript-template | 02b1722c9230018402e4c5ffbb11204a0343e73b | [
"MIT"
] | null | null | null | backend/routes/todolist.py | BurnySc2/sanic-react-typescript-template | 02b1722c9230018402e4c5ffbb11204a0343e73b | [
"MIT"
] | null | null | null | import os
import sqlite3
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
from fastapi import Request
from fastapi.routing import APIRouter
from loguru import logger
# Snapshot of the process environment, taken once at import time.
ENV = os.environ.copy()


def _env_flag(key: str) -> bool:
    """Interpret an environment variable as a boolean flag.

    The flag is True when the variable is unset (default 'True') or set to
    the literal string 'True'; any other value disables it.
    """
    return ENV.get(key, 'True') == 'True'


USE_MONGO_DB: bool = _env_flag('USE_MONGO_DB')
USE_POSTGRES_DB: bool = _env_flag('USE_POSTGRES_DB')
USE_LOCAL_SQLITE_DB: bool = _env_flag('USE_LOCAL_SQLITE_DB')
SQLITE_FILENAME: str = ENV.get('SQLITE_FILENAME', 'todos.db')

# TODO use different database tables when using stage = dev/staging/prod
todo_list_router = APIRouter()

# Module-level SQLite connection, opened lazily by the route handlers.
db: Optional[sqlite3.Connection] = None

# TODO Communicate with postgresql and mongodb
# Alternative to above with request body:
# Alternative to above with model:
| 32.302752 | 90 | 0.672536 |
096bb8869bace9e3c4b6964fc661952242355ebd | 11,602 | py | Python | membership/management/commands/csvbills.py | guaq/sikteeri | 9a80790666edaa058e9cb42cb9e78626cfc0e565 | [
"MIT"
] | null | null | null | membership/management/commands/csvbills.py | guaq/sikteeri | 9a80790666edaa058e9cb42cb9e78626cfc0e565 | [
"MIT"
] | null | null | null | membership/management/commands/csvbills.py | guaq/sikteeri | 9a80790666edaa058e9cb42cb9e78626cfc0e565 | [
"MIT"
] | null | null | null | # encoding: UTF-8
from __future__ import with_statement
import logging
import codecs
import csv
import os
from datetime import datetime, timedelta
from decimal import Decimal
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from membership.models import Bill, BillingCycle, Payment
from membership.utils import log_change
from optparse import make_option
logger = logging.getLogger("membership.csvbills")
def row_to_payment(row):
    """Map one parsed bank-statement row to a Payment instance.

    If a Payment with the row's transaction id already exists in the
    database it is returned unchanged; otherwise a new, *unsaved* Payment
    is built from the row's fields.
    """
    try:
        return Payment.objects.get(transaction_id__exact=row['transaction'])
    except Payment.DoesNotExist:
        pass
    # A payment cannot be dated in the future; clamp the day to "now".
    payment_day = min(datetime.now(), row['date'])
    return Payment(
        payment_day=payment_day,
        amount=row['amount'],
        type=row['event_type_description'],
        payer_name=row['fromto'],
        reference_number=row['reference'],
        message=row['message'],
        transaction_id=row['transaction'],
    )
def attach_payment_to_cycle(payment, user=None):
    """
    Attach a payment to the billing cycle matching its reference number.

    Returns the BillingCycle on success, or None when the cycle already has
    enough payments (the payment is then flagged as a duplicate and saved).
    Raises BillingCycle.DoesNotExist when no cycle matches the reference.

    Outside of this module, this function is mainly used by
    generate_test_data.py.
    """
    # Caller must only pass fresh payments: not ignored and not yet attached.
    if payment.ignore or payment.billingcycle is not None:
        raise Exception("Unexpected function call. This shouldn't happen.")

    cycle = BillingCycle.objects.get(
        reference_number=payment.reference_number)

    if cycle.is_paid and cycle.amount_paid() >= cycle.sum:
        # Don't attach a payment to a cycle with enough payments
        payment.comment = _('duplicate payment')
        payment.duplicate = True
        # NOTE(review): user id 1 is hard-coded as the audit-log user here --
        # confirm this matches the deployment's admin-user convention.
        log_user = User.objects.get(id=1)
        log_change(payment, log_user,
                   change_message="Payment not attached due to duplicate payment")
        payment.save()
        return None

    payment.attach_to_cycle(cycle, user=user)
    return cycle
def process_payments(reader, user=None):
    """
    Actual CSV file processing logic.

    Iterates over parsed bank-statement rows, attaching each incoming
    (positive-amount) payment to its billing cycle. Returns a list of
    (level, payment_id, message) tuples describing what was done; the
    payment id is only set for payments with no matching billing cycle.
    """
    return_messages = []
    num_attached = num_notattached = 0
    sum_attached = sum_notattached = 0
    for row in reader:
        if row is None:
            continue
        if row['amount'] < 0:  # Transaction is paid by us, ignored
            continue
        # Payment in future more than 1 day is a fatal error
        if row['date'] > datetime.now() + timedelta(days=1):
            raise PaymentFromFutureException("Payment date in future")
        payment = row_to_payment(row)

        # Do nothing if this payment has already been assigned or ignored
        if payment.billingcycle or payment.ignore:
            continue

        try:
            cycle = attach_payment_to_cycle(payment, user=user)
            if cycle:
                msg = _("Attached payment %(payment)s to cycle %(cycle)s") % {
                    'payment': unicode(payment), 'cycle': unicode(cycle)}
                logger.info(msg)
                return_messages.append((None, None, msg))
                num_attached += 1
                sum_attached += payment.amount
            else:
                # Payment not attached to cycle because enough payments were attached
                msg = _("Billing cycle already paid for %s. Payment not attached.") % payment
                return_messages.append((None, None, msg))
                logger.info(msg)
                num_notattached += 1
                sum_notattached += payment.amount
        except BillingCycle.DoesNotExist:
            # Failed to find cycle for this reference number
            if not payment.id:
                payment.save()  # Only save if object not in database yet
            logger.warning("No billing cycle found for %s" % payment.reference_number)
            return_messages.append((None, payment.id, _("No billing cycle found for %s") % payment))
            num_notattached += 1
            sum_notattached += payment.amount

    log_message = "Processed %s payments total %.2f EUR. Unidentified payments: %s (%.2f EUR)" % \
        (num_attached + num_notattached, sum_attached + sum_notattached,
         num_notattached, sum_notattached)
    logger.info(log_message)
    return_messages.append((None, None, log_message))
    return return_messages
def process_op_csv(file_handle, user=None):
    """Process an OP bank statement CSV, attaching payments to billing cycles.

    Returns the message list produced by process_payments().
    """
    logger.info("Starting OP payment CSV processing...")
    reader = OpDictReader(file_handle)
    # Bug fix: forward `user` so attachments are attributed to the caller
    # (it was previously accepted but silently dropped).
    return process_payments(reader, user=user)
def process_procountor_csv(file_handle, user=None):
    """Process a Procountor payment CSV, attaching payments to billing cycles.

    Returns the message list produced by process_payments().
    """
    logger.info("Starting procountor payment CSV processing...")
    reader = ProcountorDictReader(file_handle)
    # Bug fix: forward `user` so attachments are attributed to the caller
    # (it was previously accepted but silently dropped).
    return process_payments(reader, user=user)
| 38.039344 | 104 | 0.589036 |
096c49cec3a4f594f36896910c20f3ffbf6d0451 | 1,962 | py | Python | apps/site/api/serializers/dataset_serializer.py | LocalGround/localground | aa5a956afe7a84a7763a3b23d62a9fd925831cd7 | [
"Apache-2.0"
] | 9 | 2015-05-29T22:22:20.000Z | 2022-02-01T20:39:00.000Z | apps/site/api/serializers/dataset_serializer.py | LocalGround/localground | aa5a956afe7a84a7763a3b23d62a9fd925831cd7 | [
"Apache-2.0"
] | 143 | 2015-01-22T15:03:40.000Z | 2020-06-27T01:55:29.000Z | apps/site/api/serializers/dataset_serializer.py | LocalGround/localground | aa5a956afe7a84a7763a3b23d62a9fd925831cd7 | [
"Apache-2.0"
] | 5 | 2015-03-16T20:51:49.000Z | 2017-02-07T20:48:49.000Z | from localground.apps.site.api.serializers.base_serializer import \
BaseSerializer, NamedSerializerMixin, ProjectSerializerMixin
from localground.apps.site.api.serializers.field_serializer import \
FieldSerializer
from django.conf import settings
from rest_framework import serializers
from localground.apps.site import models
| 37.730769 | 87 | 0.690622 |
096c52364e36a63ef84c11f7cd157e7b506deae2 | 1,447 | py | Python | example/0_Basic_usage_of_the_library/python_pyppeteer/7_PageClass_Cookie.py | RecluseXU/learning_spider | 45fa790ed7970be57a21b40817cc66856de3d99b | [
"MIT"
] | 38 | 2020-08-30T11:41:53.000Z | 2022-03-23T04:30:26.000Z | example/0_Basic_usage_of_the_library/python_pyppeteer/7_PageClass_Cookie.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 2 | 2021-08-20T16:34:12.000Z | 2021-10-08T11:06:41.000Z | example/0_Basic_usage_of_the_library/python_pyppeteer/7_PageClass_Cookie.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 10 | 2020-11-24T09:15:42.000Z | 2022-02-25T06:05:16.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : 7_PageClass_Cookie.py
@Time : 2020-8-23 01:33:25
@Author : Recluse Xu
@Version : 1.0
@Contact : 444640050@qq.com
@Desc : Page Class
https://miyakogi.github.io/pyppeteer/reference.html#pyppeteer.page.Page.target
PagePage
'''
# here put the import lib
import asyncio
from pyppeteer import launch
# Drive the async entry point to completion. NOTE(review):
# asyncio.get_event_loop() is deprecated for obtaining a loop from sync code
# since Python 3.10 -- consider asyncio.run(main()) on modern interpreters.
asyncio.get_event_loop().run_until_complete(main())
096e0c82b3230ff3e7a21ae6baf386609c988e98 | 2,354 | py | Python | flask_io/negotiation.py | theelous3/flask-io | 39745083a260bdda20d3a0712287aade9e0d8a97 | [
"MIT"
] | 17 | 2015-12-03T02:09:44.000Z | 2022-03-28T08:33:41.000Z | flask_io/negotiation.py | theelous3/flask-io | 39745083a260bdda20d3a0712287aade9e0d8a97 | [
"MIT"
] | 6 | 2015-12-31T06:31:03.000Z | 2021-05-27T10:13:34.000Z | flask_io/negotiation.py | theelous3/flask-io | 39745083a260bdda20d3a0712287aade9e0d8a97 | [
"MIT"
] | 7 | 2016-06-16T18:55:44.000Z | 2022-02-22T10:33:09.000Z | """
Content negotiation selects an appropriate parser and renderer for an HTTP request.
"""
from abc import ABCMeta, abstractmethod
from .mimetypes import MimeType
| 30.571429 | 93 | 0.641886 |
096e0f5dcdfc6d12aa8ec7627807b41fbeb0e3df | 48,070 | py | Python | tests/test_integ_squash.py | pombredanne/docker-scripts | ecee9f921b22cd44943197635875572185dd015d | [
"MIT"
] | 513 | 2016-04-04T21:44:14.000Z | 2022-03-27T06:18:26.000Z | tests/test_integ_squash.py | pombredanne/docker-scripts | ecee9f921b22cd44943197635875572185dd015d | [
"MIT"
] | 106 | 2016-04-01T11:53:20.000Z | 2022-03-31T00:35:31.000Z | tests/test_integ_squash.py | pombredanne/docker-scripts | ecee9f921b22cd44943197635875572185dd015d | [
"MIT"
] | 75 | 2016-05-11T01:08:47.000Z | 2022-03-25T01:20:06.000Z | import unittest
import mock
import six
import codecs
import os
import json
import logging
import shutil
import tarfile
import io
from io import BytesIO
import uuid
from docker_squash.squash import Squash
from docker_squash.errors import SquashError, SquashUnnecessaryError
from docker_squash.lib import common
if not six.PY3:
    # Python 2 only: side-effect import -- presumably patches the stdlib
    # tarfile module (e.g. adds xz support); confirm in
    # docker_squash.lib.xtarfile.
    import docker_squash.lib.xtarfile
class TestIntegSquash(IntegSquash):
def test_all_files_should_be_in_squashed_layer(self):
"""
We squash all layers in RUN, all files should be in the resulting squashed layer.
"""
dockerfile = '''
FROM %s
RUN touch /somefile_layer1
RUN touch /somefile_layer2
RUN touch /somefile_layer3
''' % TestIntegSquash.BUSYBOX_IMAGE
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 3) as squashed_image:
squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer2')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
squashed_image.assertFileExists('somefile_layer1')
squashed_image.assertFileExists('somefile_layer2')
squashed_image.assertFileExists('somefile_layer3')
with self.Container(squashed_image) as container:
container.assertFileExists('somefile_layer1')
container.assertFileExists('somefile_layer2')
container.assertFileExists('somefile_layer3')
# We should have two layers less in the image
self.assertTrue(
len(squashed_image.layers) == len(image.layers) - 2)
    def test_only_files_from_squashed_image_should_be_in_squashed_layer(self):
        """
        We squash only the last two layers; files created in the earlier,
        unsquashed layer must not be duplicated into the squashed layer
        (and no whiteout marker for them may appear either).
        """
        dockerfile = '''
        FROM %s
        RUN touch /somefile_layer1
        RUN touch /somefile_layer2
        RUN touch /somefile_layer3
        ''' % TestIntegSquash.BUSYBOX_IMAGE
        with self.Image(dockerfile) as image:
            with self.SquashedImage(image, 2) as squashed_image:
                squashed_image.assertFileDoesNotExist('.wh.somefile_layer2')
                squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
                # This file should not be in the squashed layer
                squashed_image.assertFileDoesNotExist('somefile_layer1')
                # Nor a marker files for it
                squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
                squashed_image.assertFileExists('somefile_layer2')
                squashed_image.assertFileExists('somefile_layer3')
                with self.Container(squashed_image) as container:
                    # This file should be in the container
                    container.assertFileExists('somefile_layer1')
                    container.assertFileExists('somefile_layer2')
                    container.assertFileExists('somefile_layer3')
            # Squashing two layers into one: one layer less in the image
            # (the original comment saying "two layers less" was wrong).
            self.assertEqual(
                len(squashed_image.layers), len(image.layers) - 1)
    def test_there_should_be_a_marker_file_in_the_squashed_layer(self):
        """
        Here we're testing that the squashed layer should contain a '.wh.somefile_layer1'
        file, because the file was not found in the squashed tar and it is present in
        the layers we do not squash.
        """
        dockerfile = '''
        FROM %s
        RUN touch /somefile_layer1
        RUN rm /somefile_layer1
        RUN touch /somefile_layer3
        ''' % TestIntegSquash.BUSYBOX_IMAGE
        with self.Image(dockerfile) as image:
            with self.SquashedImage(image, 2) as squashed_image:
                squashed_image.assertFileDoesNotExist('somefile_layer1')
                squashed_image.assertFileExists('somefile_layer3')
                # The whiteout marker must survive the squash: the deleted
                # file still exists in the unsquashed base layer.
                squashed_image.assertFileExists('.wh.somefile_layer1')
                # And it must be a regular file, not a hard link.
                squashed_image.assertFileIsNotHardLink('.wh.somefile_layer1')
                with self.Container(squashed_image) as container:
                    container.assertFileExists('somefile_layer3')
                    container.assertFileDoesNotExist('somefile_layer1')
                # We should have one layer less in the image
                self.assertEqual(
                    len(squashed_image.layers), len(image.layers) - 1)
# https://github.com/goldmann/docker-squash/issues/97
# https://github.com/goldmann/docker-scripts/issues/28
# https://github.com/goldmann/docker-scripts/issues/30
# https://github.com/goldmann/docker-scripts/pull/31
# https://github.com/goldmann/docker-scripts/issues/32
# https://github.com/goldmann/docker-scripts/issues/33
# This is an edge case where we try to squash last 2 layers
# but these layers do not create any content on filesystem
# https://github.com/goldmann/docker-scripts/issues/54
# https://github.com/goldmann/docker-scripts/issues/52
# Test may be misleading, but squashing all layers makes sure we hit
# at least one <missing> layer
# https://github.com/goldmann/docker-scripts/issues/44
# https://github.com/goldmann/docker-squash/issues/80
# https://github.com/goldmann/docker-squash/issues/99
# TODO: try not to use centos:6.6 image - this slows down testsuite
# https://github.com/goldmann/docker-squash/issues/66
# https://github.com/goldmann/docker-squash/issues/94
# https://github.com/goldmann/docker-squash/issues/104
# https://github.com/goldmann/docker-squash/issues/111
# https://github.com/goldmann/docker-squash/issues/112
# https://github.com/goldmann/docker-squash/issues/112
# https://github.com/goldmann/docker-squash/issues/116
# https://github.com/goldmann/docker-squash/issues/118
# https://github.com/goldmann/docker-squash/issues/118
# https://github.com/goldmann/docker-squash/issues/120
# https://github.com/goldmann/docker-squash/issues/122
# https://github.com/goldmann/docker-squash/issues/181
# https://github.com/goldmann/docker-squash/issues/181
# https://github.com/goldmann/docker-squash/issues/186
# https://github.com/goldmann/docker-squash/issues/186
# https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
class NumericValues(IntegSquash):
    # Integration test: squashing with a numeric layer count must validate
    # the count against the number of layers the image actually has.
    def test_should_not_squash_more_layers_than_image_has(self):
        """Squashing must fail fast when asked for more layers than exist."""
        # six.assertRaisesRegex keeps the assertion Python 2/3 compatible.
        with six.assertRaisesRegex(self, SquashError, r"Cannot squash 20 layers, the .* image contains only \d layers"):
            # NumericValues.image is presumably prepared by the IntegSquash
            # harness (TODO confirm); 20 exceeds any test image's layer count.
            with self.SquashedImage(NumericValues.image, 20, numeric=True):
                pass
if __name__ == '__main__':
    unittest.main()
| 41.872822 | 319 | 0.608924 |
096e973dacf00212d057f5f9f7861e73bb7168e0 | 1,627 | py | Python | models/bam.py | AnonymityCode/FastLFnet | cc4c1d9620fef5e75798f40084729d8d7fdd5a9a | [
"MIT"
] | 8 | 2021-10-13T01:31:25.000Z | 2022-03-18T03:13:15.000Z | models/bam.py | zcong17huang/FastLFnet | d86e9b333f6acb62e0b4d30ed14519fe39ef2963 | [
"MIT"
] | 5 | 2021-12-13T07:19:14.000Z | 2022-03-26T13:00:37.000Z | models/bam.py | AnonymityCode/FastLFnet | cc4c1d9620fef5e75798f40084729d8d7fdd5a9a | [
"MIT"
] | 2 | 2021-11-04T04:04:41.000Z | 2021-11-06T07:56:25.000Z | import torch
import math
import torch.nn as nn
import torch.nn.functional as F
| 54.233333 | 146 | 0.671789 |
09706c6eb6ce8046078f05dc861a923a4dfa7d00 | 736 | py | Python | ML/Computer Vision/Lab7_face_detection_real_time.py | richeyphu/ITE-425 | 4210b692609fa04cdd00b76a45d9e1e5baacd6e3 | [
"MIT"
] | null | null | null | ML/Computer Vision/Lab7_face_detection_real_time.py | richeyphu/ITE-425 | 4210b692609fa04cdd00b76a45d9e1e5baacd6e3 | [
"MIT"
] | null | null | null | ML/Computer Vision/Lab7_face_detection_real_time.py | richeyphu/ITE-425 | 4210b692609fa04cdd00b76a45d9e1e5baacd6e3 | [
"MIT"
] | null | null | null | import cv2
# Real-time face detection on a video stream using a Haar cascade classifier.
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#capture = cv2.VideoCapture(0)  # uncomment to read from the default webcam instead
capture = cv2.VideoCapture('Elon Musk 320.mp4')
while True:
    # FIX: check the read flag -- at end of file capture.read() returns
    # (False, None) and cvtColor would crash on the None frame.
    ok, frame = capture.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # cascade operates on grayscale
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,   # image-pyramid scale step
        minNeighbors=5,    # higher -> fewer false positives
        #minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a green bounding box around each detected face.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Image', frame)
    # BUG FIX: the original wrote waitKey(30 & 0xff) -- that masks the DELAY
    # (a no-op, 30 & 0xff == 30) and then compares the unmasked return value.
    # The 0xff mask must be applied to waitKey's RETURN value.
    if (cv2.waitKey(30) & 0xff) == 27:  # Esc quits
        break
capture.release()
cv2.destroyAllWindows()  # close the display window on exit
0970b9ad7614a84f468d83f8de90de992c7f521f | 1,110 | py | Python | setup.py | tabac/cprofilev | dd9ee42ef8e68d08dbdde88ddce854aac55ef934 | [
"MIT"
] | null | null | null | setup.py | tabac/cprofilev | dd9ee42ef8e68d08dbdde88ddce854aac55ef934 | [
"MIT"
] | null | null | null | setup.py | tabac/cprofilev | dd9ee42ef8e68d08dbdde88ddce854aac55ef934 | [
"MIT"
] | 1 | 2019-09-15T12:56:29.000Z | 2019-09-15T12:56:29.000Z | from setuptools import setup
import sys
# Refuse to run on interpreters older than the minimum supported version.
if sys.version_info < (2,5):
    raise NotImplementedError(
        "Sorry, you need at least Python 2.5 to use cprofilev.")
# Single source of truth for the package version (reused in setup() below).
VERSION = '1.0.4'
# The module docstring doubles as the PyPI long description.
__doc__ = """\
An easier way to use cProfile.
Outputs a simpler html view of profiled stats.
Able to show stats while the code is still running!
"""
setup(
    name='CProfileV',
    version=VERSION,
    url='https://github.com/ymichael/cprofilev',
    author='Michael Yong',
    author_email='wrong92@gmail.com',
    # Single-module distribution: all code lives in cprofilev.py.
    py_modules=['cprofilev'],
    # Installs a `cprofilev` console command that calls cprofilev.main().
    entry_points="""
    [console_scripts]
    cprofilev = cprofilev:main
    """,
    install_requires=["bottle"],
    license='MIT',
    description='An easier way to use cProfile',
    long_description=__doc__,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Environment :: Web Environment',
        'Framework :: Bottle',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Software Development :: Testing',
    ]
)
0971b6f1115e89b5d0376e06557390d061de5c54 | 566 | py | Python | bocce/__init__.py | brianjpetersen/bocce | 20a4845400e8759173c5391ce52f18dafbf4c678 | [
"MIT"
] | null | null | null | bocce/__init__.py | brianjpetersen/bocce | 20a4845400e8759173c5391ce52f18dafbf4c678 | [
"MIT"
] | null | null | null | bocce/__init__.py | brianjpetersen/bocce | 20a4845400e8759173c5391ce52f18dafbf4c678 | [
"MIT"
] | null | null | null | # standard libraries
import os
# third party libraries
pass
# first party libraries
from . import (application, routing, static, surly, requests, responses,
utils, cookies, exceptions, middleware, )
__where__ = os.path.dirname(os.path.abspath(__file__))
__all__ = ('Application', 'application', 'routing', 'Route', 'Routes',
'Request', 'Response', 'exceptions', 'surly', 'Url')
Route = routing.Route
Routes = routing.Routes
Application = application.Application
Url = surly.Url
Request = requests.Request
Response = responses.Response
| 26.952381 | 73 | 0.715548 |
09722db03d2e3d65cdf0b22fef132df0fab89e4d | 5,947 | py | Python | classification.py | Sigmoid-Frontsquat-LLC/classification-model-backend | 7366302063315a245b7ab20219fb22ecf67bd377 | [
"MIT"
] | null | null | null | classification.py | Sigmoid-Frontsquat-LLC/classification-model-backend | 7366302063315a245b7ab20219fb22ecf67bd377 | [
"MIT"
] | null | null | null | classification.py | Sigmoid-Frontsquat-LLC/classification-model-backend | 7366302063315a245b7ab20219fb22ecf67bd377 | [
"MIT"
] | null | null | null | import sys # this is for extracting command line arguments.
activator = ''
optimizer = ''
source = ''
if len(sys.argv) == 1 or (len(sys.argv) - 1) % 2 != 0:
raise ValueError("Usage: [-s image] [-a activator] [-o optimizer]")
else:
# could this be done better?
# sure, but this works for now...
for i in range(1, len(sys.argv) - 1):
flag = sys.argv[i]
value = sys.argv[i + 1]
isActivator, act = parse_activator(flag, value)
if isActivator:
if act != '-o':
activator = act
continue
isOptimizer, opt = parse_optimizer(flag, value)
if isOptimizer:
optimizer = opt
continue
isSource, so = parse_source(flag, value)
if isSource:
source = so
continue
pass
pass
# naive check to ensure no argument is left unfilled
if len(activator) == 0 or len(optimizer) == 0 or len(source) == 0 :
raise ValueError("Usage: [-s image] [-a activator] [-o optimizer]")
# exit(0)
############# Classification Logic ##################
import pandas as pd
import io
import requests
import numpy as np
import os
import logging
import json
import shutil
from sklearn.model_selection import train_test_split
from sklearn import metrics
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications.vgg16 import VGG16
from PIL import Image, ImageFile, ImageEnhance
from matplotlib.pyplot import imshow
import requests
from io import BytesIO
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
####### warning messages not printed #######
logging.disable(logging.WARNING)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# class labels are as follows for the cifar10
# airplane : 0
# automobile : 1
# bird : 2
# cat : 3
# deer : 4
# dog : 5
# frog : 6
# horse : 7
# ship : 8
# truck : 9
class_labels = ['airplane','automobile','bird','cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = 10
# Image preprocessing
img = Image.open(source)
img = img.resize((32,32))
enhancer = ImageEnhance.Sharpness(img)
enhanced_im = enhancer.enhance(10.0)
enhanced_im.save('resized.jpg')
img_array = np.asarray(enhanced_im)
img_array = img_array / 255
input_shape = (32,32,3)
# reshape for model
# original model was trained with (32,32,3)
img_array = img_array.reshape((1,32,32,3))
modelo = Sequential()
modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same', input_shape=input_shape))
modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same'))
modelo.add(MaxPooling2D((3, 3)))
modelo.add(Dropout(0.2))
modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
modelo.add(MaxPooling2D((3, 3)))
modelo.add(Dropout(0.2))
modelo.add(Conv2D(128, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(128, (3, 3), activation=activator, padding='same'))
modelo.add(MaxPooling2D((3, 3)))
modelo.add(Flatten())
modelo.add(Dense(128, activation=activator))
modelo.add(Dropout(0.2))
modelo.add(Dense(10, activation='softmax'))
modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
# validate the 'activator'
pass
# validate the 'optimizer'
pass
# Load pretrained weights selected by the (optimizer, activator) pair.
# Table-driven replacement for the original duplicated if/elif ladders;
# the weight-file paths and the 'error' fallback are unchanged.
WEIGHT_FILES = {
    ('adam', 'relu'): 'dnn/relu-adam2.hdf5',
    ('adam', 'sigmoid'): 'dnn/sigmoid-adam2.hdf5',
    ('adam', 'tanh'): 'dnn/tanh-adam2.hdf5',
    ('sgd', 'relu'): 'dnn/relu-sgd2.hdf5',
    ('sgd', 'sigmoid'): 'dnn/sigmoid-sgd2.hdf5',
    ('sgd', 'tanh'): 'dnn/tanh-sgd2.hdf5',
}
if optimizer in ('adam', 'sgd'):
    # (Re-)compile with the requested optimizer, exactly as the original did.
    modelo.compile(loss='categorical_crossentropy', optimizer=optimizer)
    weights_path = WEIGHT_FILES.get((optimizer, activator))
    if weights_path is not None:
        modelo.load_weights(weights_path)
    else:
        # Unknown activator: same diagnostic as before.
        print('error')
# Get the classification
############# classification ##############
# predict() returns a batch of per-class probability vectors; take the
# first (and only) row for our single input image.
pred = modelo.predict(img_array)
pred = pred[0]
pred_class = class_labels[np.argmax(pred)]  # best-scoring label
############# JSON ###############
# One {label: probability} entry per class, in the fixed class_labels order.
# Replaces the original hand-written ten-element list literal -- identical
# structure, no index-by-index duplication.
classification = [{label: score} for label, score in zip(class_labels, pred)]
########## output ################
print(classification)
| 23.230469 | 102 | 0.636624 |
0972614a80b05e57c1220dbf0ff54e2fa988f86e | 7,658 | py | Python | wizard/gui/destination_manager.py | Wizard-collab/wizard_2 | a2cb23362e178a0205f6dd0b9b4328c329b5b142 | [
"MIT"
] | 1 | 2021-10-13T15:07:32.000Z | 2021-10-13T15:07:32.000Z | wizard/gui/destination_manager.py | Wizard-collab/wizard_2 | a2cb23362e178a0205f6dd0b9b4328c329b5b142 | [
"MIT"
] | null | null | null | wizard/gui/destination_manager.py | Wizard-collab/wizard_2 | a2cb23362e178a0205f6dd0b9b4328c329b5b142 | [
"MIT"
] | null | null | null | # coding: utf-8
# Author: Leo BRUNEL
# Contact: contact@leobrunel.com
# Python modules
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal
import logging
# Wizard modules
from wizard.core import assets
from wizard.core import project
from wizard.vars import ressources
# Wizard gui modules
from wizard.gui import gui_utils
from wizard.gui import gui_server
logger = logging.getLogger(__name__)
| 42.076923 | 131 | 0.70619 |
0972acc5adf0761541464f2087b1feb90d1044ab | 1,591 | py | Python | algotrading/agents/six_month_cycle_agent.py | vrishank97/AlgoTrading | 41dd44f73d97267283032ed433dd0bfb3bd6c638 | [
"MIT"
] | 92 | 2018-12-21T11:21:17.000Z | 2022-03-27T13:01:45.000Z | build/lib/algotrader/agents/six_month_cycle_agent.py | ajmal017/AlgoTrading-5 | 41dd44f73d97267283032ed433dd0bfb3bd6c638 | [
"MIT"
] | 3 | 2018-12-19T16:33:36.000Z | 2019-05-28T10:08:40.000Z | build/lib/algotrader/agents/six_month_cycle_agent.py | ajmal017/AlgoTrading-5 | 41dd44f73d97267283032ed433dd0bfb3bd6c638 | [
"MIT"
] | 34 | 2019-05-28T21:31:51.000Z | 2022-02-06T20:25:54.000Z | from .BaseAgent import BaseAgent
import pandas as pd
import numpy as np
from itertools import islice | 29.462963 | 123 | 0.588938 |
09735c1ad6c8d9ce147599ce6ceedfef6549b227 | 116 | py | Python | 001085StepikPythonIntrO/Stepik001085PythonIntrOсh01p04st05С05_20200410.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001085StepikPythonIntrO/Stepik001085PythonIntrOсh01p04st05С05_20200410.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001085StepikPythonIntrO/Stepik001085PythonIntrOсh01p04st05С05_20200410.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | n = float(input())
# Read the second timestamp (the first, `n`, is read just above this block).
m = float(input())
# FIX: the original rebound the builtins `max` and `min` (and shadowed the
# stdlib `time` module name); use ordinary variable names instead.
later = max(n, m)
earlier = min(n, m)
elapsed = later - earlier  # presumably minutes -- TODO confirm against the exercise text
# Report the difference as (whole hours, remaining minutes) -- same output
# values as the original print.
print(elapsed // 60, elapsed % 60)
| 16.571429 | 28 | 0.560345 |
0973f9ff4b18475410c9ec73581276a3c910551c | 485 | py | Python | modu/linear_regression/linear_regression_cost_plot.py | godong9/ml | 2c735376f4366000685cd97de5df31aabc1c597e | [
"MIT"
] | null | null | null | modu/linear_regression/linear_regression_cost_plot.py | godong9/ml | 2c735376f4366000685cd97de5df31aabc1c597e | [
"MIT"
] | null | null | null | modu/linear_regression/linear_regression_cost_plot.py | godong9/ml | 2c735376f4366000685cd97de5df31aabc1c597e | [
"MIT"
] | null | null | null | import tensorflow as tf
import matplotlib.pyplot as plt
X = [1, 2, 3]
Y = [1, 2, 3]
W = tf.placeholder(tf.float32)
hypothesis = X * W
cost = tf.reduce_mean(tf.square(hypothesis - Y))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
W_val = []
cost_val = []
for i in range(-30, 50):
feed_W = i * 0.1
curr_cost, curr_W = sess.run([cost, W], feed_dict={W: feed_W})
W_val.append(curr_W)
cost_val.append(curr_cost)
plt.plot(W_val, cost_val)
plt.show() | 18.653846 | 66 | 0.665979 |
09744b77cc03b6489272302bf793ec2a6a1a7ea2 | 2,078 | py | Python | dyno_pods/test/test_multisampling.py | louisXW/PODS-DYNO | 5cd3cced8f0556a5c42d9021ff1d965880f360dd | [
"MIT"
] | null | null | null | dyno_pods/test/test_multisampling.py | louisXW/PODS-DYNO | 5cd3cced8f0556a5c42d9021ff1d965880f360dd | [
"MIT"
] | 1 | 2022-03-24T18:17:50.000Z | 2022-03-24T18:17:50.000Z | dyno_pods/test/test_multisampling.py | louisXW/PODS-DYNO | 5cd3cced8f0556a5c42d9021ff1d965880f360dd | [
"MIT"
] | 1 | 2021-08-01T12:57:30.000Z | 2021-08-01T12:57:30.000Z | """
.. module:: test_multisampling
:synopsis: Test multisampling strategy
.. moduleauthor:: David Eriksson <dme65@cornell.edu>
"""
from pySOT import Ackley, CandidateDYCORS, GeneticAlgorithm, \
MultiStartGradient, SyncStrategyNoConstraints, \
RBFInterpolant, CubicKernel, LinearTail, \
SymmetricLatinHypercube, MultiSampling
from poap.controller import SerialController
import numpy as np
import os.path
import logging
if __name__ == '__main__':
main()
| 34.065574 | 95 | 0.682387 |
09745d72d59a6783162603fbdf15fcd5912b5ca1 | 1,172 | py | Python | play_game.py | sanderland/SelfplayLab | 4ce5b8ffd8cfb5465196dddaa0142b2843570b98 | [
"MIT"
] | 2 | 2020-12-10T17:11:23.000Z | 2021-05-09T04:14:00.000Z | play_game.py | sanderland/SelfplayLab | 4ce5b8ffd8cfb5465196dddaa0142b2843570b98 | [
"MIT"
] | null | null | null | play_game.py | sanderland/SelfplayLab | 4ce5b8ffd8cfb5465196dddaa0142b2843570b98 | [
"MIT"
] | 2 | 2021-05-09T04:14:05.000Z | 2021-05-09T04:14:34.000Z | import torch
import argparse
from selfplaylab.game.go import CaptureGoState, PixelCaptureGoState, GoState
from selfplaylab.game.gomoku import GoMokuState, GoMokuStateAugmented, TicTacToe, TicTacToeAugmented
from selfplaylab.game.nim import NimState
from selfplaylab.game.othello import OthelloState
from selfplaylab.play import play_game
parser = argparse.ArgumentParser(description="Self-play visualization.")
parser.add_argument("--game", type=str, help="Game to play")
parser.add_argument("--tag", type=str, help="Tag for experiment", default="")
args = parser.parse_args()
game = args.game
if game == "cg":
game_class = CaptureGoState
elif game == "pxcg":
game_class = PixelCaptureGoState
elif game == "nim":
game_class = NimState
elif game == "oth":
game_class = OthelloState
else:
raise Exception("unknown game")
net = game_class.create_net(tag=args.tag)
options = {}
print(f"Loaded net {net.metadata['filename']} on cuda? {net.device}")
temp_fn = lambda mv: 1.0 if mv < 2 else 0.1
with torch.no_grad():
game_states = play_game(
net_evaluator=net.evaluate_sample, game_class=game_class, temperature=temp_fn, verbose=True,
)
| 31.675676 | 100 | 0.75256 |
09761ad36a8d0fda1e1934f1d5836f763526e5ae | 558 | py | Python | tests/test_version.py | rndazurescript/Coco2CustomVision | c189109413b185a77f5d1de51fb2dbcc96139ff6 | [
"MIT"
] | null | null | null | tests/test_version.py | rndazurescript/Coco2CustomVision | c189109413b185a77f5d1de51fb2dbcc96139ff6 | [
"MIT"
] | 1 | 2022-02-23T13:01:38.000Z | 2022-02-23T13:01:38.000Z | tests/test_version.py | rndazurescript/Coco2CustomVision | c189109413b185a77f5d1de51fb2dbcc96139ff6 | [
"MIT"
] | null | null | null | import re
def test_version():
    """Test version string"""
    # Function-local import of the package under test.
    from coco2customvision import __version__
    # Split on both '.' and '-' so pre-release suffixes become extra parts.
    version_parts = re.split("[.-]", __version__)
    if __version__ != "UNKNOWN":
        assert 3 <= len(version_parts), "must have at least Major.minor.patch"
        # try_parse_int is defined elsewhere in this module -- presumably
        # returns None for non-integer strings; verify against its definition.
        assert all(
            not try_parse_int(i) is None for i in version_parts[:2]
        ), f"Version Major.minor must be 2 integers. Received {__version__}"
| 26.571429 | 78 | 0.641577 |
0976f09aff61c07c694ac80f44c6f37d65e2b8b2 | 1,266 | py | Python | Python/Pages/ITProPage.py | hirokundayon/koedo | 1d6fc0bb6045edb24253f039628104256896bd1a | [
"Apache-2.0"
] | 1 | 2019-02-04T15:13:51.000Z | 2019-02-04T15:13:51.000Z | Python/Pages/ITProPage.py | hirokundayon/koedo | 1d6fc0bb6045edb24253f039628104256896bd1a | [
"Apache-2.0"
] | null | null | null | Python/Pages/ITProPage.py | hirokundayon/koedo | 1d6fc0bb6045edb24253f039628104256896bd1a | [
"Apache-2.0"
] | 1 | 2018-02-26T15:12:04.000Z | 2018-02-26T15:12:04.000Z | # -*- coding: utf-8 -*-
from Pages.PageObject import PageObject
import time
| 32.461538 | 98 | 0.598736 |
0979c9e1fe818ea6f9a20142144fdc173a453a83 | 1,328 | py | Python | session04/movie.py | heawon99/Repository-NEXT_HW_new | e73fac56469b7518034322f0d2fefe5f95c8c164 | [
"MIT"
] | null | null | null | session04/movie.py | heawon99/Repository-NEXT_HW_new | e73fac56469b7518034322f0d2fefe5f95c8c164 | [
"MIT"
] | null | null | null | session04/movie.py | heawon99/Repository-NEXT_HW_new | e73fac56469b7518034322f0d2fefe5f95c8c164 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
# Scrape one page of Naver Shopping search results and collect each item's
# title and price. NOTE(review): the CSS class names below are tied to a
# specific build of the Naver front end and will break when it changes.
notebook_html = requests.get('https://search.shopping.naver.com/search/all?pagingIndex=2&pagingSize=80&query=')
notebook_soup = BeautifulSoup(notebook_html.text,"html.parser")
# The <ul class="list_basis"> element wraps the whole result list.
notebook_list_box = notebook_soup.find("ul", {"class" : "list_basis"})
notebook_list = notebook_list_box.find_all('li', {"class" : "basicList_item__2XT81"})
result = []
for notebook in notebook_list:
    # Item title lives in the anchor inside the title <div>.
    title = notebook.find("div",{"class":"basicList_title__3P9Q7"}).find("a").string
    price = notebook.find("div",{"class":"basicList_price_area__1UXXR"}).find("span",{"class":"price_num__2WUXn"}).text
    notebook_info = {
        'title' : title,
        'price' : price
    }
    result.append(notebook_info)
print(result)
# notebook_list = notebook_soup.select_one('#__next > div > div.style_container__1YjHN > div.style_inner__18zZX > div.style_content_wrap__1PzEo > div.style_content__2T20F > ul > div > div:nth-child(1) > li > div > div.basicList_info_area__17Xyo > div.basicList_title__3P9Q7 > a').string
# NOTE(review): this trailing assignment overwrites notebook_list with a
# single element and its value is never used afterwards -- dead code?
notebook_list=notebook_soup.select_one('#__next > div > div.style_container__1YjHN > div.style_inner__18zZX > div.style_content_wrap__1PzEo > div.style_content__2T20F > ul > div > div:nth-child(1) > li > div > div.basicList_info_area__17Xyo > div.basicList_title__3P9Q7 > a')
| 47.428571 | 286 | 0.745482 |
097a67824a1ea5f6c93e2208bf5602c06cf66bd7 | 9,891 | py | Python | Chapter05/5B_MnA/5B_MnAPrediction.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 115 | 2020-06-18T15:00:58.000Z | 2022-03-02T10:13:19.000Z | Chapter05/5B_MnA/5B_MnAPrediction.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 2 | 2020-11-06T11:02:31.000Z | 2021-01-22T12:44:35.000Z | Chapter05/5B_MnA/5B_MnAPrediction.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 60 | 2020-07-22T14:53:10.000Z | 2022-03-23T10:17:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
QUANDLKEY = '<Enter your Quandl APT key here>'
"""
Created on Fri Oct 5 23:24:35 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and define key variables
'''
import pandas as pd
import numpy as np
import quandl
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,roc_curve, auc,confusion_matrix,f1_score
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
import pickle
import graphviz
#KPI keys
quandl.ApiConfig.api_key = QUANDLKEY
'''*************************************
#2. Definition of functions
'''
#2a.Download tickers
#tkr = 'AMZN'
#df_tmp = download_tkr(tkr)
#2b.Train tree
##2C Neural Network
#2Ci. Grid search that simulate the performance of different neural network design
#2Cii. Train Neural Network
'''*************************************
3. Execute the program
#3a. filter the industry in scope
'''
groupby_fld = 'sicsector'
min_size = 30
df_tkr = pd.read_csv('industry_tickers_list.csv')
dict_ind_tkr = {}
f1_list = []
df_tkr_ind = pd.DataFrame()
df_tkr_ind['cnt'] = df_tkr.groupby(groupby_fld)['ticker'].count()
df_tkr_ind_select = df_tkr_ind[df_tkr_ind['cnt']>=min_size]
list_scope = list(df_tkr_ind_select.index)
#collect ticker in each industry
for index, row in df_tkr.iterrows():
ind = row[groupby_fld]
tkr = row['ticker']
if ind in list_scope:
if ind in dict_ind_tkr:
dict_ind_tkr[ind].append(tkr)
else:
dict_ind_tkr[ind] = [tkr]
#loop through the dictionary - one industry at a time
for ind, list_tkr in dict_ind_tkr.items():
df_X = pd.DataFrame({})
df_Y = pd.DataFrame({})
print(ind)
#Go through the ticker list to Download data from source
#loop through tickers from that industry
for tkr in list_tkr:
print(tkr)
try:
df_tmp,X_tmp,Y_tmp = download_tkr(tkr)
except Exception:
continue
if len(df_X)==0:
#df_all = df_tmp
df_X = X_tmp
df_Y = Y_tmp
else:
#df_all = pd.concat([df_all,df_tmp])
df_X = pd.concat([df_X,X_tmp])
df_Y = pd.concat([df_Y,Y_tmp])
'''
*************************************
3b. prepare features for clustering for the industry
'''
#convert to float and calc the difference across rows
df_X = df_X.astype(float)
df_Y = df_Y.astype(float)
#remove zero records
df_X = df_X.replace([np.inf ], 999999999)
df_X = df_X.fillna(0)
df_Y = df_Y.fillna(0)
#neural network
nn_clf,f1_score_temp = train_NN(df_X,df_Y,ind)
f1_list.append(f1_score_temp)
nn_clf.get_params()
#decision tree
try:
tree_clf,f1_score_temp = train_tree(df_X,df_Y,ind)
except Exception:
continue
f1_list.append(f1_score_temp)
tree_clf.get_params()
'''
#3c. Visualize the result
'''
fields_list = df_tmp.columns
print('********************')
print('f1 of the models')
print(f1_list)
print('********************')
#for visualization of decision tree
x_feature_name = fields_list[6:-8]
y_target_name = fields_list[-1]
d_tree_out_file = 'decision_tree_'+ind
dot_data = tree.export_graphviz(tree_clf, out_file=None,
feature_names=x_feature_name,
class_names=y_target_name,
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph.render(d_tree_out_file)
| 32.860465 | 126 | 0.64958 |
097bf627e47851400865727fdcce309364d45996 | 530 | py | Python | import random.py | joelkalonji/PasswordGeerator | 0f9ae4208f0c228c0b1e85241501f5d95b2b5013 | [
"MIT"
] | null | null | null | import random.py | joelkalonji/PasswordGeerator | 0f9ae4208f0c228c0b1e85241501f5d95b2b5013 | [
"MIT"
] | null | null | null | import random.py | joelkalonji/PasswordGeerator | 0f9ae4208f0c228c0b1e85241501f5d95b2b5013 | [
"MIT"
] | null | null | null | import random
import secrets  # cryptographically secure RNG -- appropriate for passwords

# Character classes available for password generation.
uppercase_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
lowercase_letters = "abcdefghijklmnopqrstuvwxyz"
digits = "0123456789"
symbols = "!@#$%^&*()_+-=[]{}|;':,./<>?"

# Toggle which character classes are allowed.
upper, lower, digit, symbol = True, True, True, True

# FIX: the original named the pool `all`, shadowing the builtin.
alphabet = ""
if upper:
    alphabet += uppercase_letters
if lower:
    alphabet += lowercase_letters
if digit:
    alphabet += digits
if symbol:
    alphabet += symbols

length = 20  # characters per password
amount = 10  # number of passwords to generate

for _ in range(amount):
    # FIX: random.sample() is not cryptographically secure and samples
    # WITHOUT replacement (no repeated characters, reducing entropy).
    # secrets.choice draws each character independently from a CSPRNG.
    password = "".join(secrets.choice(alphabet) for _ in range(length))
    print(password)
097c50a96728dff3d3f2f66802f6917cbcd87b74 | 20,517 | py | Python | scripts/all_to_all_analyzer.py | jweckstr/westmetro_scripts | a16385b00ac8d80f0068f348226ed89e2d0425a9 | [
"MIT"
] | null | null | null | scripts/all_to_all_analyzer.py | jweckstr/westmetro_scripts | a16385b00ac8d80f0068f348226ed89e2d0425a9 | [
"MIT"
] | null | null | null | scripts/all_to_all_analyzer.py | jweckstr/westmetro_scripts | a16385b00ac8d80f0068f348226ed89e2d0425a9 | [
"MIT"
] | null | null | null | import sqlite3
import pandas
import itertools
import networkx as nx
from gtfspy.gtfs import GTFS
from gtfspy.util import timeit
from scripts.all_to_all_settings import *
"""
AllToAllDifferenceAnalyzer calculates the difference between various summary statistics of temporal distance and number
of boardings, stores the values in a database and handles calls to this database.
"""
if __name__ == "__main__":
for time in TIMES:
a2aa = AllToAllDifferenceAnalyzer(GTFS_PATH, get_a2aa_db_path(time, "old"), get_a2aa_db_path(time, "lm"),
get_a2aa_db_path(time, "output"))
ignore_list = stops_to_exclude(return_sqlite_list=True)
a2aa.diff_table(groupby="to_stop_I", measure="n_boardings", ignore_stops=ignore_list)
a2aa.diff_table(groupby="from_stop_I", measure="n_boardings", ignore_stops=ignore_list)
a2aa.diff_table(groupby="to_stop_I", measure="temporal_distance", ignore_stops=ignore_list)
a2aa.diff_table(groupby="from_stop_I", measure="temporal_distance", ignore_stops=ignore_list)
#a2aa.diff_table(groupby="to_stop_I", measure="journey_duration", ignore_stops=ignore_list)
#a2aa.diff_table(groupby="from_stop_I", measure="journey_duration", ignore_stops=ignore_list) | 53.569191 | 173 | 0.561096 |
097cf870cfdf8eb690e3cbf5e80ead9f28adc1b0 | 2,258 | py | Python | tests/test_flow/test_snakemake_tutorial.py | flowsaber/flowsaber | 7d68d085bbd9165d2bc0e0acd7826e70569c5fa3 | [
"MIT"
] | 31 | 2021-05-08T06:35:07.000Z | 2022-03-05T05:58:24.000Z | tests/test_flow/test_snakemake_tutorial.py | flowsaber/flowsaber | 7d68d085bbd9165d2bc0e0acd7826e70569c5fa3 | [
"MIT"
] | 3 | 2021-05-10T12:36:57.000Z | 2021-05-15T14:01:15.000Z | tests/test_flow/test_snakemake_tutorial.py | zhqu1148980644/flowsaber | 7d68d085bbd9165d2bc0e0acd7826e70569c5fa3 | [
"MIT"
] | 1 | 2021-03-09T06:18:17.000Z | 2021-03-09T06:18:17.000Z | from flowsaber.api import *
def test_snakemake_workflow():
    """Build and run the snakemake-tutorial demo flow end to end."""
    # EnvTask is the real dependent task when using conda/image option
    # Input files shipped with the snakemake demo data set.
    prefix = 'tests/test_flow/snamke-demo.nosync/data'
    with flowsaber.context({
        "fa": f'{prefix}/genome.fa',
        "fastq": [f'{prefix}/samples/{sample}' for sample in ['A.fastq', 'B.fastq', 'C.fastq']]
    }):
        # resolve dependency
        workflow = call_vcf_flow()
    run(workflow)
if __name__ == "__main__":
    test_snakemake_workflow()
    pass
| 30.931507 | 105 | 0.59876 |
097d4bce40bb327df22e412c466f7654eaea0ad3 | 282 | py | Python | code/chapter-2/exercise2_8.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | code/chapter-2/exercise2_8.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | code/chapter-2/exercise2_8.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | mass = eval(input("Enter the amount of water in kilograms: "))
# FIX: eval(input()) executes arbitrary user-supplied code; parse the numeric
# input with float() instead. NOTE(review): the `mass` line just above this
# block also uses eval(input(...)) and deserves the same treatment.
initial_temp = float(input("Enter the initial temperature: "))
final_temp = float(input("Enter the final temperature: "))
# Q = m * c * dT with c = 4184 J/(kg*K), the specific heat of water.
energy = mass * (final_temp - initial_temp) * 4184
print("The energy needed is {}".format(energy))
| 47 | 62 | 0.72695 |
097f150675699f9f0fb29a948db31cc5196c818d | 129 | py | Python | portal_core/apps.py | kacotam-si/portal-core | 1580319520cc002371dcccb1bb4cb11125853121 | [
"MIT"
] | null | null | null | portal_core/apps.py | kacotam-si/portal-core | 1580319520cc002371dcccb1bb4cb11125853121 | [
"MIT"
] | 2 | 2020-06-05T20:45:19.000Z | 2020-10-13T04:25:55.000Z | portal_core/apps.py | kacotam-si/portal-core | 1580319520cc002371dcccb1bb4cb11125853121 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.conf import settings
| 18.428571 | 34 | 0.790698 |
097f69d052a1e922b006db68fee1c80d1a486da1 | 1,482 | py | Python | kaggle-lung-cancer-approach2/modules/ctscan.py | flaviostutz/datascience-snippets | 768083c4eda972bc1f6548baa86751e0405bda9b | [
"MIT"
] | 2 | 2017-06-05T17:25:55.000Z | 2018-02-04T04:01:13.000Z | kaggle-lung-cancer-approach2/modules/ctscan.py | flaviostutz/datascience-snippets | 768083c4eda972bc1f6548baa86751e0405bda9b | [
"MIT"
] | null | null | null | kaggle-lung-cancer-approach2/modules/ctscan.py | flaviostutz/datascience-snippets | 768083c4eda972bc1f6548baa86751e0405bda9b | [
"MIT"
] | null | null | null | import glob
import SimpleITK as sitk
import numpy as np
| 30.244898 | 134 | 0.593117 |
098125b6bdbfea383598e527bbb70b034cf26260 | 1,324 | py | Python | py_ad_1_4.py | aisolab/con-par-python | e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78 | [
"MIT"
] | 1 | 2022-02-20T03:14:50.000Z | 2022-02-20T03:14:50.000Z | py_ad_1_4.py | aisolab/con-par-python | e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78 | [
"MIT"
] | null | null | null | py_ad_1_4.py | aisolab/con-par-python | e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78 | [
"MIT"
] | null | null | null | """
Section 1
Multithreading - Thread (2) - Daemon, Join
Keyword - DaemonThread, Join
"""
"""
DaemonThread()
(1).
(2). ( .)
(3). -> JVM( ),
(4).
"""
import logging
import threading
#
#
if __name__ == "__main__":
# Logging format
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
logging.info("Main-Thread: before creating thread")
#
# Daemon: Default False
x = threading.Thread(target=thread_func, args=("First", range(200)), daemon=True)
y = threading.Thread(target=thread_func, args=("Two", range(10)), daemon=False)
logging.info("Main-Thread: before running thread")
#
x.start()
y.start()
# DaemonThread
print(x.isDaemon())
print(y.isDaemon())
#
# x.join() # , .
# y.join()
logging.info("Main-Thread: wait for the thread to finish")
logging.info("Main-Thread: all done")
| 24.981132 | 85 | 0.632175 |
09821682a814779b24686f7214f05d5600259f1a | 287 | py | Python | listTest.py | diallog/GCPpy | dabd55ece1c12c1a390a228cd04cb7eb110e564b | [
"Unlicense"
] | null | null | null | listTest.py | diallog/GCPpy | dabd55ece1c12c1a390a228cd04cb7eb110e564b | [
"Unlicense"
] | null | null | null | listTest.py | diallog/GCPpy | dabd55ece1c12c1a390a228cd04cb7eb110e564b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# PURPOSE: studying function side effects
import os
os.system('clear')
orgList = [5, 3, 2, 1, 4]
print(sumList(orgList))
print(orgList)
| 16.882353 | 41 | 0.655052 |
09828a4b8ceea5e0df2ba0674a51b0b2f6523586 | 2,029 | py | Python | class/pandas_class.py | Danigore25/python2 | de6d582fcc35107aa21a1bd73fdf04a0d4209d31 | [
"MIT"
] | null | null | null | class/pandas_class.py | Danigore25/python2 | de6d582fcc35107aa21a1bd73fdf04a0d4209d31 | [
"MIT"
] | null | null | null | class/pandas_class.py | Danigore25/python2 | de6d582fcc35107aa21a1bd73fdf04a0d4209d31 | [
"MIT"
] | 2 | 2021-09-07T00:30:49.000Z | 2021-10-19T15:14:54.000Z | import pandas as pd
import numpy as np
serie = pd.Series(['a', 'b', 'c', 'd', 'e'],
index=['a', 'b', 'c', 'd', 'e'],
name="Ejemplo Serie")
print(serie)
ecoli_matraz = pd.Series([0.1, 0.15, 0.19, 0.5,
0.9, 1.4, 1.8, 2.1, 2.3],
index=['t1', 't2', 't3', 't4',
't5', 't6', 't7', 't8', 't9'],
name='Matraz')
print(ecoli_matraz)
ODs = pd.Series([0.2, 0.2, 0.4, 0.1, 0.2, 0.1, 0.2, 0.4, 0.1],
index=[8, 4, 1, 2, 3, 0, 5, 7, 6],
name='Ajustes')
# EJERCICIO 1 ----------------------------------------------------------------------
produccion = pd.Series([5, 11, 4, 7, 2], index=['gen1', 'gen2', 'gen3', 'gen4', 'gen5'])
costos = pd.Series([5, 4.3, 7, 3.5], index=['gen1', 'gen2', 'gen3', 'gen5'])
costo_unitario = costos/produccion.T
print(costo_unitario)
print(costo_unitario.min())
# -----------------------------------------------------
nan_test = pd.Series([0.1, None, 2.1, 2.3], name='Matraz')
print(nan_test.count())
# loc y iloc
series_test = pd.Series([5.1, 2.2, 1.1, 3.1, 4.2], index=[5, 2, 1, 3, 4])
print(series_test)
print(series_test.loc[1])
print(series_test.iloc[1])
# EJERCICIO 2 ------------------------------------------------------------------
bool_min = costo_unitario == costo_unitario.min()
bool_max = costo_unitario == costo_unitario.max()
print(costo_unitario[bool_min | bool_max])
# Repetir ndices
regulon = pd.Series(['aidB', 'alaS', 'accB', 'accC', 'bhsA'], index=['AidB', 'AlaS', 'AccB', 'AccB', 'ComR'],
name='Genes regulados')
print(regulon.loc['AccB'])
print(regulon.loc['AidB'])
# Clases en series
array_clase = pd.Series([np.sum, 'a', Mamifero], name='objetos')
jerbo = array_clase.iloc[2]
print(jerbo.haz_ruido())
| 30.283582 | 110 | 0.485461 |
0986c2b4d466c529bcf1de02d35647e1f00797b3 | 6,209 | py | Python | scripts/datasets/mit67_install.py | cclauss/archai | a5fb8f937f7f1319e3204120803b2a045e9f768b | [
"MIT"
] | 344 | 2020-06-12T22:12:56.000Z | 2022-03-29T06:48:20.000Z | scripts/datasets/mit67_install.py | cclauss/archai | a5fb8f937f7f1319e3204120803b2a045e9f768b | [
"MIT"
] | 29 | 2020-06-13T19:56:49.000Z | 2022-03-30T20:26:48.000Z | scripts/datasets/mit67_install.py | cclauss/archai | a5fb8f937f7f1319e3204120803b2a045e9f768b | [
"MIT"
] | 68 | 2020-06-12T19:32:43.000Z | 2022-03-05T06:58:40.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
""" Script to prepare mit67 dataset for pytorch dataloader.
"""
from typing import List, Dict, Tuple, Union, Optional
import os
import pdb
import time
import argparse
import os
import tempfile
import requests
from torchvision.datasets.utils import download_and_extract_archive, download_url
from torch.utils.model_zoo import tqdm
from PIL import Image
import shutil
from collections import defaultdict
import pathlib
from archai.common import utils
def load_test_csv_data(filename: str) -> Dict[str, List[str]]:
    """Load a MIT67 *test* split csv file into a dictionary.

    Each data row has the form ``classname,img1,img2,...``; the header
    (first line) is skipped. Returns a ``defaultdict(list)`` mapping each
    class name to the list of its image names, so lookups of unknown
    classes yield an empty list. A repeated class name replaces the
    earlier entry (assignment, not append -- matches original behavior).

    Raises:
        AssertionError: if the file is completely empty.
    """
    data_dict: Dict[str, List[str]] = defaultdict(list)
    with open(filename, 'r') as f:
        lines = f.readlines()
        assert len(lines) > 0
        for line in lines[1:]:
            # words[0] is the class name; the rest are image names.
            # str.split(',') always returns at least one element, so no
            # per-row emptiness assertion is needed (the original one
            # was dead code).
            words = line.rstrip().split(',')
            data_dict[words[0]] = words[1:]
    return data_dict
def load_train_csv_data(filename: str) -> Dict[str, List[str]]:
    """Parse a MIT67 *train* split csv file.

    The header (first line) is skipped. In each remaining row, field 1
    is taken as the class name and fields 2 onward as its image names.
    Returns a ``defaultdict(list)`` keyed by class name; a repeated
    class name replaces the previous entry.

    Raises:
        AssertionError: if the file contains no lines at all.
    """
    mapping: Dict[str, List[str]] = defaultdict(list)
    with open(filename, 'r') as csv_file:
        rows = csv_file.readlines()
    assert len(rows) > 0
    for row in rows[1:]:
        fields = row.rstrip().split(',')
        assert len(fields) > 0
        mapping[fields[1]] = fields[2:]
    return mapping
if __name__ == '__main__':
    # Command-line entry point: prepare the MIT67 dataset under --dataroot.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', type=str, default='C:\\Users\\dedey\\dataroot',
                        help='root directory where mit67 folder is intended to exist. If mit67 already exists in the format required this script will skip downloading')
    args = parser.parse_args()
    # check that dataset is in the required format;
    # else download and prepare the dataset.
    # NOTE(review): check_mit67(), download() and prepare_data() are not
    # visible in this excerpt -- presumably defined earlier in the original
    # file; verify before running this block in isolation.
    if not check_mit67(args.dataroot):
        # Create the mit67 directory layout: train/, test/, meta/.
        mit67 = os.path.join(args.dataroot, 'mit67')
        train = os.path.join(mit67, 'train')
        test = os.path.join(mit67, 'test')
        meta = os.path.join(mit67, 'meta')
        os.makedirs(mit67, exist_ok=True)
        os.makedirs(train, exist_ok=True)
        os.makedirs(test, exist_ok=True)
        os.makedirs(meta, exist_ok=True)
        # This step creates the folder mit67/Images, which contains all
        # the images for each class in its own subfolder.
        download(mit67)
        # Download the csv files for the train and test split from the
        # 'NAS Evaluation is Frustrating' repo (antoyang/NAS-Benchmark).
        # Note: download_url doesn't work in vscode debug mode.
        test_file_url = 'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_test.csv'
        train_file_urls = ['https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train1.csv', 'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train2.csv',
                           'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train3.csv', 'https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train4.csv']
        download_url(test_file_url, meta, filename=None, md5=None)
        for tu in train_file_urls:
            download_url(tu, meta, filename=None, md5=None)
        # Reorganize the downloaded images into train/ and test/ splits.
        prepare_data(mit67)
| 35.683908 | 204 | 0.671445 |
0986ca341593898178573e0a204ed21602be920f | 99 | py | Python | tail/__init__.py | 0eu/tail-assignment | a86cdcbee88a6d0bf07b7ab7175a7742a5188a2f | [
"MIT"
] | 1 | 2020-12-01T15:05:21.000Z | 2020-12-01T15:05:21.000Z | tail/__init__.py | 0eu/tail-assignment | a86cdcbee88a6d0bf07b7ab7175a7742a5188a2f | [
"MIT"
] | null | null | null | tail/__init__.py | 0eu/tail-assignment | a86cdcbee88a6d0bf07b7ab7175a7742a5188a2f | [
"MIT"
] | null | null | null | from tail.core import read_last_lines, follow_lines
__all__ = ["read_last_lines", "follow_lines"]
| 24.75 | 51 | 0.79798 |
09871d53ff57c8147e01e850c7f2f9d9467db849 | 89,305 | py | Python | tests/unit/pywbemcli/test_class_cmds.py | pywbem/pywbemtools | 6b7c3f124324fd3ab7cffb82bc98c8f9555317e4 | [
"Apache-2.0"
] | 8 | 2017-04-01T13:55:00.000Z | 2022-03-15T18:28:47.000Z | tests/unit/pywbemcli/test_class_cmds.py | pywbem/pywbemtools | 6b7c3f124324fd3ab7cffb82bc98c8f9555317e4 | [
"Apache-2.0"
] | 918 | 2017-03-03T14:29:03.000Z | 2022-03-29T15:32:16.000Z | tests/unit/pywbemcli/test_class_cmds.py | pywbem/pywbemtools | 6b7c3f124324fd3ab7cffb82bc98c8f9555317e4 | [
"Apache-2.0"
] | 2 | 2020-01-17T15:56:46.000Z | 2020-02-12T18:49:30.000Z | # Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the class command
"""
from __future__ import absolute_import, print_function
import sys
import os
from packaging.version import parse as parse_version
import pytest
from pywbem import __version__ as pywbem_version
from .cli_test_extensions import CLITestsBase, PYWBEM_0, \
FAKEURL_STR
from .common_options_help_lines import CMD_OPTION_NAMES_ONLY_HELP_LINE, \
CMD_OPTION_HELP_HELP_LINE, CMD_OPTION_SUMMARY_HELP_LINE, \
CMD_OPTION_NAMESPACE_HELP_LINE, CMD_OPTION_PROPERTYLIST_HELP_LINE, \
CMD_OPTION_INCLUDE_CLASSORIGIN_HELP_LINE, \
CMD_OPTION_LOCAL_ONLY_CLASS_HELP_LINE, CMD_OPTION_NO_QUALIFIERS_HELP_LINE, \
CMD_OPTION_MULTIPLE_NAMESPACE_HELP_LINE, \
CMD_OPTION_ASSOCIATION_FILTER_HELP_LINE, \
CMD_OPTION_INDICATION_FILTER_HELP_LINE, \
CMD_OPTION_EXPERIMENTAL_FILTER_HELP_LINE, \
CMD_OPTION_DEPRECATED_FILTER_HELP_LINE, \
CMD_OPTION_SINCE_FILTER_HELP_LINE, \
CMD_OPTION_SCHEMA_FILTER_HELP_LINE, \
CMD_OPTION_SUBCLASSOF_FILTER_HELP_LINE, \
CMD_OPTION_LEAFCLASSES_FILTER_HELP_LINE
# Parsed pywbem version, used to select version-dependent test behavior.
_PYWBEM_VERSION = parse_version(pywbem_version)

# True if the installed pywbem is 1.0.0 or later.
PYWBEM_1_0_0 = _PYWBEM_VERSION.release >= (1, 0, 0)

# True if mock scripts with a setup() function are supported
# (requires Python 3.5+).
MOCK_SETUP_SUPPORTED = sys.version_info >= (3, 5)

# Directory containing this test module; used to locate mock files.
TEST_DIR = os.path.dirname(__file__)

# A mof file that defines basic qualifier decls, classes, and instances
# but not tied to the DMTF classes.
SIMPLE_MOCK_FILE = 'simple_mock_model.mof'

# Invoke-method mock scripts; the _0 variant targets old pywbem (< 1.0),
# the _1 variant targets pywbem 1.x.
INVOKE_METHOD_MOCK_FILE_0 = 'simple_mock_invokemethod_v0.py'
INVOKE_METHOD_MOCK_FILE_1 = 'simple_mock_invokemethod_v1old.py'
# Select the mock script matching the installed pywbem major version.
INVOKE_METHOD_MOCK_FILE = INVOKE_METHOD_MOCK_FILE_0 if PYWBEM_0 else \
    INVOKE_METHOD_MOCK_FILE_1

# Mock model with association classes/instances.
SIMPLE_ASSOC_MOCK_FILE = 'simple_assoc_mock_model.mof'
# Mock model exercising qualifier-based class filtering options.
QUALIFIER_FILTER_MODEL = 'qualifier_filter_model.mof'
# Mock model used by the 'class tree' command tests.
TREE_TEST_MOCK_FILE = 'tree_test_mock_model.mof'
# Mock script that sets up an interop namespace.
SIMPLE_INTEROP_MOCK_FILE = 'simple_interop_mock_script.py'
#
# The following list defines the help for each command in terms of particular
# parts of lines that are to be tested.//FakedUrl:5988
# For each test, try to include:
# 1. The usage line and in particular the argument component
# 2. The single
# 2. The last line CMD_OPTION_HELP_HELP_LINE
#
CLASS_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class COMMAND [ARGS] [COMMAND-OPTIONS]',
'Command group for CIM classes.',
CMD_OPTION_HELP_HELP_LINE,
'associators List the classes associated with a class.',
'delete Delete a class.',
'enumerate List top classes or subclasses of a class in a namespace.',
'find List the classes with matching class names on the server.',
'get Get a class.',
'invokemethod Invoke a method on a class.',
'references List the classes referencing a class.',
'tree Show the subclass or superclass hierarchy for a class.',
]
CLASS_ASSOCIATORS_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class associators CLASSNAME '
'[COMMAND-OPTIONS]',
'List the classes associated with a class.',
'--ac, --assoc-class CLASSNAME Filter the result set by association class',
'--rc, --result-class CLASSNAME Filter the result set by result class',
'-r, --role PROPERTYNAME Filter the result set by source end role',
'--rr, --result-role PROPERTYNAME Filter the result set by far end role',
CMD_OPTION_NO_QUALIFIERS_HELP_LINE,
CMD_OPTION_INCLUDE_CLASSORIGIN_HELP_LINE,
CMD_OPTION_PROPERTYLIST_HELP_LINE,
CMD_OPTION_NAMES_ONLY_HELP_LINE,
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_SUMMARY_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_DELETE_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class delete CLASSNAME '
'[COMMAND-OPTIONS]',
'Delete a class.',
'-f, --force Same as --include-instances.',
'--include-instances Delete any instances of the class as well.',
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_ENUMERATE_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class enumerate CLASSNAME '
'[COMMAND-OPTIONS]',
'List top classes or subclasses of a class in a namespace.',
'--di, --deep-inheritance Include the complete subclass hierarchy',
CMD_OPTION_LOCAL_ONLY_CLASS_HELP_LINE,
CMD_OPTION_NO_QUALIFIERS_HELP_LINE,
CMD_OPTION_INCLUDE_CLASSORIGIN_HELP_LINE,
CMD_OPTION_NAMES_ONLY_HELP_LINE,
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_SUMMARY_HELP_LINE,
# NOTE: The FILTER options are a group. Define all of them.
CMD_OPTION_ASSOCIATION_FILTER_HELP_LINE,
CMD_OPTION_INDICATION_FILTER_HELP_LINE,
CMD_OPTION_EXPERIMENTAL_FILTER_HELP_LINE,
CMD_OPTION_DEPRECATED_FILTER_HELP_LINE,
CMD_OPTION_SINCE_FILTER_HELP_LINE,
CMD_OPTION_SCHEMA_FILTER_HELP_LINE,
CMD_OPTION_SUBCLASSOF_FILTER_HELP_LINE,
CMD_OPTION_LEAFCLASSES_FILTER_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_FIND_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class find CLASSNAME-GLOB '
'[COMMAND-OPTIONS]',
'List the classes with matching class names on the server.',
'-s, --sort Sort by namespace. Default is to sort by',
CMD_OPTION_MULTIPLE_NAMESPACE_HELP_LINE,
# FILTER OPTIONS
CMD_OPTION_ASSOCIATION_FILTER_HELP_LINE,
CMD_OPTION_INDICATION_FILTER_HELP_LINE,
CMD_OPTION_EXPERIMENTAL_FILTER_HELP_LINE,
CMD_OPTION_DEPRECATED_FILTER_HELP_LINE,
CMD_OPTION_SINCE_FILTER_HELP_LINE,
CMD_OPTION_SCHEMA_FILTER_HELP_LINE,
CMD_OPTION_SUBCLASSOF_FILTER_HELP_LINE,
CMD_OPTION_LEAFCLASSES_FILTER_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_GET_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class get CLASSNAME [COMMAND-OPTIONS]',
'Get a class.',
CMD_OPTION_LOCAL_ONLY_CLASS_HELP_LINE,
CMD_OPTION_NO_QUALIFIERS_HELP_LINE,
CMD_OPTION_INCLUDE_CLASSORIGIN_HELP_LINE,
CMD_OPTION_PROPERTYLIST_HELP_LINE,
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_INVOKEMETHOD_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class invokemethod CLASSNAME '
'METHODNAME [COMMAND-OPTIONS]',
'Invoke a method on a class.',
'-p, --parameter PARAMETERNAME=VALUE Specify a method input parameter',
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_REFERENCES_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class references CLASSNAME '
'[COMMAND-OPTIONS]',
'List the classes referencing a class.',
'--rc, --result-class CLASSNAME Filter the result set by result class',
'-r, --role PROPERTYNAME Filter the result set by source end role',
CMD_OPTION_NO_QUALIFIERS_HELP_LINE,
CMD_OPTION_INCLUDE_CLASSORIGIN_HELP_LINE,
CMD_OPTION_PROPERTYLIST_HELP_LINE,
CMD_OPTION_NAMES_ONLY_HELP_LINE,
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_SUMMARY_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
CLASS_TREE_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] class tree CLASSNAME [COMMAND-OPTIONS]',
'Show the subclass or superclass hierarchy for a class.',
'-s, --superclasses Show the superclass hierarchy.',
' -d, --detail Show details about the class: the Version',
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
# pylint: disable=line-too-long
CIMFOO_SUB_SUB = """
[Description ( "Subclass of CIM_Foo_sub" )]
class CIM_Foo_sub_sub : CIM_Foo_sub {
string cimfoo_sub_sub;
string cimfoo_sub;
[Key ( true ),
Description ( "This is key property." )]
string InstanceID;
[Description ( "This is Uint32 property." )]
uint32 IntegerProp;
[Description ( "Embedded instance property" ),
EmbeddedInstance ( "CIM_FooEmb3" )]
string cimfoo_emb3;
[Description ( "Sample method with input and output parameters" )]
uint32 Method1(
[IN ( false ),
OUT ( true ),
Description ( "Response param 2" )]
string OutputParam2);
[Description ( "Method with in and out parameters" )]
uint32 Fuzzy(
[IN ( true ),
OUT ( true ),
Description ( "Define data to be returned in output parameter" )]
string TestInOutParameter,
[IN ( true ),
OUT ( true ),
Description ( "Test of ref in/out parameter" )]
CIM_FooRef1 REF TestRef,
[IN ( false ),
OUT ( true ),
Description ( "Rtns method name if exists on input" )]
string OutputParam,
[IN ( true ),
Description ( "Defines return value if provided." )]
uint32 OutputRtnValue);
[Description ( "Static method with in and out parameters" ),
Static ( true )]
uint32 FuzzyStatic(
[IN ( true ),
OUT ( true ),
Description ( "Define data to be returned in output parameter" )]
string TestInOutParameter,
[IN ( true ),
OUT ( true ),
Description ( "Test of ref in/out parameter" )]
CIM_Foo REF TestRef,
[IN ( false ),
OUT ( true ),
Description ( "Rtns method name if exists on input" )]
string OutputParam,
[IN ( true ),
Description ( "Defines return value if provided." )]
uint32 OutputRtnValue,
[IN ( true ),
Description ( "Embedded instance parameter" ),
EmbeddedInstance ( "CIM_FooEmb1" )]
string cimfoo_emb1);
[Description ( "Method with no parameters but embedded instance return" ),
EmbeddedInstance ( "CIM_FooEmb2" )]
string DeleteNothing();
};
""" # noqa: E501
# pylint: enable=line-too-long
CIMFOO_SUB_SUB_NO_QUALS = """
class CIM_Foo_sub_sub : CIM_Foo_sub {
string cimfoo_sub_sub;
string cimfoo_sub;
string InstanceID;
uint32 IntegerProp;
string cimfoo_emb3;
uint32 Method1(
string OutputParam2);
uint32 Fuzzy(
string TestInOutParameter,
CIM_FooRef1 REF TestRef,
string OutputParam,
uint32 OutputRtnValue);
uint32 FuzzyStatic(
string TestInOutParameter,
CIM_Foo REF TestRef,
string OutputParam,
uint32 OutputRtnValue,
string cimfoo_emb1);
string DeleteNothing();
};
"""
# TODO: This never referenced
REFERENCES_CLASS_RTN = [
FAKEURL_STR + '/root/cimv2:TST_Lineage',
'class TST_Lineage {',
'',
' string InstanceID;',
'',
' TST_Person REF parent;',
'',
' TST_Person REF child;',
'',
'};',
'',
FAKEURL_STR + '/root/cimv2:TST_MemberOfFamilyCollection',
'class TST_MemberOfFamilyCollection {',
'',
' TST_Person REF family;',
'',
' TST_Person REF member;',
'',
'};',
'']
# TODO: This never referenced
REFERENCES_CLASS_RTN2 = [
FAKEURL_STR + '/root/cimv2:TST_MemberOfFamilyCollection',
'class TST_MemberOfFamilyCollection {',
'',
' TST_Person REF family;',
'',
' TST_Person REF member;',
'',
'};',
'',
'']
REFERENCES_CLASS_RTN_QUALS2 = [
FAKEURL_STR + '/root/cimv2:TST_MemberOfFamilyCollection',
' [Association ( true ),',
' Description ( " Family gathers person to family." )]',
'class TST_MemberOfFamilyCollection {',
' [key ( true )]',
' TST_Person REF family;',
' [key ( true )]',
' TST_Person REF member;',
'};']
# Flags used in the 'condition' field of each testcase below:
OK = True     # testcase is known to execute correctly
RUN = True    # testcase currently being developed (mark others OK = False)
FAIL = False  # testcase is failing or has not been verified yet
TEST_CASES = [
# List of testcases.
# Each testcase is a list with the following items:
# * desc: Description of testcase.
# * inputs: String, or tuple/list of strings, or dict of 'env', 'args',
# 'general', and 'stdin'. See the 'inputs' parameter of
# CLITestsBase.command_test() in cli_test_extensions.py for detailed
# documentation.
# * exp_response: Dictionary of expected responses (stdout, stderr, rc) and
# test definition (test: <testname>). See the 'exp_response' parameter
# of CLITestsBase.command_test() in cli_test_extensions.py for
# detailed documentation.
# * mock: None, name of file (.mof or .py), or list thereof.
# * condition: If True the test is executed, if 'pdb' the test breaks in the
# the debugger, if 'verbose' print verbose messages, if False the test
# is skipped.
['Verify class command --help response',
['--help'],
{'stdout': CLASS_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command --help command order',
['--help'],
{'stdout': r'Commands:'
'.*\n enumerate'
'.*\n get'
'.*\n delete'
'.*\n invokemethod'
'.*\n references'
'.*\n associators'
'.*\n find'
'.*\n tree',
'test': 'regex'},
None, OK],
['Verify class command -h response',
['-h'],
{'stdout': CLASS_HELP_LINES,
'test': 'innows'},
None, OK],
#
# Enumerate command and its options
#
['Verify class command enumerate --help response',
['enumerate', '--help'],
{'stdout': CLASS_ENUMERATE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command enumerate -h response',
['enumerate', '-h'],
{'stdout': CLASS_ENUMERATE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command enumerate CIM_Foo',
['enumerate', 'CIM_Foo'],
{'stdout': ['[Description ( "Subclass of CIM_Foo" )]'],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --lo',
['enumerate', 'CIM_Foo', '--lo'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --lo',
['enumerate', 'CIM_Foo', '--local-only'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo_sub',
['enumerate', 'CIM_Foo_sub'],
{'stdout': CIMFOO_SUB_SUB,
'test': 'linesnows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo local-only',
['enumerate', 'CIM_Foo', '--local-only'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo -no-qualifiers',
['enumerate', 'CIM_Foo_sub', '--no-qualifiers'],
{'stdout': CIMFOO_SUB_SUB_NO_QUALS,
'test': 'linesnows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --di',
['enumerate', 'CIM_Foo', '--di'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --deep-inheritance',
['enumerate', 'CIM_Foo', '--deep-inheritance'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --ico',
['enumerate', 'CIM_Foo', '--ico'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --include-classorigin',
['enumerate', 'CIM_Foo', '--include-classorigin'],
{'stdout':
' [Description ( "Subclass of CIM_Foo" )]',
'test': 'startswith'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --no names only',
['enumerate', 'CIM_Foo', '--no'],
{'stdout': ['CIM_Foo', 'CIM_Foo_sub', 'CIM_Foo_sub2'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --names only',
['enumerate', 'CIM_Foo', '--names-only'],
{'stdout': ['CIM_Foo', 'CIM_Foo_sub', 'CIM_Foo_sub2'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate --no names only - table',
{'args': ['enumerate', '--no'],
'general': ['--output-format', 'table']},
{'stdout': """Classnames:
+--------------+
| Class Name |
|--------------|
| CIM_BaseEmb |
| CIM_BaseRef |
| CIM_Foo |
| CIM_FooAssoc |
+--------------+
""",
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --no names only - table',
{'args': ['enumerate', 'CIM_Foo', '--no'],
'general': ['--output-format', 'table']},
{'stdout': """Classnames:
+--------------+
| Class Name |
|--------------|
| CIM_Foo_sub |
| CIM_Foo_sub2 |
+--------------+
""",
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo --names-only',
['enumerate', 'CIM_Foo', '--names-only'],
{'stdout': ['CIM_Foo', 'CIM_Foo_sub', 'CIM_Foo_sub2'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo summary table',
['enumerate', 'CIM_Foo', '-s'],
{'stdout': ['2 CIMClass(s) returned'],
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo summary, table',
['enumerate', 'CIM_Foo', '--summary'],
{'stdout': ['2 CIMClass(s) returned'],
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo summary table output',
{'args': ['enumerate', 'CIM_Foo', '--summary'],
'general': ['--output-format', 'table']},
{'stdout': ["""Summary of CIMClass returned
+---------+------------+
| Count | CIM Type |
|---------+------------|
| 2 | CIMClass |
+---------+------------+
"""],
'test': 'linesnows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo names and --di --no',
['enumerate', 'CIM_Foo', '--di', '--no'],
{'stdout': ['CIM_Foo_sub', 'CIM_Foo_sub2', 'CIM_Foo_sub_sub'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo names and --deep-inheritance '
'--names-only',
['enumerate', 'CIM_Foo', '--names-only', '--deep-inheritance'],
{'stdout': ['CIM_Foo_sub', 'CIM_Foo_sub2', 'CIM_Foo_sub_sub'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate CIM_Foo include qualifiers',
['enumerate', 'CIM_Foo'],
{'stdout': ['Key ( true )', '[Description (', 'class CIM_Foo'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get with xml output format).',
{'args': ['enumerate'],
'general': ['--output-format', 'repr']},
{'stdout': [r"CIMClass\(classname='CIM_Foo', superclass=None,",
r"'InstanceID': CIMProperty\(name='InstanceID', value=None,"],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get with repr output format).',
{'args': ['enumerate'],
'general': ['--output-format', 'txt']},
{'stdout': ["CIMClass(classname='CIM_BaseEmb', ...)",
"CIMClass(classname='CIM_BaseRef', ...)",
"CIMClass(classname='CIM_Foo', ...)",
"CIMClass(classname='CIM_FooAssoc', ...)"],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate with repr output format).',
{'args': ['enumerate'],
'general': ['--output-format', 'xml']},
{'stdout': ['<CLASS( | .+ )NAME="CIM_Foo">',
'<PROPERTY( | .+ )NAME="InstanceID"',
'<PROPERTY( | .+ )NAME="IntegerProp"',
'<METHOD( | .+ )NAME="DeleteNothing"'],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate --di --no --namespace',
['enumerate', '--di', '--no', '-n', 'interop'],
{'stdout': ['CIM_Namespace', 'CIM_ObjectManager'],
'test': 'in'},
SIMPLE_INTEROP_MOCK_FILE, OK],
#
# Enumerate commands with the filter options
#
['Verify class command enumerate with --association filter.',
['enumerate', '--association', '--names-only'],
{'stdout': ['TST_Lineage', 'TST_MemberOfFamilyCollection'],
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command enumerate with --association filter --summary.',
['enumerate', '--association', '--summary'],
{'stdout': ['2 CIMClass(s) returned'],
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command enumerate with --association filter.',
['enumerate', '--association', '--names-only'],
{'stdout': ['TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_MemberOfFamilyCollectionExp'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --association filter --summary.',
['enumerate', '--association', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --association filter and no '
'qualifiers.',
['enumerate', '--association', '--nq'],
{'stdout': ['class TST_Lineage {',
'string InstanceID;',
'TST_Person REF parent;',
'TST_Person REF child;',
'class TST_MemberOfFamilyCollection {',
'TST_Person REF family;',
'TST_Person REF member;', '};'],
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command enumerate with --no-association filter and '
'no-qualifiers. Tests no qualifiers on parameters',
['enumerate', '--no-association', '--no-qualifiers'],
{'stdout': ['class CIM_Foo {',
'string InstanceID;',
'uint32 IntegerProp;',
'string cimfoo_emb3;',
'uint32 Fuzzy(',
'string TestInOutParameter,',
'CIM_FooRef1 REF TestRef,',
'string OutputParam,',
'uint32 OutputRtnValue);',
'uint32 FuzzyStatic(',
'string TestInOutParameter,',
'CIM_Foo REF TestRef,',
'string OutputParam,',
'uint32 OutputRtnValue,',
'string cimfoo_emb1);',
'string DeleteNothing();',
'};'],
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate with --no-association filter, simple mod.',
['enumerate', '--no-association', '--names-only'],
{'stdout': ['TST_FamilyCollection', 'TST_Person'],
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command enumerate with --no-association, --summary.',
['enumerate', '--no-association', '--summary'],
{'stdout': ['2 CIMClass(s) returned'],
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command enumerate with --no-association filter qual filt.',
['enumerate', '--no-association', '--names-only'],
{'stdout': ['BLA_Person', 'EXP_TestExperimental1',
'EXP_TestExperimental2', 'EXP_TestExperimental3',
'EXP_TestExperimental4', 'TST_FamilyCollection',
'TST_Indication', 'TST_IndicationDeprecated',
'TST_IndicationExperimental', 'TST_Person'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-association, --summary.',
['enumerate', '--no-association', '--summary'],
{'stdout': ['10 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --indication filter.',
['enumerate', '--indication', '--names-only'],
{'stdout': ['TST_Indication', 'TST_IndicationDeprecated',
'TST_IndicationExperimental'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --indication filter --summary.',
['enumerate', '--indication', '--summary'],
{'stdout': ['3 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-indication filter.',
['enumerate', '--no-indication', '--names-only'],
{'stdout': ['BLA_Person', 'EXP_TestExperimental1',
'EXP_TestExperimental2', 'EXP_TestExperimental3',
'EXP_TestExperimental4', 'TST_FamilyCollection',
'TST_FamilyCollection',
'TST_Lineage',
'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionExp',
'TST_Person'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-indication filter, --summary.',
['enumerate', '--no-indication', '--summary'],
{'stdout': ['11 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimentat filter.',
['enumerate', '--experimental', '--names-only'],
{'stdout': ['EXP_TestExperimental1', ' EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4',
'TST_IndicationExperimental',
'TST_MemberOfFamilyCollectionExp'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimentat filter -- summary.',
['enumerate', '--experimental', '--summary'],
{'stdout': ['6 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-experimental filter.',
['enumerate', '--no-experimental', '--names-only'],
{'stdout': ['BLA_Person',
'TST_FamilyCollection',
'TST_Indication',
'TST_IndicationDeprecated',
'TST_Lineage',
'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_Person'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-experimental, --summary.',
['enumerate', '--no-experimental', '--summary'],
{'stdout': ['8 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental, --association.',
['enumerate', '--experimental', '--association', '--names-only'],
{'stdout': ['EXP_TestExperimental1',
'EXP_TestExperimental2',
'EXP_TestExperimental3',
'EXP_TestExperimental4',
'TST_IndicationExperimental',
'TST_MemberOfFamilyCollectionExp'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental, --association, '
'--summary',
['enumerate', '--experimental', '--association', '--summary'],
{'stdout': ['6 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental , --no-association.',
['enumerate', '--experimental', '--no-association', '--names-only'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4',
'TST_IndicationExperimental'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --indication and --experimental.',
['enumerate', '--experimental', '--indication', '--names-only'],
{'stdout': ['TST_IndicationExperimental'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --indication, --no-experimental.',
['enumerate', '--no-experimental', '--indication', '--names-only'],
{'stdout': ['TST_Indication', 'TST_IndicationDeprecated'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-indication, --no-experimental, '
'--no-association',
['enumerate', '--no-experimental', '--no-indication', '--no-association',
'--names-only'],
{'stdout': ['BLA_Person',
'TST_FamilyCollection',
'TST_Person'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --deprecated, --no-association.',
['enumerate', '--deprecated', '--no-association', '--names-only'],
{'stdout': ['TST_IndicationDeprecated',
'TST_MemberOfFamilyCollectionDep'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --deprecated, --no-association, '
'--summary',
['enumerate', '--deprecated', '--no-association', '--summary'],
{'stdout': ['2 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-deprecated, --association',
['enumerate', '--no-deprecated', '--association', '--names-only'],
{'stdout': ['BLA_Person', 'EXP_TestExperimental1',
'EXP_TestExperimental2', 'EXP_TestExperimental3',
'EXP_TestExperimental4', 'TST_FamilyCollection',
'TST_Indication', 'TST_IndicationExperimental',
'TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionExp', 'TST_Person'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --no-deprecated, --no-association'
'--summary',
['enumerate', '--no-deprecated', '--association', '--summary'],
{'stdout': ['12 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental, --since 2.42.0.',
['enumerate', '--experimental', '--since', '2.42.0', '--names-only'],
{'stdout': ['TST_IndicationExperimental'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental and --since 2.42.0'
'--summary',
['enumerate', '--experimental', '--since', '2.42.0', '--summary'],
{'stdout': ['3 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental and --since 2.45.0.',
['enumerate', '--experimental', '--since', '2.45.0', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental and --since 2.45.x.',
['enumerate', '--experimental', '--since', '2.45.x', '--names-only'],
{'stderr': ['--since option value invalid. ',
'Must contain 3 integer elements',
'2.45.x'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --indication and --since 2.45.',
['enumerate', '--experimental', '--since', '2.45', '--names-only'],
{'stderr': ['Version value must contain 3 integer elements (int.int.int)',
'2.45'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "TST".',
['enumerate', '--schema', 'TST', '--names-only'],
{'stdout': ['TST_FamilyCollection', 'TST_Indication',
'TST_IndicationDeprecated', 'TST_IndicationExperimental',
'TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_MemberOfFamilyCollectionExp', 'TST_Person', ],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "BLA".',
['enumerate', '--schema', 'BLA', '--names-only'],
{'stdout': ['BLA_Person', ],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP".',
['enumerate', '--schema', 'EXP', '--names-only'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" --summary',
['enumerate', '--schema', 'EXP', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" and --experimental.',
['enumerate', '--schema', 'EXP', '--experimental', '--names-only'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" and --experimental.',
['enumerate', '--schema', 'EXP', '--experimental', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP",--experimental, '
'--summary.',
['enumerate', '--schema', 'EXP', '--experimental', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" , --no-experimental.',
['enumerate', '--schema', 'EXP', '--no-experimental', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" , --no-experimental '
'--summary',
['enumerate', '--schema', 'EXP', '--no-experimental', '--summary'],
{'stdout': ['0 objects returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "NOT_EXIST".',
['enumerate', '--schema', 'NOT_EXIST', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_Person.',
['enumerate', '--subclass-of', 'TST_Person', '--di', '--names-only'],
{'stdout': ['TST_PersonClsDep', 'TST_PersonDep',
'TST_PersonExp', 'TST_PersonExpProperty',
'TST_PersonPropDep', 'TST_PersonSub'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_Person --summary.',
['enumerate', '--subclass-of', 'TST_Person', '--di', '--summary'],
{'stdout': ['6 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_Person '
'-- association--summary .',
['enumerate', '--association', '--subclass-of', 'TST_Person', '--di',
'--summary'],
{'stdout': ['0 objects returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_PersonDep.',
['enumerate', '--subclass-of', 'TST_PersonDep', '--di', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_PersonDep '
'--summary.',
['enumerate', '--subclass-of', 'TST_PersonDep', '--di', '--summary'],
{'stdout': ['0 objects returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of NOT_EXIST excepts.',
['enumerate', '--subclass-of', 'NOT_EXIST', '--names-only'],
{'stderr': ['Classname NOT_EXIST for "subclass-of" not found'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify instance command enumerate CIM_Foo_sub2, w --verbose rtns msg.',
{'args': ['enumerate', 'CIM_Foo_sub2'],
'general': ['--verbose']},
{'stdout': 'No objects returned',
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
#
# Enumerate errors
#
['Verify class command enumerate nonexistent class name',
['enumerate', 'CIM_FClassDoesNotExist'],
{'stderr': ['CIMError', 'CIM_ERR_INVALID_CLASS'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate table output fails).',
{'args': ['enumerate'],
'general': ['--output-format', 'table']},
{'stderr': ['Output format "table"', 'not allowed', 'Only CIM formats:'],
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
#
# Test class get
#
['Verify class command get --help response',
['get', '--help'],
{'stdout': CLASS_GET_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command get -h response',
['get', '-h'],
{'stdout': CLASS_GET_HELP_LINES,
'test': 'innows'},
None, OK],
# command get local-only option
['Verify class command get not local-only. Tests for property names',
['get', 'CIM_Foo_sub2'],
{'stdout': ['string cimfoo_sub2;', 'InstanceID', 'IntegerProp', 'Fuzzy',
'Key ( true )', 'IN ( false )'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get local-only(--lo)).',
['get', 'CIM_Foo_sub2', '--lo'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
'};', ''],
'test': 'patterns'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get local-only. Tests whole response',
['get', 'CIM_Foo_sub2', '--local-only'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
'};', ''],
'test': 'patterns'},
SIMPLE_MOCK_FILE, OK],
# includequalifiers. Test the flag that excludes qualifiers
['Verify class command get without qualifiers. Tests whole response',
['get', 'CIM_Foo_sub2', '--nq'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
' string InstanceID;',
'',
' uint32 IntegerProp;',
'',
' string cimfoo_emb3;',
'',
' uint32 Fuzzy(',
' string TestInOutParameter,',
' CIM_FooRef1 REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue);',
'',
' uint32 FuzzyStatic(',
' string TestInOutParameter,',
' CIM_Foo REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue,',
' string cimfoo_emb1);',
'',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get without qualifiers. Tests whole response',
['get', 'CIM_Foo_sub2', '--no-qualifiers'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
' string InstanceID;',
'',
' uint32 IntegerProp;',
'',
' string cimfoo_emb3;',
'',
' uint32 Fuzzy(',
' string TestInOutParameter,',
' CIM_FooRef1 REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue);',
'',
' uint32 FuzzyStatic(',
' string TestInOutParameter,',
' CIM_Foo REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue,',
' string cimfoo_emb1);',
'',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
# pylint: disable=line-too-long
['Verify class command get with propertylist. Tests whole response',
['get', 'CIM_Foo_sub2', '--pl', 'InstanceID'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' [Key ( true ),',
' Description ( "This is key property." )]',
' string InstanceID;',
'',
' [Description ( "Method with in and out parameters" )]',
' uint32 Fuzzy(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_FooRef1 REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue);',
'',
' [Description ( "Static method with in and out parameters" ),', # noqa: E501
' Static ( true )]',
' uint32 FuzzyStatic(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_Foo REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue,',
' [IN ( true ),',
' Description ( "Embedded instance parameter" ),',
' EmbeddedInstance ( "CIM_FooEmb1" )]',
' string cimfoo_emb1);',
'',
' [Description ( "Method with no parameters but embedded instance return" ),', # noqa: E501
' EmbeddedInstance ( "CIM_FooEmb2" )]',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get with empty propertylist. Tests whole '
'response',
['get', 'CIM_Foo_sub2', '--pl', '""'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' [Description ( "Method with in and out parameters" )]',
' uint32 Fuzzy(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_FooRef1 REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue);',
'',
' [Description ( "Static method with in and out parameters" ),', # noqa: E501
' Static ( true )]',
' uint32 FuzzyStatic(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_Foo REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue,',
' [IN ( true ),',
' Description ( "Embedded instance parameter" ),',
' EmbeddedInstance ( "CIM_FooEmb1" )]',
' string cimfoo_emb1);',
'',
' [Description ( "Method with no parameters but embedded instance return" ),', # noqa: E501
' EmbeddedInstance ( "CIM_FooEmb2" )]',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
# pylint: enable=line-too-long
['Verify class command get with xml output format).',
{'args': ['get', 'CIM_Foo'],
'general': ['--output-format', 'repr']},
{'stdout': [r"CIMClass\(classname='CIM_Foo', superclass=None,",
r"'InstanceID': CIMProperty\(name='InstanceID', value=None,"],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get with repr output format).',
{'args': ['get', 'CIM_Foo'],
'general': ['--output-format', 'txt']},
{'stdout': ["CIMClass(classname='CIM_Foo', ...)"],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get with repr output format).',
{'args': ['get', 'CIM_Foo'],
'general': ['--output-format', 'xml']},
{'stdout': ['<CLASS( | .+ )NAME="CIM_Foo">',
'<PROPERTY( | .+ )NAME="InstanceID"',
'<PROPERTY( | .+ )NAME="IntegerProp"',
'<METHOD( | .+ )NAME="DeleteNothing"'],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
# pylint: disable=line-too-long
['Verify class command get with propertylist and classorigin,',
['get', 'CIM_Foo_sub2', '--pl', 'InstanceID', '--ico'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
' [Key ( true ),',
' Description ( "This is key property." )]',
' string InstanceID;',
' [Description ( "Method with in and out parameters" )]',
' uint32 Fuzzy(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in '
'output parameter" )]',
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_FooRef1 REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on '
'input" )]',
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if '
'provided." )]',
' uint32 OutputRtnValue);',
' [Description ( "Static method with in and out '
'parameters" ),',
' Static ( true )]',
' uint32 FuzzyStatic(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in '
'output parameter" )]',
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_Foo REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on '
'input" )]',
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if '
'provided." )]',
' uint32 OutputRtnValue,',
' [IN ( true ),',
' Description ( "Embedded instance parameter" ),',
' EmbeddedInstance ( "CIM_FooEmb1" )]',
' string cimfoo_emb1);',
' [Description ( "Method with no parameters but embedded instance return" ),', # noqa: E501
' EmbeddedInstance ( "CIM_FooEmb2" )]',
' string DeleteNothing();',
'};', ''],
'test': 'linesnows'},
SIMPLE_MOCK_FILE, OK],
# pylint: enable=line-too-long
['Verify class command enumerate --di --no --namespace',
['get', 'CIM_Namespace', '-n', 'interop'],
{'stdout': ['class CIM_Namespace',
'string ObjectManagerCreationClassName;'],
'test': 'innows'},
SIMPLE_INTEROP_MOCK_FILE, OK],
# get command errors
['Verify class command get invalid classname',
['get', 'CIM_Argh'],
{'stderr': ['CIMError', 'CIM_ERR_NOT_FOUND', '6'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get invalid namespace',
['get', 'CIM_Foo', '--namespace', 'Argh'],
{'stderr': ['CIMError', 'CIM_ERR_INVALID_NAMESPACE', '3'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
# Description fixed: this case runs 'class get', not 'class enumerate'
# (the original text was copy-pasted from the enumerate table-output test).
['Verify class command get table output fails).',
 {'args': ['get', 'CIM_Foo'],
  'general': ['--output-format', 'table']},
 {'stderr': ['Output format "table" ', 'not allowed', 'Only CIM formats:'],
  'rc': 1,
  'test': 'innows'},
 SIMPLE_MOCK_FILE, OK],
#
# find command
#
['Verify class command find --help response',
['find', '--help'],
{'stdout': CLASS_FIND_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command find -h response',
['find', '-h'],
{'stdout': CLASS_FIND_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command find simple name in all namespaces',
['find', 'CIM_*'],
{'stdout': [" root/cimv2: CIM_Foo",
" root/cimv2: CIM_Foo_sub",
" root/cimv2: CIM_Foo_sub2",
" root/cimv2: CIM_Foo_sub_sub"],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find simple name in all namespaces wo case',
['find', 'cim_*'],
{'stdout': [" root/cimv2: CIM_Foo",
" root/cimv2: CIM_Foo_sub",
" root/cimv2: CIM_Foo_sub2",
" root/cimv2: CIM_Foo_sub_sub"],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find simple name in all namespaces lead wc',
['find', '*sub_sub*'],
{'stdout': [" root/cimv2: CIM_Foo_sub_sub"],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find simple name in all namespaces wo case',
['find', '*sub_su?*'],
{'stdout': [" root/cimv2: CIM_Foo_sub_sub"],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find simple name in known namespace',
['find', 'CIM_*', '-n', 'root/cimv2'],
{'stdout': [" root/cimv2: CIM_BaseEmb",
" root/cimv2: CIM_BaseRef",
" root/cimv2: CIM_Foo",
" root/cimv2: CIM_FooAssoc",
" root/cimv2: CIM_FooEmb1",
" root/cimv2: CIM_FooEmb2",
" root/cimv2: CIM_FooEmb3",
" root/cimv2: CIM_FooRef1",
" root/cimv2: CIM_FooRef2",
" root/cimv2: CIM_Foo_sub",
" root/cimv2: CIM_Foo_sub2",
" root/cimv2: CIM_Foo_sub_sub"],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find simple name in interop namespace',
['find', 'CIM_*'],
{'stdout': [" interop: CIM_Namespace",
" interop: CIM_ObjectManager"],
'test': 'in'},
SIMPLE_INTEROP_MOCK_FILE, OK],
['Verify class command find name in known namespace -o grid',
{'general': ['-o', 'grid'],
'args': ['find', 'CIM_*', '-n', 'root/cimv2']},
{'stdout': ['Find class CIM_*',
'+-------------+-----------------+',
'| Namespace | Classname |',
'+=============+=================+',
'| root/cimv2 | CIM_BaseEmb |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_BaseRef |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_Foo |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_FooAssoc |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_FooEmb1 |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_FooEmb2 |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_FooEmb3 |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_FooRef1 |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_FooRef2 |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_Foo_sub |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_Foo_sub2 |',
'+-------------+-----------------+',
'| root/cimv2 | CIM_Foo_sub_sub |',
'+-------------+-----------------+'],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command verify nothing found for BLAH_ regex',
['find', 'BLAH_*', '-n', 'root/cimv2'],
{'stdout': "",
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find simple name in known namespace with wildcard',
['find', '*sub2', '-n', 'root/cimv2'],
{'stdout': " root/cimv2: CIM_Foo_sub2",
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command find with --association filter',
['find', '*TST_*', '-n', 'root/cimv2', '--association'],
{'stdout': ['TST_Lineage',
'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionExp'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --indication filter',
['find', '*TST_*', '-n', 'root/cimv2', '--indication'],
{'stdout': ['TST_Indication', 'root/cimv2:TST_IndicationExperimental'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --indication & -no-experimental filters',
['find', '*TST_*', '-n', 'root/cimv2', '--indication',
'--no-experimental'],
{'stdout': ['TST_Indication'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --association & --experimental filters',
['find', '*TST_*', '-n', 'root/cimv2', '--association', '--experimental'],
{'stdout': ['TST_MemberOfFamilyCollectionExp'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --no-association & --no-experimental, '
'filters',
['find', 'TST_*', '-n', 'root/cimv2', '--no-association',
'--no-experimental', '--no-indication'],
{'stdout': ['root/cimv2: TST_FamilyCollection', 'root/cimv2: TST_Person',
'root/cimv2: TST_PersonClsDep', 'root/cimv2: TST_PersonDep',
'root/cimv2: TST_PersonPropDep', 'root/cimv2: TST_PersonSub'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --no-association & --deprecated, ',
['find', 'TST_*', '-n', 'root/cimv2', '--no-association', '--deprecated'],
{'stdout': ['root/cimv2: TST_IndicationDeprecated',
'root/cimv2: TST_PersonClsDep',
'root/cimv2: TST_PersonDep',
'root/cimv2: TST_PersonExpProperty',
'root/cimv2: TST_PersonPropDep'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --experimental and --since 2.42.0.',
['find', "*", '--experimental', '--since', '2.42.0'],
{'stdout': ['TST_IndicationExperimental'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --experimental and --since 2.45.0.',
['find', "*", '--experimental', '--since', '2.45.0'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --experimental and --since 2.45.x.',
['find', "*", '--experimental', '--since', '2.45.x'],
{'stderr': ['--since option value invalid. ',
'Must contain 3 integer elements',
'2.45.x'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --schema "BLA".',
['find', '*', '--schema', 'BLA'],
{'stdout': ['BLA_Person', ],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --schema "EXP".',
['find', '*', '--schema', 'EXP'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --schema "EXP". test not-innows',
['find', '*', '--schema', 'EXP'],
{'stdout': ['BLA_Person', 'TST_FamilyCollection', 'TST_Indication',
'TST_IndicationDeprecated', 'TST_IndicationExperimental',
'TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_MemberOfFamilyCollectionExp',
'TST_Person', 'TST_PersonClsDep', 'TST_PersonDep',
'TST_PersonExp', 'TST_PersonExpProperty',
'TST_PersonPropDep', 'TST_PersonSub'],
'test': 'not-innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --schema "EXP" and --experimental.',
['find', '*', '--schema', 'EXP', '--experimental'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --subclass-of.',
['find', '*', '--subclass-of', 'TST_Person'],
{'stdout': ['root/cimv2: TST_PersonClsDep',
'root/cimv2: TST_PersonDep', 'root/cimv2: TST_PersonExp',
'root/cimv2: TST_PersonExpProperty',
'root/cimv2: TST_PersonPropDep', 'root/cimv2: TST_PersonSub'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command find with --subclass-of.',
['find', '*Sub', '--subclass-of', 'TST_Person'],
{'stdout': ['root/cimv2: TST_PersonSub'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
# Tests with --leaf-classes
['Verify class command enumerate with --leaf-classes. test innows',
['enumerate', '--di', '--no', '--leaf-classes'],
{'stdout': ['BLA_Person', 'EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4',
'TST_FamilyCollection', 'TST_Indication',
'TST_IndicationDeprecated', 'TST_IndicationExperimental',
'TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_MemberOfFamilyCollectionExp', 'TST_PersonClsDep',
'TST_PersonDep', 'TST_PersonExp', 'TST_PersonExpProperty',
'TST_PersonPropDep', 'TST_PersonSub'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --leaf-classes. test not-innows',
['enumerate', '--di', '--no', '--leaf-classes'],
{'stdout': ['TST_Person'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --leaf-classes & --subclass-of',
 ['enumerate', '--di', '--no', '--leaf-classes', '--subclass-of',
  'TST_Person'],
 # Fixed: comma added after 'TST_PersonExp'. The original relied on implicit
 # string concatenation, producing the bogus expected value
 # 'TST_PersonExpTST_PersonExpProperty', which only matched by accident
 # because the 'innows' comparison strips all whitespace from the output.
 {'stdout': ['TST_PersonClsDep', 'TST_PersonDep', 'TST_PersonExp',
             'TST_PersonExpProperty', 'TST_PersonPropDep', 'TST_PersonSub'],
  'test': 'innows'},
 QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --leaf-classes & --subclass-of, '
'not-innows',
['enumerate', '--di', '--no', '--leaf-classes', '--subclass-of',
'TST_Person'],
{'stdout': ['BLA_Person', 'EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4',
'TST_FamilyCollection', 'TST_Indication',
'TST_IndicationDeprecated', 'TST_IndicationExperimental',
'TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_MemberOfFamilyCollectionExp'],
'test': 'not-innows'},
QUALIFIER_FILTER_MODEL, OK],
#
# command "class delete"
#
['Verify class command delete --help response',
['delete', '--help'],
{'stdout': CLASS_DELETE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command delete -h response',
['delete', '-h'],
{'stdout': CLASS_DELETE_HELP_LINES,
'test': 'innows'},
None, OK],
# Class delete successful
['Verify class command delete successful with no subclasses, '
'--force (deprecated)',
{'args': ['delete', 'CIM_Foo_sub_sub', '--force'],
'general': ['--warn']},
{'stderr': ['DeprecationWarning: The --force / -f option has been '
'deprecated'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete successful with no subclasses, '
'-f (deprecated)',
{'args': ['delete', 'CIM_Foo_sub_sub', '-f'],
'general': ['--warn']},
{'stderr': ['DeprecationWarning: The --force / -f option has been '
'deprecated'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete successful with no subclasses, '
'--include-instances',
['delete', 'CIM_Foo_sub_sub', '--include-instances'],
{'stdout': ['Deleted instance root/cimv2:CIM_Foo_sub_sub.',
'Deleted class CIM_Foo_sub_sub'],
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete successful with no subclasses, --namespace '
'and --include-instances',
['delete', 'CIM_Foo_sub_sub', '--namespace', 'root/cimv2',
'--include-instances'],
{'stdout': ['Deleted instance root/cimv2:CIM_Foo_sub_sub.',
'Deleted class CIM_Foo_sub_sub'],
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete (interactive) successful with no subclasses, '
'--include-instances, --dry-run',
{'stdin': ['class delete CIM_Foo_sub_sub --include-instances --dry-run',
'class get CIM_Foo_sub_sub',
'instance count CIM_Foo_sub_sub']},
{'stdout': ['Dry run: Deleted instance root/cimv2:CIM_Foo_sub_sub.'
'InstanceID="CIM_Foo_sub_sub1"',
'Dry run: Deleted class CIM_Foo_sub_sub',
'class CIM_Foo_sub_sub : CIM_Foo_sub {',
'root/cimv2 CIM_Foo_sub_sub 3'],
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
# Class delete errors
['Verify class command delete no classname',
['delete'],
{'stderr': ['Error: Missing argument .CLASSNAME.'],
'rc': 2,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete nonexistent classname fails',
['delete', 'Argh'],
{'stderr': ['CIMError', 'CIM_ERR_INVALID_CLASS', '5'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail instances exist',
['delete', 'CIM_Foo_sub_sub'],
{'stderr': 'Cannot delete class CIM_Foo_sub_sub because it has '
'3 instances',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail subclasses exist',
['delete', 'CIM_Foo'],
{'stderr': 'Cannot delete class CIM_Foo because it has 12 instances',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail subclasses exist, --include-instances',
['delete', 'CIM_Foo', '--include-instances'],
{'stderr': 'Cannot delete class CIM_Foo because these classes depend on '
'it: CIM_Foo_sub, CIM_Foo_sub2',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail referencing class CIM_FooRef1 exist',
['delete', 'CIM_FooRef1'],
{'stderr': 'Cannot delete class CIM_FooRef1 because it has 1 instances',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail referencing class CIM_FooRef1 exist, '
'--include-instances',
['delete', 'CIM_FooRef1', '--include-instances'],
{'stderr': 'Cannot delete class CIM_FooRef1 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail referencing class CIM_FooRef2 exist',
['delete', 'CIM_FooRef2'],
{'stderr': 'Cannot delete class CIM_FooRef2 because it has 1 instances',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail referencing class CIM_FooRef2 exist, '
'--include-instances',
['delete', 'CIM_FooRef2', '--include-instances'],
{'stderr': 'Cannot delete class CIM_FooRef2 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail referencing class CIM_FooAssoc exist',
['delete', 'CIM_FooAssoc'],
{'stderr': 'Cannot delete class CIM_FooAssoc because it has 1 instances',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
# Typo fixed in the test description (test-id string): 'succesd' -> 'succeeds'.
['Verify class command delete succeeds for referencing class CIM_FooAssoc, '
 '--include-instances',
 ['delete', 'CIM_FooAssoc', '--include-instances'],
 {'stdout': '',
  'test': 'in'},
 SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail embedding class CIM_FooEmb1 exist',
['delete', 'CIM_FooEmb1'],
{'stderr': 'Cannot delete class CIM_FooEmb1 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail embedding class CIM_FooEmb1 exist, '
'--include-instances',
['delete', 'CIM_FooEmb1', '--include-instances'],
{'stderr': 'Cannot delete class CIM_FooEmb1 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail embedding class CIM_FooEmb2 exist',
['delete', 'CIM_FooEmb2'],
{'stderr': 'Cannot delete class CIM_FooEmb2 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail embedding class CIM_FooEmb2 exist, '
'--include-instances',
['delete', 'CIM_FooEmb2', '--include-instances'],
{'stderr': 'Cannot delete class CIM_FooEmb2 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail embedding class CIM_FooEmb3 exist',
['delete', 'CIM_FooEmb3'],
{'stderr': 'Cannot delete class CIM_FooEmb3 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fail embedding class CIM_FooEmb3 exist, '
'--include-instances',
['delete', 'CIM_FooEmb3', '--include-instances'],
{'stderr': 'Cannot delete class CIM_FooEmb3 because these classes depend '
'on it: CIM_Foo',
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command delete fails if instance provider rejects delete',
{'args': ['delete', 'CIM_Foo_sub_sub', '--include-instances']},
{'stderr': ['CIM_ERR_FAILED',
'Deletion of CIM_Foo_sub_sub instances is rejected'],
'rc': 1,
'test': 'innows'},
[SIMPLE_MOCK_FILE, 'reject_deleteinstance_provider.py'],
MOCK_SETUP_SUPPORTED],
['Verify class command delete using --namespace interop fails because of '
'instances',
['delete', 'CIM_ObjectManager', '-n', 'interop'],
{'stderr': ['Cannot delete class', 'instances'],
'rc': 1,
'test': 'innows'},
SIMPLE_INTEROP_MOCK_FILE, OK],
#
# command "class tree"
#
['Verify class command tree --help response',
['tree', '--help'],
{'stdout': CLASS_TREE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command tree -h response',
['tree', '-h'],
{'stdout': CLASS_TREE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command tree top down. Uses simple mock, no argument',
['tree'],
{'stdout': """root
+-- CIM_BaseEmb
| +-- CIM_FooEmb1
| +-- CIM_FooEmb2
| +-- CIM_FooEmb3
+-- CIM_BaseRef
| +-- CIM_FooRef1
| +-- CIM_FooRef2
+-- CIM_Foo
| +-- CIM_Foo_sub
| | +-- CIM_Foo_sub_sub
| +-- CIM_Foo_sub2
+-- CIM_FooAssoc
""",
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree top down starting at defined class ',
['tree', 'CIM_Foo_sub'],
{'stdout': """CIM_Foo_sub
+-- CIM_Foo_sub_sub
""",
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree top down starting at leaf class',
['tree', 'CIM_Foo_sub'],
{'stdout': """CIM_Foo_sub_sub
""",
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree bottom up. -s',
['tree', '-s', 'CIM_Foo_sub_sub'],
{'stdout': """root
+-- CIM_Foo
+-- CIM_Foo_sub
+-- CIM_Foo_sub_sub
""",
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree -s from top class',
['tree', '-s', 'CIM_Foo'],
{'stdout': """root
+-- CIM_Foo
""",
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree bottom up. --superclasses',
['tree', '--superclasses', 'CIM_Foo_sub_sub'],
{'stdout': """root
+-- CIM_Foo
+-- CIM_Foo_sub
+-- CIM_Foo_sub_sub
""",
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree with --detail',
['tree', '--detail'],
{'stdout': """root
+-- CIM_Foo (Version=2.30.0)
| +-- CIM_Foo_sub (Version=2.31.0)
| +-- CIM_Foo_sub_sub (Version=2.20.1)
+-- CIM_Foo_no_version ()
+-- CIM_Indication (Abstract,Indication,Version=2.24.0)
+-- CIM_Indication_no_version (Abstract,Indication)
+-- TST_Lineage (Association,Version=2.20.1)
+-- TST_Lineage_no_version (Association)
""",
'test': 'innows'},
TREE_TEST_MOCK_FILE, OK],
# class tree' error tests
['Verify class command tree with invalid CLASSNAME fails',
['tree', '-s', 'CIM_Foo_subx'],
{'stderr': ['CIMError:'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command tree with superclass option, CLASSNAME fails',
['tree', '-s'],
{'stderr': ['Error: CLASSNAME argument required for --superclasses '
'option'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
#
# associators command tests
#
#
['Verify class command associators --help response',
['associators', '--help'],
{'stdout': CLASS_ASSOCIATORS_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command associators -h response',
['associators', '-h'],
{'stdout': CLASS_ASSOCIATORS_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command associators simple request,',
['associators', 'TST_Person'],
{'stdout': [FAKEURL_STR + '/root/cimv2:TST_Person',
'class TST_Person {',
'',
' [Key ( true ),',
' Description ( "This is key prop" )]',
' string name;',
'',
' string extraProperty = "defaultvalue";',
'',
' [ValueMap { "1", "2" },',
' Values { "female", "male" }]',
' uint16 gender;',
'',
' [ValueMap { "1", "2" },',
' Values { "books", "movies" }]',
' uint16 likes[];',
'',
'};',
''],
'test': 'lines'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command associators simple request names only,',
['associators', 'TST_Person', '--names-only'],
{'stdout': [FAKEURL_STR + '/root/cimv2:TST_Person'],
'test': 'lines'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command associators simple request, one parameter',
['associators', 'TST_Person', '--ac', 'TST_MemberOfFamilyCollection'],
{'stdout': [FAKEURL_STR + '/root/cimv2:TST_Person',
'class TST_Person {',
'',
' [Key ( true ),',
' Description ( "This is key prop" )]',
' string name;',
'',
' string extraProperty = "defaultvalue";',
'',
' [ValueMap { "1", "2" },',
' Values { "female", "male" }]',
' uint16 gender;',
'',
' [ValueMap { "1", "2" },',
' Values { "books", "movies" }]',
' uint16 likes[];',
'',
'};',
''],
'test': 'lines'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command associators request, all filters long',
['associators', 'TST_Person',
'--assoc-class', 'TST_MemberOfFamilyCollection',
'--role', 'member',
'--result-role', 'family',
'--result-class', 'TST_Person'],
{'stdout': [FAKEURL_STR + '/root/cimv2:TST_Person',
'class TST_Person {',
'',
' [Key ( true ),',
' Description ( "This is key prop" )]',
' string name;',
'',
' string extraProperty = "defaultvalue";',
'',
' [ValueMap { "1", "2" },',
' Values { "female", "male" }]',
' uint16 gender;',
'',
' [ValueMap { "1", "2" },',
' Values { "books", "movies" }]',
' uint16 likes[];',
'',
'};',
''],
'test': 'lines'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command associators request, all filters short',
['associators', 'TST_Person',
'--ac', 'TST_MemberOfFamilyCollection',
'-r', 'member',
'--rr', 'family',
'--rc', 'TST_Person'],
{'stdout': [FAKEURL_STR + '/root/cimv2:TST_Person',
'class TST_Person {',
'',
' [Key ( true ),',
' Description ( "This is key prop" )]',
' string name;',
'',
' string extraProperty = "defaultvalue";',
'',
' [ValueMap { "1", "2" },',
' Values { "female", "male" }]',
' uint16 gender;',
'',
' [ValueMap { "1", "2" },',
' Values { "books", "movies" }]',
' uint16 likes[];',
'',
'};',
''],
'test': 'lines'},
SIMPLE_ASSOC_MOCK_FILE, OK],
# Behavior changed pywbem 0.15.0 to exception rtn
['Verify class command associators request, all filters short, -ac '
'not valid class',
['associators', 'TST_Person',
'--ac', 'TST_MemberOfFamilyCollectionx',
'-r', 'member',
'--rr', 'family',
'--rc', 'TST_Person'],
{'stderr': ['CIM_ERR_INVALID_PARAMETER'],
'rc': 1,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
# Behavior changed pywbem 0.15.0 to exception rtn
['Verify class command associators request, all filters short, -r '
'not valid role',
['associators', 'TST_Person',
'--ac', 'TST_MemberOfFamilyCollection',
'-r', 'memberx',
'--rr', 'family',
'--rc', 'TST_Person'],
{'stdout': [],
'test': 'lines'},
SIMPLE_ASSOC_MOCK_FILE, OK],
# Behavior changed pywbem 0.15.0 to exception rtn
['Verify class command associators request, all filters short, --rc '
'does not valid class',
['associators', 'TST_Person',
'--ac', 'TST_MemberOfFamilyCollection',
'-r', 'member',
'--rr', 'family',
'--rc', 'TST_Personx'],
{'stderr': ['CIM_ERR_INVALID_PARAMETER'],
'rc': 1,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
# Behavior changed pywbem 0.15.0 to exception rtn
['Verify class command associators request, all filters long '
'does not pass test',
['associators', 'TST_Person',
'--assoc-class', 'TST_MemberOfFamilyCollection',
'--role', 'member',
'--result-role', 'family',
'--result-class', 'TST_Personx'],
{'stderr': ['CIM_ERR_INVALID_PARAMETER'],
'rc': 1,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
# Associator errors
['Verify class command associators no CLASSNAME',
['associators'],
{'stderr': ['Error: Missing argument .CLASSNAME.'],
'rc': 2,
'test': 'regex'},
None, OK],
# Behavior changed pywbem 0.15.0 to exception rtn
['Verify class command associators non-existent CLASSNAME rtns error',
['associators', 'CIM_Nonexistentclass'],
{'stderr': ["CIM_ERR_INVALID_PARAMETER"],
'rc': 1,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command associators non-existent namespace fails',
['associators', 'TST_Person', '--namespace', 'blah'],
{'stderr': ['CIMError', 'CIM_ERR_INVALID_NAMESPACE'],
'rc': 1,
'test': 'regex'},
SIMPLE_ASSOC_MOCK_FILE, OK],
#
# references command tests
#
['Verify class command references --help response',
['references', '--help'],
{'stdout': CLASS_REFERENCES_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command references -h response',
['references', '-h'],
{'stdout': CLASS_REFERENCES_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command references simple request',
['references', 'TST_Person'],
{'stdout': ['class TST_Lineage {',
'Lineage defines the relationship',
'string InstanceID;',
'TST_Person REF parent;',
'TST_Person REF child;',
'[Association ( true )',
'Description ( " Family gathers person to family." )',
'class TST_MemberOfFamilyCollection {',
'[key ( true )]',
'TST_Person REF family;',
'TST_Person REF member;',
],
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command references simple request -o',
['references', 'TST_Person', '--no'],
{'stdout': [FAKEURL_STR + '/root/cimv2:TST_Lineage',
FAKEURL_STR + '/root/cimv2:TST_MemberOfFamilyCollection'],
'test': 'linesnows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command references request, all filters long',
['references', 'TST_Person',
'--role', 'member',
'--result-class', 'TST_MemberOfFamilyCollection'],
{'stdout': REFERENCES_CLASS_RTN_QUALS2,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command references request, filters short',
['references', 'TST_Person',
'-r', 'member',
'--rc', 'TST_MemberOfFamilyCollection'],
{'stdout': REFERENCES_CLASS_RTN_QUALS2,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command refereces table output fails).',
{'args': ['associators', 'TST_Person'],
'general': ['--output-format', 'table']},
{'stderr': ['Output format "table" ', 'not allowed', 'Only CIM formats:'],
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
# Reference errors
['Verify class command references no CLASSNAME',
['references'],
{'stderr': ['Error: Missing argument .CLASSNAME.'],
'rc': 2,
'test': 'regex'},
None, OK],
# Behavior changed pywbem 0.15.0, references bad param rtns except.
['Verify class command references non-existent CLASSNAME rtns error',
['references', 'CIM_Nonexistentclass'],
{'stderr': ["CIM_ERR_INVALID_PARAMETER"],
'rc': 1,
'test': 'innows'},
SIMPLE_ASSOC_MOCK_FILE, OK],
['Verify class command references non-existent namespace fails',
['references', 'TST_Person', '--namespace', 'blah'],
{'stderr': ['CIMError', 'CIM_ERR_INVALID_NAMESPACE'],
'rc': 1,
'test': 'regex'},
SIMPLE_ASSOC_MOCK_FILE, OK],
#
# invokemethod command tests
#
['Verify class command invokemethod --help response',
['invokemethod', '--help'],
{'stdout': CLASS_INVOKEMETHOD_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command invokemethod -h response',
['invokemethod', '-h'],
{'stdout': CLASS_INVOKEMETHOD_HELP_LINES,
'test': 'innows'},
None, OK],
#
# class invokemethod command without parameters
#
['Verify class command invokemethod CIM_Foo.FuzzyStatic() - no in parms',
['invokemethod', 'CIM_Foo', 'FuzzyStatic'],
{'stdout': ["ReturnValue=0"],
'rc': 0,
'test': 'lines'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], OK],
['Verify class command invokemethod CIM_Foo.FuzzyStatic() with --namespace',
['invokemethod', 'CIM_Foo', 'FuzzyStatic', '--namespace', 'root/cimv2'],
{'stdout': ["ReturnValue=0"],
'rc': 0,
'test': 'lines'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], OK],
# Cannot do a test with interop as default because of issue #991
['Verify class command invokemethod CIM_Foo.FuzzyStatic() with --namespace'
' interop not found to validate that --namspace used',
['invokemethod', 'CIM_Foo', 'FuzzyStatic', '--namespace', 'interop'],
{'stderr': ["CIM_ERR_NOT_FOUND", "not found in namespace 'interop'"],
'rc': 1,
'test': 'innows'},
[SIMPLE_INTEROP_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], OK],
['Verify class command invokemethod CIM_Foo.FuzzyStatic() - one in parm',
['invokemethod', 'CIM_Foo', 'FuzzyStatic',
'-p', 'TestInOutParameter="blah"'],
{'stdout': ['ReturnValue=0',
'TestInOutParameter=', 'blah'],
'rc': 0,
'test': 'innows'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], OK],
['Verify class command invokemethod fails Invalid Class',
['invokemethod', 'CIM_Foox', 'FuzzyStatic'],
{'stderr': ['CIMError', '6'],
'rc': 1,
'test': 'innows'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], OK],
['Verify class command invokemethod fails Invalid Method',
['invokemethod', 'CIM_Foo', 'Fuzzyx'],
{'stderr': ['Class CIM_Foo does not have a method Fuzzyx'],
'rc': 1,
'test': 'innows'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], OK],
['Verify class command invokemethod fails non-static method, pywbem 1.0',
['invokemethod', 'CIM_Foo', 'Fuzzy'],
{'stderr': ["Non-static method 'Fuzzy' in class 'CIM_Foo'"],
'rc': 1,
'test': 'innows'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], PYWBEM_1_0_0],
['Verify class command invokemethod succeeds non-static method, pywbem 0.x',
['invokemethod', 'CIM_Foo', 'Fuzzy'],
{'stdout': ['ReturnValue=0'],
'rc': 0,
'test': 'innows'},
[SIMPLE_MOCK_FILE, INVOKE_METHOD_MOCK_FILE], not PYWBEM_1_0_0],
['Verify class command invokemethod fails Method not registered',
['invokemethod', 'CIM_Foo', 'Fuzzy'],
{'stderr': ['CIMError'],
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify --timestats gets stats output. Cannot test with lines,execution '
'time is variable.',
{'args': ['get', 'CIM_Foo'],
'general': ['--timestats']},
{'stdout': ['Operation Count Errors',
'GetClass 1 0'],
'rc': 0,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
['Verify single command with stdin works',
{'stdin': ['class get -h']},
{'stdout': ['Usage: pywbemcli [GENERAL-OPTIONS] class get '],
'rc': 0,
'test': 'innows'},
None, OK],
['Verify multiple commands with stdin work',
{'stdin': ['class get -h', 'class enumerate -h']},
{'stdout': ['Usage: pywbemcli [GENERAL-OPTIONS] class enumerate ',
'Usage: pywbemcli [GENERAL-OPTIONS] class get '],
'rc': 0,
'test': 'innows'},
None, OK],
]
# TODO command class delete. Extend this test to use stdin (delete, test)
# namespace
# TODO: add test for errors: class invalid, namespace invalid
# other tests. Test local-only on top level
| 37.241451 | 114 | 0.554639 |
09887c8ffc4485168a4cf1dc2d552eb82e642cda | 713 | py | Python | src/python/T0/WMBS/Oracle/RunConfig/InsertRecoReleaseConfig.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | [
"Apache-2.0"
] | 6 | 2016-03-09T14:36:19.000Z | 2021-07-27T01:28:00.000Z | src/python/T0/WMBS/Oracle/RunConfig/InsertRecoReleaseConfig.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | [
"Apache-2.0"
] | 193 | 2015-01-07T21:03:43.000Z | 2022-03-31T12:22:18.000Z | src/python/T0/WMBS/Oracle/RunConfig/InsertRecoReleaseConfig.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | [
"Apache-2.0"
] | 36 | 2015-01-28T19:01:54.000Z | 2021-12-15T17:18:20.000Z | """
_InsertRecoReleaseConfig_
Oracle implementation of InsertRecoReleaseConfig
"""
from WMCore.Database.DBFormatter import DBFormatter
| 28.52 | 79 | 0.548387 |
0988ffb2a91dd9ac6ea127ee5939338c9d7b530e | 1,652 | py | Python | split_wav.py | tanacchi/sound-dataset-generator | a74363c35652dbb7e7cb2dfd390cf89302f3827e | [
"MIT"
] | 1 | 2020-12-02T02:31:33.000Z | 2020-12-02T02:31:33.000Z | split_wav.py | tanacchi/sound_dataset_generator | a74363c35652dbb7e7cb2dfd390cf89302f3827e | [
"MIT"
] | null | null | null | split_wav.py | tanacchi/sound_dataset_generator | a74363c35652dbb7e7cb2dfd390cf89302f3827e | [
"MIT"
] | null | null | null | import wave
"""Split every WAV file in ./downloads into overlapping fixed-length clips.

Each input file is cut into clips of --length seconds, with a new clip
starting every --offset seconds; clips are written to ./output as
<original-name>_<NNNNNNNNN>.wav.
"""
import argparse
import os
import wave
from glob import glob


def split_wav_file(base_filepath, output_dir, unit_time_length, start_time_offset):
    """Cut *base_filepath* into overlapping clips inside *output_dir*.

    unit_time_length -- clip duration in seconds.
    start_time_offset -- seconds between the starts of consecutive clips.
    The last clips may be shorter than unit_time_length when the source
    runs out of audio.
    """
    base_filename = os.path.basename(base_filepath)
    print(f"Processing for {base_filename}...")
    with wave.open(base_filepath, "rb") as wave_read:
        params = wave_read.getparams()
        data_raw = wave_read.readframes(params.nframes)
    # One frame holds one sample per channel; all slicing below is in bytes.
    bytes_per_frame = params.nchannels * params.sampwidth
    unit_nbytes = unit_time_length * params.framerate * bytes_per_frame
    offset_nbytes = start_time_offset * params.framerate * bytes_per_frame
    file_count = 0
    for start in range(0, len(data_raw), offset_nbytes):
        file_count += 1
        picked_data = data_raw[start:start + unit_nbytes]
        output_filename = os.path.join(
            output_dir, f"{base_filename}_{file_count:09}.wav")
        with wave.open(output_filename, "wb") as wave_write:
            # nframes must be a frame count, not a byte count (the original
            # passed len(picked_data); wave patches the header on close, but
            # set it correctly anyway).
            wave_write.setparams((
                params.nchannels, params.sampwidth, params.framerate,
                len(picked_data) // bytes_per_frame,
                params.comptype, params.compname
            ))
            wave_write.writeframes(picked_data)
    # os.remove(base_filepath)
    print("Done.")


def main():
    """Parse command-line options and split every WAV in ./downloads."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--length", type=int, default=30)
    parser.add_argument("--offset", type=int, default=15)
    args = parser.parse_args()

    output_dir = os.path.join(".", "output")
    os.makedirs(output_dir, exist_ok=True)
    downloads_dir = os.path.join(".", "downloads")
    for base_filepath in glob(os.path.join(downloads_dir, "*.wav")):
        split_wav_file(base_filepath, output_dir, args.length, args.offset)


if __name__ == "__main__":
    main()
| 33.714286 | 99 | 0.696126 |
0989772d33a2049d0a4c7626cd1168ceea9a9af6 | 726 | py | Python | main.py | Code-Cecilia/DownloadArr | c61940e1e4c2ade9d1da4f2467b72dc8b24abcfd | [
"MIT"
] | 1 | 2022-01-30T15:28:19.000Z | 2022-01-30T15:28:19.000Z | main.py | Code-Cecilia/DownloadArr | c61940e1e4c2ade9d1da4f2467b72dc8b24abcfd | [
"MIT"
] | null | null | null | main.py | Code-Cecilia/DownloadArr | c61940e1e4c2ade9d1da4f2467b72dc8b24abcfd | [
"MIT"
] | null | null | null | import sys
"""Entry point: connect to a qBittorrent WebUI and launch the interactive UI.

Connection settings are read from config.json; any value that is missing
or empty there is prompted for interactively (the password without echo).
"""
import getpass
import json
import sys

import qbittorrentapi

from ui import UI

print("Connecting to qBittorrent WebUI...")
try:
    with open('config.json') as json_file:
        config = json.load(json_file)
except (OSError, json.JSONDecodeError) as e:
    # A missing or corrupt config file used to die with a raw traceback.
    print("Could not read config.json:", e)
    sys.exit(1)

# A missing key is treated the same as an empty value: ask the user.
host = config.get('host') or input("Enter host: ")
username = config.get('username') or input("Enter username: ")
# getpass keeps the password off the terminal, unlike plain input().
password = config.get('password') or getpass.getpass("Enter password: ")

torrent_client = qbittorrentapi.Client(host=host, username=username, password=password)
try:
    torrent_client.auth_log_in()
except qbittorrentapi.LoginFailed as e:
    print("Failed to connect:", e)
    sys.exit(1)

ui = UI(torrent_client)
ui.run()
| 22 | 88 | 0.673554 |
098a8775723a6e3a315440de72e96cd1befcdb31 | 2,454 | py | Python | ex075A.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | ex075A.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | ex075A.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | from tkinter import *
"""Tkinter GUI for CeV exercise 075: read four numbers and report on them."""
from tkinter import *

janela = Tk()
lista = []
texto1 = StringVar()
texto2 = StringVar()
texto3 = StringVar()
texto4 = StringVar()


# --------------------- COMMAND PROCESSING ---------------------
def click_bt1():
    """Read the four entry fields and show the Ex075 report in txt1.

    NOTE(review): the PROCESSAR button referenced click_bt1 but the
    handler body was missing from this copy of the file; it was
    reconstructed from the classic Ex075 statement (how many times 9
    appears, whether 3 is in the first position, which values are
    even) -- confirm against the intended behaviour.
    """
    lista.clear()
    for texto in (texto1, texto2, texto3, texto4):
        lista.append(int(texto.get()))
    txt1.delete('1.0', END)
    txt1.insert(END, f'Valores digitados: {lista}\n')
    txt1.insert(END, f'O valor 9 apareceu {lista.count(9)} vezes\n')
    if lista[0] == 3:
        txt1.insert(END, 'O valor 3 apareceu na primeira posicao\n')
    else:
        txt1.insert(END, 'O valor 3 nao apareceu na primeira posicao\n')
    pares = [valor for valor in lista if valor % 2 == 0]
    txt1.insert(END, f'Valores pares digitados: {pares}\n')
# ---------------------------------------------------------------


# --------------------- WIDGET CREATION ---------------------
lb1 = Label(janela, text='Digite o primeiro nmero: ')
# 'stick' is not a valid grid() option (raises TclError); fixed to 'sticky',
# matching the Entry widgets below which already use sticky=E.
lb1.grid(row=0, column=0, sticky=W)
lb2 = Label(janela, text='Digite o segundo nmero: ')
lb2.grid(row=1, column=0, sticky=W)
lb3 = Label(janela, text='Digite o terceiro nmero: ')
lb3.grid(row=2, column=0, sticky=W)
lb4 = Label(janela, text='Digite o quarto nmero: ')
lb4.grid(row=3, column=0, sticky=W)
et1 = Entry(janela, textvariable=texto1, width=5)
et1.grid(row=0, column=1, sticky=E)
et2 = Entry(janela, textvariable=texto2, width=5)
et2.grid(row=1, column=1, sticky=E)
et3 = Entry(janela, textvariable=texto3, width=5)
et3.grid(row=2, column=1, sticky=E)
et4 = Entry(janela, textvariable=texto4, width=5)
et4.grid(row=3, column=1, sticky=E)
bt1 = Button(janela, text='PROCESSAR', font=('arialblack', 11, 'bold'), command=click_bt1)
bt1.grid(row=0, column=2, rowspan=4)
txt1 = Text(janela, width=40, height=10, bd=5)
txt1.grid(row=5, column=0, columnspan=3)
# ------------------- WINDOW SIZING AND CENTERING -------------------
janela.title('Exercicio - Ex075')
janela_width = 330
janela_height = 260
scream_width = janela.winfo_screenwidth()
scream_height = janela.winfo_screenheight()
cord_x = int((scream_width / 2) - (janela_width / 2))
cord_y = int((scream_height / 2) - (janela_height / 2))
janela.geometry(f'{janela_width}x{janela_height}+{cord_x}+{cord_y}')
janela.mainloop()
098be8e0625336c24e5f9477904a571723ffcc00 | 247 | py | Python | src/main/Computer.py | aseruneko/diceforge-ai | 546c4a62410f2b0da4de152f36245b22520304dc | [
"CC0-1.0"
] | 3 | 2020-05-14T03:31:13.000Z | 2020-05-14T04:20:19.000Z | src/main/Computer.py | aseruneko/diceforge-ai | 546c4a62410f2b0da4de152f36245b22520304dc | [
"CC0-1.0"
] | 23 | 2020-05-14T03:24:40.000Z | 2020-05-30T11:57:04.000Z | src/main/Computer.py | aseruneko/diceforge-ai | 546c4a62410f2b0da4de152f36245b22520304dc | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
NPC
"""
"""
- Player
-
- AI
"""
__author__ = "aseruneko"
__date__ = "28 May 2020"
from main.Player import Player | 12.35 | 30 | 0.672065 |