hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c8f400891a861906013fd78e255d4aff2e9b28fa | 5,840 | py | Python | extractexamples.py | afcarl/contra | a82d14342b242cba7a8298d54aa0dabd9d77269d | [
"0BSD"
] | 1 | 2019-04-22T16:56:07.000Z | 2019-04-22T16:56:07.000Z | extractexamples.py | afcarl/contra | a82d14342b242cba7a8298d54aa0dabd9d77269d | [
"0BSD"
] | null | null | null | extractexamples.py | afcarl/contra | a82d14342b242cba7a8298d54aa0dabd9d77269d | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
# Extracts examples of given strings with context in a TAB-separated
# field format from given text documents.
from __future__ import with_statement
import sys
import re
from os import path
# Populated at runtime by the command-line option parser; module-level so
# helper functions can read parsed options without passing them around.
options = None
if __name__ == "__main__":
    # NOTE(review): main() is not visible in this fragment -- it is expected
    # to be defined elsewhere in the full source file.
    sys.exit(main(sys.argv))
| 37.677419 | 175 | 0.55976 |
c8f61ba84ff26314734e24f05cd833da5e3ee801 | 2,813 | py | Python | pymtl/tools/translation/verilog_bug_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
] | 206 | 2015-01-05T21:53:56.000Z | 2022-03-14T08:04:49.000Z | pymtl/tools/translation/verilog_bug_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
] | 84 | 2015-01-25T19:57:33.000Z | 2021-05-11T15:46:56.000Z | pymtl/tools/translation/verilog_bug_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
] | 99 | 2015-02-17T17:43:44.000Z | 2022-02-14T17:58:18.000Z | #=======================================================================
# verilog_bug_test.py
#=======================================================================
import pytest
from pymtl import *
from exceptions import VerilatorCompileError
pytestmark = requires_verilator
#-----------------------------------------------------------------------
# Point BitStruct
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# setup_sim
#-----------------------------------------------------------------------
def setup_sim( model ):
  """Translate *model* to Verilog, elaborate it, and attach a simulator.

  Returns the ``(translated_model, simulator)`` pair used by the tests
  in this module.
  """
  translated = TranslationTool( model )
  translated.elaborate()
  simulator = SimulationTool( translated )
  return translated, simulator
#-----------------------------------------------------------------------
# test_bitstruct_tick_reg
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# test_verilator_compile_error
#-----------------------------------------------------------------------
def test_verilator_compile_error( ):
  """Translating TestVerilatorCompileError must raise VerilatorCompileError.

  The model is intentionally invalid so Verilator rejects it; the test only
  passes if that failure surfaces as the dedicated exception type.
  """
  with pytest.raises( VerilatorCompileError ):
    # NOTE(review): TestVerilatorCompileError is defined elsewhere in the
    # full source file (its definition is not visible in this fragment).
    model = TestVerilatorCompileError()
    model, sim = setup_sim( model )
| 29 | 72 | 0.460363 |
c8f667d55a6083981558407ab139318c270d5ca3 | 436 | py | Python | library/TraverseDirectory-M2.py | remytanx/python3-created-in-github | 83b3dd0f36da6fc4df7c1cc37cac12f178f985a3 | [
"MIT"
] | null | null | null | library/TraverseDirectory-M2.py | remytanx/python3-created-in-github | 83b3dd0f36da6fc4df7c1cc37cac12f178f985a3 | [
"MIT"
] | null | null | null | library/TraverseDirectory-M2.py | remytanx/python3-created-in-github | 83b3dd0f36da6fc4df7c1cc37cac12f178f985a3 | [
"MIT"
] | null | null | null | import os
# Get the list of all files with a specific extension
# In this example, we will take a path of a directory and try to
# list all the files, with a specific extension .py here,
# in the directory and its sub-directories recursively.
path = r'C:\Users\10900225\Documents\Witch\BTX\Workspaces\Library'
for root, dirs, files in os.walk(path):
for file in files:
if(file.endswith(".py")):
print(os.path.join(root,file)) | 33.538462 | 66 | 0.733945 |
c8f71840564fdc1ff2e1787b21b4d5173407d801 | 1,509 | py | Python | Modules/carlosma7/wizard/create_appointment.py | Carlosma7/Odoo | c234fcc18d15d4d8369e237286bee610fd76ceee | [
"CC0-1.0"
] | null | null | null | Modules/carlosma7/wizard/create_appointment.py | Carlosma7/Odoo | c234fcc18d15d4d8369e237286bee610fd76ceee | [
"CC0-1.0"
] | null | null | null | Modules/carlosma7/wizard/create_appointment.py | Carlosma7/Odoo | c234fcc18d15d4d8369e237286bee610fd76ceee | [
"CC0-1.0"
] | null | null | null | #-*- coding: utf-8-*-
from odoo import api, fields, models, _
# Wizard class | 29.019231 | 97 | 0.686547 |
c8f838e818d81e237d9d5d8fa11595a921a6fae3 | 4,731 | py | Python | groups.py | davidmehren/udm_group_matrix | ae71feef4bf299588aa473c95e9073c7d2f5f23e | [
"MIT"
] | null | null | null | groups.py | davidmehren/udm_group_matrix | ae71feef4bf299588aa473c95e9073c7d2f5f23e | [
"MIT"
] | null | null | null | groups.py | davidmehren/udm_group_matrix | ae71feef4bf299588aa473c95e9073c7d2f5f23e | [
"MIT"
] | 1 | 2019-12-06T14:59:39.000Z | 2019-12-06T14:59:39.000Z | #!/bin/env python3
import re
from typing import List
import numpy as np
import matplotlib.pyplot as plt
filtered_users = ["join-backup", "join-slave", "ucs-sso"]
filtered_groups = ["computers", "dc backup hosts", "dc slave hosts"]
def read_groupdump():
    """Parse ``groupdump.txt`` (an LDAP search dump) into an LDAPGroupList.

    The file is read line by line as a small state machine: a ``DN`` line
    opens a new group, and the attribute lines that follow (`` users``,
    `` nestedGroup``, `` sambaRID:``) are attached to that current group.

    NOTE(review): an attribute line appearing before the first ``DN`` line
    would hit ``current_group`` while it is still None -- assumes the dump
    always starts with a DN entry.
    """
    _group_list = LDAPGroupList()
    with open("groupdump.txt", "r") as file:
        current_group = None
        for line in file:
            if line == "\n":
                continue
            if line.startswith("DN"):
                # The first cn= component of the DN is the group name.
                current_group = LDAPGroup(re.findall(r"cn=(.*?),", line)[0])
                _group_list.add(current_group)
                # NOTE(review): this ``continue`` only skips the remaining
                # checks for the DN line itself (which could not match them
                # anyway); attribute lines of dns-/ucs-/join- groups are
                # still processed on later iterations, so the intended
                # infrastructure-group filtering appears ineffective.
                if current_group.name.startswith("dns-") or current_group.name.startswith(
                        "ucs-") or current_group.name.startswith("join-"):
                    continue
            if line.startswith(" users"):
                # Member users are referenced by their uid= component.
                user = LDAPUser(re.findall(r"uid=(.*?),", line)[0])
                current_group.add_member(user)
            if line.startswith(" nestedGroup"):
                # Nested groups are stored by name only; resolved later.
                subgroup = re.findall(r"cn=(.*?),", line)[0]
                current_group.add_subgroup(subgroup)
            if line.startswith(" sambaRID:"):
                # First run of 1-4 digits on the line; presumably the RID
                # value -- TODO confirm against the actual dump format.
                rid = re.findall(r"([0-9]{1,4})", line)[0]
                current_group.samba_rid = int(rid)
    return _group_list
def paint_matrix(groups: LDAPGroupList):
    """Render a group/user membership matrix and write it to ``groups.png``.

    Columns are groups (in ``groups.content`` order), rows are users sorted
    in reverse order; a filled cell marks membership.
    """
    users = sorted(groups.get_user_list(), reverse=True)
    n_groups = len(groups.content)
    n_users = len(users)
    membership = np.zeros((n_groups, n_users))
    for col, grp in enumerate(groups.content):
        for member in grp.members:
            membership[col][users.index(member)] = 1
    # Transposed so groups run along x and users along y.
    plt.pcolor(membership.T, edgecolors='k', cmap="Greys", vmin=0, vmax=1)
    # Centre each tick label on its cell (offset by half a cell).
    plt.xticks([i + 0.5 for i in range(n_groups)],
               [grp.name for grp in groups.content],
               rotation=45, fontsize=4, ha="right")
    plt.yticks([i + 0.5 for i in range(n_users)],
               [u.name for u in users],
               fontsize=2)
    plt.tight_layout()
    plt.savefig("groups.png", dpi=600)
if __name__ == '__main__':
    groups = read_groupdump()
    # Presumably resolves the nestedGroup name strings collected during
    # parsing into references to the real group objects -- confirm against
    # LDAPGroup.parse_subgroups.
    for group in groups.content:
        group.parse_subgroups(groups)
    groups.tidy()  # post-processing defined on LDAPGroupList (not shown here)
    paint_matrix(groups)
| 31.125 | 106 | 0.581484 |
c8f87a26e0ea3211d6cafee5a76cf221fb9382c8 | 107,098 | py | Python | src/dbobjects.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
] | null | null | null | src/dbobjects.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
] | 7 | 2016-08-12T15:12:43.000Z | 2020-06-07T03:19:13.000Z | src/dbobjects.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
] | null | null | null | from sqlalchemy import create_engine, Column, Integer, BigInteger, String, Boolean, MetaData, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.types import DateTime, Date, Interval
from sqlalchemy.pool import NullPool
from .conf import settings
from logging import Logger
print("loaded dbobjects module")
#class DeduplicationLink(DB.Base, MapBase):
def test():
    """Smoke-test the ORM setup: wipe the database, create the schema, and
    insert a single Source row.

    WARNING: calls ``blank_database()``, which is destructive.

    NOTE(review): ``DB`` and ``Source`` are defined elsewhere in the full
    file (their class definitions are not visible in this fragment).
    """
    from . import postgresutils
    utils = postgresutils.Utils()
    utils.blank_database()
    print("instantiating db")
    db = DB()
    session = db.Session()
    # Create all mapped tables on the PostgreSQL engine.
    db.Base.metadata.create_all(db.pg_db_engine)
    new = Source(source_id_id_num = 1, source_name='Orange County Corrections')
    session.add(new)
    session.commit()
    print("done")
if __name__ == "__main__":
    # Allow running this module directly as a one-off schema smoke test.
    import sys
    sys.exit(test())
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE
| 49.789865 | 176 | 0.757755 |
c8f8cdcd902d01952d5a7b8a680f7b6b5e1cd1d5 | 3,154 | py | Python | ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/beads.py | mikebourbeauart/PerlerPrinter | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | [
"MIT"
] | null | null | null | ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/beads.py | mikebourbeauart/PerlerPrinter | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | [
"MIT"
] | 2 | 2021-09-07T23:43:53.000Z | 2022-01-13T00:39:55.000Z | ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/beads.py | mikebourbeauart/PerlerPrinter | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | [
"MIT"
] | 1 | 2019-10-21T17:12:07.000Z | 2019-10-21T17:12:07.000Z | import Image
from ImageColor import getrgb
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import uuid
# Physical bead/pegboard dimensions (reportlab ``mm`` units).
BEAD_RADIUS = 1.75*mm
BEAD_THICKNESS = 1*mm
BOARD_SPACING = 4.85*mm
BOARD_BORDER = 4*mm
# Approximate bead capacity per paper size (width x height = total beads):
# A4 60x43 = 2580
# A3 86x60 = 5160
# A2 86x120 = 10,320
# MARQUEE A4+A4 = 120x43
colours = beadColours()  # NOTE(review): defined elsewhere in the full file
# Read the image to get its dimensions before processing.
try:
    im = Image.open("images\\pikachu.gif")
    image_width = im.size[0]
    image_height = im.size[1]
    image_format = im.format
except IOError:
    # NOTE(review): execution continues after this message and will fail
    # later on the undefined ``image_width`` -- consider exiting here.
    print "Error opening file"
# One uniquely named PDF per run.
out_file = 'result%s.pdf' % uuid.uuid1()
pdf = canvas.Canvas(out_file, pagesize=A4)
a4_width, a4_height = A4
# (The orientation-swap logic was disabled; for now the page size is
# generated directly from the image dimensions plus the border.)
width = (image_width * BOARD_SPACING) + (BOARD_BORDER * 2)
height = (image_height * BOARD_SPACING) + (BOARD_BORDER * 2)
if width < a4_width and width < a4_height:
    height = a4_height
pdf.setPageSize((width, height))
im = im.convert('RGB')
data = list(im.getdata())
list_pos = 0
# Draw one bead (stroked circle) per pixel; pixel (0,0) maps to the top
# of the page, so y positions decrease down the page.
for y in range(0, im.size[1]):
    pos_y = height - BOARD_BORDER - (y * BOARD_SPACING)
    for x in range(0, im.size[0]):
        r = data[list_pos][0]
        g = data[list_pos][1]
        b = data[list_pos][2]
        # Snap the pixel colour to the closest available bead colour.
        r, g, b = colours.bestMatch(r,g,b)
        pos_x = BOARD_BORDER + (x * BOARD_SPACING)
        pdf.setLineWidth(BEAD_THICKNESS)
        pdf.setStrokeColorRGB(float(r)/255,float(g)/255,float(b)/255)
        pdf.circle(pos_x, pos_y, BEAD_RADIUS, stroke=1, fill=0)
        # Light colours need a thin black outline to stay visible on paper.
        if r + g + b >= 750:
            pdf.setLineWidth(0.25*mm)
            pdf.setStrokeColorRGB(0,0,0)
            pdf.circle(pos_x, pos_y, BEAD_RADIUS + (BEAD_THICKNESS / 2), stroke=1, fill=0)
            pdf.circle(pos_x, pos_y, BEAD_RADIUS - (BEAD_THICKNESS / 2), stroke=1, fill=0)
        list_pos += 1
pdf.showPage()
pdf.save()
| 28.414414 | 84 | 0.616677 |
c8f8f117d6dace7d4b6c578a60f491f9e6393f0d | 1,836 | py | Python | common_tools/report_dialog.py | jamiecook/AequilibraE | b1013d59cbeaf6fc4e1a944cf31f20460a2a4156 | [
"MIT"
] | null | null | null | common_tools/report_dialog.py | jamiecook/AequilibraE | b1013d59cbeaf6fc4e1a944cf31f20460a2a4156 | [
"MIT"
] | null | null | null | common_tools/report_dialog.py | jamiecook/AequilibraE | b1013d59cbeaf6fc4e1a944cf31f20460a2a4156 | [
"MIT"
] | null | null | null | """
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Report dialog
Purpose: Dialog for showing the report from algorithm runs
Original Author: Pedro Camargo (c@margo.co)
Contributors:
Last edited by: Pedro Camargo
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2014-03-19
Updated: 30/09/2016
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from qgis.core import *
from PyQt4 import QtGui, uic
from PyQt4.QtGui import *
import sys
import os
from auxiliary_functions import standard_path
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_report.ui'))
| 30.6 | 108 | 0.56427 |
c8f9b47386e455dd9e70d1f591e4c141b1b8e828 | 21,580 | py | Python | gui/robot_data_visualizer.py | wh1210/robot-data-visualizer | ebb59687233a8d09c8ed327c66ed1d69c4623136 | [
"MIT"
] | null | null | null | gui/robot_data_visualizer.py | wh1210/robot-data-visualizer | ebb59687233a8d09c8ed327c66ed1d69c4623136 | [
"MIT"
] | 13 | 2018-11-20T22:55:39.000Z | 2022-03-11T23:36:18.000Z | gui/robot_data_visualizer.py | wh1210/robot-data-visualizer | ebb59687233a8d09c8ed327c66ed1d69c4623136 | [
"MIT"
] | 2 | 2018-11-09T01:48:07.000Z | 2018-12-29T23:10:53.000Z | import os
import sys
sys.path.append('.')
sys.path.append('..')
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.lines as lines
import matplotlib.image as mpimg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkinter as tk
from tools.get_dates_umich import get_dates_umich
from tools.staticmap_for_gps import map_for_gps
from tools.data_manager import DataManager
from tools.view_lidar import hokuyo_plot
from tools.view_lidar import threshold_lidar_pts
if __name__ == '__main__':
    # MainWindow is presumably the tkinter application class defined
    # elsewhere in the full file (the fragment imports tkinter and calls
    # mainloop()); None is passed as its parent argument.
    app = MainWindow(None)
    app.mainloop()
| 34.091627 | 117 | 0.580445 |
c8fa3bb594a67f398ad5e9f8e305ca9da2fda5ed | 1,780 | py | Python | day10/day10.py | BroderickCarlin/AdventOfCode | 52d12d16f3d291a51984e6d85dbe97e604abc005 | [
"MIT"
] | null | null | null | day10/day10.py | BroderickCarlin/AdventOfCode | 52d12d16f3d291a51984e6d85dbe97e604abc005 | [
"MIT"
] | null | null | null | day10/day10.py | BroderickCarlin/AdventOfCode | 52d12d16f3d291a51984e6d85dbe97e604abc005 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Puzzle input: comma-separated knot-hash lengths.
lengths = "187,254,0,81,169,219,1,190,19,102,255,56,46,32,2,216"
# Standard suffix appended to the length sequence for part 2 (per the
# Advent of Code day 10 knot-hash specification).
suffix = [17, 31, 73, 47, 23]
num_rounds = 64
if __name__ == "__main__":
    # NOTE(review): puzzle1/puzzle2 are defined elsewhere in the full file.
    print("1: {}".format(puzzle1()))
    print("2: {}".format(puzzle2()))
| 24.383562 | 64 | 0.455056 |
c8fc7cc35ebc665797970c840fc5d039b1988b5c | 1,914 | py | Python | 17tensorflow/tf2/2my_model.py | cheerfulwang/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | [
"Apache-2.0"
] | 2 | 2021-01-04T10:44:44.000Z | 2022-02-13T07:53:41.000Z | 17tensorflow/tf2/2my_model.py | zm79287/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | [
"Apache-2.0"
] | null | null | null | 17tensorflow/tf2/2my_model.py | zm79287/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | [
"Apache-2.0"
] | 2 | 2020-11-23T08:58:51.000Z | 2022-02-13T07:53:42.000Z | # -*- coding: utf-8 -*-
"""
@author:XuMingxuming624@qq.com)
@description:
"""
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
# Vocabulary and label-space sizes for this synthetic multi-input example.
num_words = 2000
num_tags = 12
num_departments = 4
# Three inputs: free-text body and title (variable-length token-id
# sequences) plus a fixed-size multi-hot tag vector.
body_input = keras.Input(shape=(None,), name='body')
title_input = keras.Input(shape=(None,), name='title')
tag_input = keras.Input(shape=(num_tags,), name='tag')
# Embed each text input into 64-dimensional vectors.
body_feat = layers.Embedding(num_words, 64)(body_input)
title_feat = layers.Embedding(num_words, 64)(title_input)
# Encode each sequence with its own LSTM, then fuse all features.
body_feat = layers.LSTM(32)(body_feat)
title_feat = layers.LSTM(128)(title_feat)
features = layers.concatenate([title_feat,body_feat, tag_input])
# Two output heads: a sigmoid priority score and a softmax department
# classifier, both named so per-head losses can be assigned below.
priority_pred = layers.Dense(1, activation='sigmoid', name='priority')(features)
department_pred = layers.Dense(num_departments, activation='softmax', name='department')(features)
# Multi-input / multi-output functional model.
model = keras.Model(inputs=[body_input, title_input, tag_input],
                    outputs=[priority_pred, department_pred])
model.summary()
keras.utils.plot_model(model, 'multi_model.png', show_shapes=True)
# Per-head losses keyed by output name; department loss is down-weighted.
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
              loss={'priority': 'binary_crossentropy',
                    'department': 'categorical_crossentropy'},
              loss_weights=[1., 0.2])
import numpy as np
# Random training data standing in for a real dataset (1280 samples).
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tag_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')
# Random targets for the two heads.
priority_label = np.random.random(size=(1280, 1))
department_label = np.random.randint(2, size=(1280, num_departments))
# Inputs and targets are passed as dicts keyed by layer name.
history = model.fit(
    {'title': title_data, 'body':body_data, 'tag':tag_data},
    {'priority':priority_label, 'department':department_label},
    batch_size=32,
    epochs=5
)
# Round-trip the model through HDF5 to demonstrate save/load.
model.save('model_save.h5')
del model
model = keras.models.load_model('model_save.h5') | 29.446154 | 98 | 0.719436 |
c8ff0f334dbba342f0a95112a0a41bb1cc0f4aaf | 3,937 | py | Python | src/genie/libs/parser/nxos/tests/ShowIpOspf/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/nxos/tests/ShowIpOspf/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/nxos/tests/ShowIpOspf/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
'vrf':
{'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'area_id': '0.0.0.0',
'area_type': 'normal',
'authentication': 'none',
'existed': '1w5d',
'numbers':
{'active_interfaces': 4,
'interfaces': 6,
'loopback_interfaces': 4,
'passive_interfaces': 0},
'statistics':
{'area_scope_lsa_cksum_sum': '1',
'area_scope_lsa_count': 1,
'spf_last_run_time': 0.000447,
'spf_runs_count': 2}}},
'auto_cost':
{'bandwidth_unit': 'mbps',
'enable': False,
'reference_bandwidth': 40000},
'enable': False,
'discard_route_external': True,
'discard_route_internal': True,
'graceful_restart':
{'ietf':
{'enable': True,
'exist_status': 'none',
'restart_interval': 60,
'state': 'Inactive',
'type': 'ietf'}},
'instance': 1,
'nsr':
{'enable': True},
'numbers':
{'active_areas':
{'normal': 1,
'nssa': 0,
'stub': 0,
'total': 1},
'areas':
{'normal': 1,
'nssa': 0,
'stub': 0,
'total': 1}},
'opaque_lsa_enable': True,
'preference':
{'single_value':
{'all': 110}},
'router_id': '10.100.2.2',
'single_tos_routes_enable': True,
'spf_control':
{'paths': 8,
'throttle':
{'lsa':
{'group_pacing': 10,
'hold': 5000,
'maximum': 5000,
'minimum': 1000,
'numbers':
{'external_lsas':
{'checksum': '0',
'total': 0},
'opaque_as_lsas':
{'checksum': '0',
'total': 0}},
'start': 0.0},
'spf':
{'hold': 1000,
'maximum': 5000,
'start': 200}}}}}}}}}}
| 49.2125 | 73 | 0.216916 |
c8ffacba13563fc63e94eff5bc851a3e548d81b6 | 4,566 | py | Python | rain/cloud/system/system.py | SuPerCxyz/rain | 578b6d125f535414d3ea3fcfee4015b70fed560c | [
"Apache-2.0"
] | 2 | 2018-12-20T01:38:56.000Z | 2018-12-29T14:49:36.000Z | rain/cloud/system/system.py | SuPerCxyz/rain | 578b6d125f535414d3ea3fcfee4015b70fed560c | [
"Apache-2.0"
] | null | null | null | rain/cloud/system/system.py | SuPerCxyz/rain | 578b6d125f535414d3ea3fcfee4015b70fed560c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import platform
import time
from getdevinfo import getdevinfo
import psutil
from rain.common import rain_log
from rain.common import utils
from rain.common.utils import async_call
logger = rain_log.logg(__name__)
def get_memcache_info(self):
    """Collect virtual-memory statistics and return them as a dict.

    All sizes are reported in MiB; ``memcache_percent`` is the usage
    percentage as reported by psutil.
    """
    memcache_info = psutil.virtual_memory()
    mib = 1024 ** 2
    memcache_info_dict = {
        'memcache_total_MB': memcache_info.total / mib,
        'memcache_used_MB': memcache_info.used / mib,
        'memcache_available_MB': memcache_info.available / mib,
        # BUG FIX: this field previously duplicated the *cached* figure;
        # report the real buffers counter instead. getattr guards the
        # platforms (e.g. Windows/macOS) where psutil omits these fields.
        'memcache_buff_MB': getattr(memcache_info, 'buffers', 0) / mib,
        'memcache_cached_MB': getattr(memcache_info, 'cached', 0) / mib,
        'memcache_percent': memcache_info.percent,
    }
    logger.info('Collect memory related information.')
    return memcache_info_dict
def _get_user(self):
    """Return a list of dicts describing each currently logged-in user."""
    return [
        {
            'name': session.name,
            'host': session.host,
            'conn_time': utils.str_time(session.started),
        }
        for session in psutil.users()
    ]
def get_system_info(self):
    """Gather host-level facts: interpreter version, hostname, platform
    string, boot time, current time, and logged-in users."""
    info = {
        'python_version': platform.python_version(),
        'hostname': platform.node(),
        'system_info': platform.platform(),
        'boot_time': utils.str_time(psutil.boot_time()),
        'time': time.asctime(time.localtime(time.time())),
        'user': self._get_user(),
    }
    logger.info('Collect user login information.')
    return info
| 33.328467 | 75 | 0.592641 |
c8ffe69de767e55075d5f9e090d7f69a2c93dd80 | 7,517 | py | Python | models.py | rudrasohan/Trust-Region-Policy-Optimization | bbaadf37aa3ea4ccc35907038eea4add9e5e050c | [
"MIT"
] | 3 | 2019-11-16T15:40:14.000Z | 2021-12-28T14:26:36.000Z | models.py | rudrasohan/Trust-Region-Policy-Optimization | bbaadf37aa3ea4ccc35907038eea4add9e5e050c | [
"MIT"
] | null | null | null | models.py | rudrasohan/Trust-Region-Policy-Optimization | bbaadf37aa3ea4ccc35907038eea4add9e5e050c | [
"MIT"
] | null | null | null | """Model Definations for trpo."""
import gym
import numpy as np
import torch
import time
import scipy.optimize
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from distributions import DiagonalGaussian
from helpers import get_flat_params, set_flat_params, get_flat_grads
#from helpers import sample_trajectories, compute_advantage_returns, get_flat_params
def test_policy_value():
    """Smoke-test GaussianMLPPolicy + MLPBaseline on MountainCarContinuous.

    NOTE(review): sample_trajectories / compute_advantage_returns come from
    the currently commented-out ``helpers`` import at the top of the file --
    confirm they are in scope before running.
    """
    env = gym.make("MountainCarContinuous-v0")
    policy = GaussianMLPPolicy(env.observation_space, env.action_space, use_std_net=True)
    paths = sample_trajectories(env, policy, 1000)
    print(len(paths["rewards"]))
    baseline = MLPBaseline(env.observation_space, env.action_space)
    compute_advantage_returns(paths, baseline, 0.9, 0.1)
    print(paths.keys())
    baseline.update(paths)
    print(paths['dist'].keys())
    flat_params_mean = get_flat_params(policy.mean_network.parameters())
    flat_params_std = get_flat_params(policy.std_network.parameters())
    # BUG FIX: the original printed the undefined name ``flat_params``
    # (a guaranteed NameError); print the two vectors actually computed.
    print(flat_params_mean)
    print(flat_params_std)
#test_policy_value() | 33.261062 | 119 | 0.600905 |
7400b5e2ffa5344609e346cb8a0ec6ea5d60b0b6 | 148 | py | Python | sample_1/admin.py | JordanEC/django-rest-and-angular | 571eb2a7d966c2b7f1f520a764420207387709cd | [
"MIT"
] | null | null | null | sample_1/admin.py | JordanEC/django-rest-and-angular | 571eb2a7d966c2b7f1f520a764420207387709cd | [
"MIT"
] | null | null | null | sample_1/admin.py | JordanEC/django-rest-and-angular | 571eb2a7d966c2b7f1f520a764420207387709cd | [
"MIT"
] | null | null | null | from django.contrib import admin
from sample_1.models import *
# Register your models here.
admin.site.register(Author)
admin.site.register(Book)
| 18.5 | 32 | 0.797297 |
740438f708cbfe346a44823a28bc4994e0b1022b | 196 | py | Python | uuid1/models.py | charlesDavid009/Uuid | 7553843c0112e7f0e248cd5692eccca72553e720 | [
"MIT"
] | 1 | 2021-05-24T18:52:53.000Z | 2021-05-24T18:52:53.000Z | uuid1/models.py | charlesDavid009/Uuid | 7553843c0112e7f0e248cd5692eccca72553e720 | [
"MIT"
] | null | null | null | uuid1/models.py | charlesDavid009/Uuid | 7553843c0112e7f0e248cd5692eccca72553e720 | [
"MIT"
] | null | null | null | from django.db import models
import uuid
# Create your models here.
| 19.6 | 53 | 0.760204 |
74043e623f8ec052206750dba77f648adab74816 | 1,660 | py | Python | comedy-org.py | qedpi/file-organizer | 07bdc6fd8e752aae03078529dfefe4838f4f4c4e | [
"MIT"
] | 2 | 2020-11-23T16:32:11.000Z | 2021-05-14T00:35:16.000Z | comedy-org.py | qedpi/file-organizer | 07bdc6fd8e752aae03078529dfefe4838f4f4c4e | [
"MIT"
] | null | null | null | comedy-org.py | qedpi/file-organizer | 07bdc6fd8e752aae03078529dfefe4838f4f4c4e | [
"MIT"
] | null | null | null | import os
from shutil import move, rmtree
from itertools import chain
from genres import genre_of, DOWNLOAD_DIR, DST_DIRS, VIDEO_EXTENSIONS
print(genre_of)
print(f'moving files from {DOWNLOAD_DIR}: \n'
      # f'with keywords: {COMEDY_TAGS} \n'
      # f'with extensions: {VIDEO_EXTENSIONS} \n'
      )
files_moved = 0
# For each download *directory*, look for a genre keyword among the dotted
# components of its name (single words and adjacent two-word combinations),
# move any contained video file into the matching destination directory,
# then delete the source folder.
for file_name in os.listdir(DOWNLOAD_DIR):
    name_parts = file_name.split('.')
    # check single & double word combos todo: generalize to more than 2
    two_words = ('.'.join(name_parts[i:i + 2]) for i in range(len(name_parts) - 1))
    file_path = os.path.join(DOWNLOAD_DIR, file_name)
    if os.path.isfile(file_path): # skip files
        continue
    # todo: this doesn't check whether the entry is locked (e.g. by qTorrent)
    try:
        # Walrus captures the matching keyword for the genre lookup below.
        if any((keyword := part) in genre_of for part in chain(name_parts, two_words)):
            dst_dir = DST_DIRS[genre_of[keyword]]
            # Move every file with a recognised video extension.
            for maybe_vid in (name for name in os.listdir(file_path)):
                if any(maybe_vid.endswith(ext) for ext in VIDEO_EXTENSIONS):
                    move(os.path.join(file_path, maybe_vid), dst_dir)
                    print(f'moved {maybe_vid} to {dst_dir}')
            # Remove the now-emptied source folder.
            # NOTE(review): this also runs when no video matched, deleting
            # the folder and its remaining contents -- confirm intended.
            rmtree(file_path)
            files_moved += 1
    except PermissionError:
        print('permission denied')
        continue  # skip this file if locked (eg by qTorrent)
print(f'{files_moved = }')
7405313149ad1d453f1faa1ff9ea0b0aec012d46 | 3,572 | py | Python | keeper/v2api/projects.py | lsst-sqre/ltd-keeper | c658bcce726764e7416a8a386b418e83912b0f32 | [
"Apache-2.0",
"MIT"
] | 5 | 2016-05-16T18:46:26.000Z | 2019-07-08T15:16:41.000Z | keeper/v2api/projects.py | lsst-sqre/ltd-keeper | c658bcce726764e7416a8a386b418e83912b0f32 | [
"Apache-2.0",
"MIT"
] | 46 | 2016-02-18T16:54:36.000Z | 2022-03-25T19:43:45.000Z | keeper/v2api/projects.py | lsst-sqre/ltd-keeper | c658bcce726764e7416a8a386b418e83912b0f32 | [
"Apache-2.0",
"MIT"
] | 4 | 2016-08-20T23:10:07.000Z | 2022-03-25T19:52:09.000Z | """Handlers for project-related APIs."""
from __future__ import annotations
from typing import Dict, Tuple
from flask import request
from flask_accept import accept_fallback
from keeper.auth import token_auth
from keeper.logutils import log_route
from keeper.models import Organization, Product, db
from keeper.services.createproduct import create_product
from keeper.services.updateproduct import update_product
from keeper.taskrunner import launch_tasks
from keeper.v2api import v2api
from ._models import (
ProjectPatchRequest,
ProjectPostRequest,
ProjectResponse,
ProjectsResponse,
)
from ._urls import url_for_project
__all__ = ["get_projects", "get_project", "create_project", "update_project"]
| 28.349206 | 77 | 0.676932 |
7405685566287cf4e859fe85e98cb0c021c50b86 | 2,237 | py | Python | plugins/markdown_extensions/katex.py | raabrp/rraabblog | a1d47ede918f4838ac3bbcff9ef4e7c67f851c32 | [
"MIT"
] | null | null | null | plugins/markdown_extensions/katex.py | raabrp/rraabblog | a1d47ede918f4838ac3bbcff9ef4e7c67f851c32 | [
"MIT"
] | null | null | null | plugins/markdown_extensions/katex.py | raabrp/rraabblog | a1d47ede918f4838ac3bbcff9ef4e7c67f851c32 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Allow server-side KaTeX rendering for Markdown through node.js
The markdown extension adds regex patterns for `$` and `$$` in the source `.md`
file, and applies KaTeX to the intermediate text with a `python-bond` call to
node.js
requires
* node
* npm
* katex (npm install katex)
* python-bond (pip3 install --user python-bond)
KaTeX: https://github.com/Khan/KaTeX
"""
import markdown
from markdown.util import etree
import bond
JS = bond.make_bond('JavaScript')
JS.eval_block(
r'''
katex = require('katex');
function render(s, is_block) {
return katex.renderToString(s, {
displayMode: is_block,
throwOnError: false
});
}
'''
)
katex = JS.callable('render')
memoise = {}
###############################################################################
| 24.053763 | 97 | 0.565042 |
7407dcd338f0c898023c04aaa216c45c15fae02b | 247 | py | Python | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/05_While-Loop/00.Book-Exercises-7.1-Complex-Loops-02-Numbers-N-to-1.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/05_While-Loop/00.Book-Exercises-7.1-Complex-Loops-02-Numbers-N-to-1.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/05_While-Loop/00.Book-Exercises-7.1-Complex-Loops-02-Numbers-N-to-1.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | # N 1
# , n 1 ( -1).
# , n = 100, : 100, 99, 98, , 3, 2, 1.
n = int(input())
for i in range(n, 0, -1):
print(i) | 30.875 | 85 | 0.631579 |
7408452dfdbed6f56d0e2243de45d1e90b286cdf | 1,490 | py | Python | simpleclassroom/urls.py | cbetheridge/simpleclassroom | 9e99262ffdb4efc0e27566855866dfc26244bf26 | [
"MIT"
] | null | null | null | simpleclassroom/urls.py | cbetheridge/simpleclassroom | 9e99262ffdb4efc0e27566855866dfc26244bf26 | [
"MIT"
] | null | null | null | simpleclassroom/urls.py | cbetheridge/simpleclassroom | 9e99262ffdb4efc0e27566855866dfc26244bf26 | [
"MIT"
] | null | null | null | """simpleclassroom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from views import views
from views import io
urlpatterns = [
url(r'^$', views.display_classrooms, name='index'),
url(r'^classrooms/', views.display_classrooms, name='classrooms'),
url(r'^student_list/', views.display_students, name='student list'),
url(r'^student_details/', views.display_student_details, name='student view'),
url(r'^io/add_class/', io.add_classroom, name='add class'),
url(r'^io/del_class/', io.delete_classroom, name='delete class'),
url(r'^io/add_student/', io.add_student, name='add student'),
url(r'^io/del_student/', io.delete_student, name='delete student'),
url(r'^io/enroll/', io.enroll_student, name='enroll student'),
url(r'^io/unenroll/', io.unenroll_student, name='unenroll student'),
url(r'^admin/', admin.site.urls),
]
| 42.571429 | 80 | 0.713423 |
cd9d087613da3991818c9538bda9aacfcb7b2302 | 714 | py | Python | Mechanize/checkWords.py | rpvnwnkl/DailyWriter | 7934d636219e46b9875f31e327bf52993e15e517 | [
"MIT"
] | null | null | null | Mechanize/checkWords.py | rpvnwnkl/DailyWriter | 7934d636219e46b9875f31e327bf52993e15e517 | [
"MIT"
] | null | null | null | Mechanize/checkWords.py | rpvnwnkl/DailyWriter | 7934d636219e46b9875f31e327bf52993e15e517 | [
"MIT"
] | null | null | null | #!usr/bin/env python
import sys, logging
import re
import mechanize
logger = logging.getLogger('mechanize')
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
br = mechanize.Browser()
br.set_debug_http(True)
br.set_debug_responses(True)
br.set_debug_redirects(True)
br.open("https://750words.com/auth")
email = open('email.txt', 'r').read()
password = open('password.txt', 'r').read()
print email, password
br.select_form(nr=0)
br['person[email_address]'] = 'rpvnwnkl@gmail.com'
br['person[password]'] = 'password'
response2 = br.submit()
print br.title
print response2.geturl()
print response2.info()
print response2.read()
print br.select_form(nr=0)
print br['entry[body]']
| 23.032258 | 52 | 0.752101 |
cd9ec9af338573f552a9119ee09d53bff7f7cebd | 4,939 | py | Python | simplereg/data_writer.py | gift-surg/SimpleReg | 9d9a774f5b7823c2256844c9d0260395604fb396 | [
"BSD-3-Clause"
] | 18 | 2017-11-10T15:09:41.000Z | 2021-01-12T07:48:46.000Z | simplereg/data_writer.py | gift-surg/SimpleReg | 9d9a774f5b7823c2256844c9d0260395604fb396 | [
"BSD-3-Clause"
] | null | null | null | simplereg/data_writer.py | gift-surg/SimpleReg | 9d9a774f5b7823c2256844c9d0260395604fb396 | [
"BSD-3-Clause"
] | 3 | 2019-03-20T14:13:03.000Z | 2020-01-15T01:32:51.000Z | # \file DataWriter.py
# \brief Class to read data
#
# \author Michael Ebner (michael.ebner.14@ucl.ac.uk)
# \date June 2018
import os
import sys
import numpy as np
import nibabel as nib
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
from simplereg.definitions import ALLOWED_IMAGES
from simplereg.definitions import ALLOWED_LANDMARKS
from simplereg.definitions import ALLOWED_TRANSFORMS
from simplereg.definitions import ALLOWED_TRANSFORMS_DISPLACEMENTS
| 40.154472 | 78 | 0.578255 |
cd9f005c2266883ac0727dd4f11b65c0cc61acbf | 3,881 | py | Python | configman/datetime_util.py | peterbe/configman | 724d80b25a0ebbb2e75ad69e92a6611494cd68b4 | [
"BSD-3-Clause"
] | null | null | null | configman/datetime_util.py | peterbe/configman | 724d80b25a0ebbb2e75ad69e92a6611494cd68b4 | [
"BSD-3-Clause"
] | null | null | null | configman/datetime_util.py | peterbe/configman | 724d80b25a0ebbb2e75ad69e92a6611494cd68b4 | [
"BSD-3-Clause"
] | null | null | null | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, lars@mozilla.com
# Peter Bengtsson, peterbe@mozilla.com
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import datetime
def datetime_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DDTHH:MM:SS.S
and convert it into an instance of datetime.datetime
"""
try:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
except ValueError:
try:
return datetime.datetime.strptime(s, '%Y-%m-%d')
except ValueError:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
def date_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DD
and convert it into an instance of datetime.date
"""
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
def datetime_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DDTHH:MM:SS.S
"""
return aDate.isoformat()
def date_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DD
"""
return aDate.strftime('%Y-%m-%d')
def str_to_timedelta(input_str):
""" a string conversion function for timedelta for strings in the format
DD:HH:MM:SS
"""
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
days = int(details[-4])
if len(details) >= 3:
hours = int(details[-3])
if len(details) >= 2:
minutes = int(details[-2])
if len(details) >= 1:
seconds = int(details[-1])
return datetime.timedelta(days=days,
hours=hours,
minutes=minutes,
seconds=seconds)
def timedelta_to_str(aTimedelta):
""" a conversion function for time deltas to string in the form
DD:HH:MM:SS
"""
days = aTimedelta.days
temp_seconds = aTimedelta.seconds
hours = temp_seconds / 3600
minutes = (temp_seconds - hours * 3600) / 60
seconds = temp_seconds - hours * 3600 - minutes * 60
return '%d:%d:%d:%d' % (days, hours, minutes, seconds)
| 34.651786 | 79 | 0.67328 |
cda50a569978706c9bec0db5233be29def4df294 | 2,952 | py | Python | src/tests/voluntario/test_api.py | Akijunior/Atados | 255c9c9137e48aa82fdea63f9d6d65a3720c3f92 | [
"MIT"
] | null | null | null | src/tests/voluntario/test_api.py | Akijunior/Atados | 255c9c9137e48aa82fdea63f9d6d65a3720c3f92 | [
"MIT"
] | null | null | null | src/tests/voluntario/test_api.py | Akijunior/Atados | 255c9c9137e48aa82fdea63f9d6d65a3720c3f92 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from rest_framework.views import status
from voluntario.models import Voluntario
| 42.782609 | 87 | 0.686992 |
cda678a982b6a913bc586a56ae657d42e29745b5 | 508 | py | Python | main.py | ki-ljl/Scaffold-Federated-Learning | 12e04217df3af2c326ea90fef6cff47beaaec485 | [
"MIT"
] | 9 | 2022-03-02T13:58:29.000Z | 2022-03-31T06:45:40.000Z | main.py | ki-ljl/Scaffold-Federated-Learning | 12e04217df3af2c326ea90fef6cff47beaaec485 | [
"MIT"
] | null | null | null | main.py | ki-ljl/Scaffold-Federated-Learning | 12e04217df3af2c326ea90fef6cff47beaaec485 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
@Time2022/05/05 12:57
@AuthorKI
@Filemain.py
@MottoHungry And Humble
"""
from data_process import clients_wind
from server import Scaffold
if __name__ == '__main__':
main()
| 20.32 | 79 | 0.582677 |
cda77d54e2daf57e2851f4a131c3a29a3329d0d5 | 464 | py | Python | reddit/files/setup_cassandra.py | mitodl/reddit-formula | 68c597f5391b8bf960de3d701225de2fc45d04e4 | [
"BSD-3-Clause"
] | null | null | null | reddit/files/setup_cassandra.py | mitodl/reddit-formula | 68c597f5391b8bf960de3d701225de2fc45d04e4 | [
"BSD-3-Clause"
] | 4 | 2017-09-29T18:34:06.000Z | 2018-05-23T19:07:17.000Z | reddit/files/setup_cassandra.py | mitodl/reddit-formula | 68c597f5391b8bf960de3d701225de2fc45d04e4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import pycassa
sys = pycassa.SystemManager("cassandra.service.consul:9160")
if "reddit" not in sys.list_keyspaces():
print "creating keyspace 'reddit'"
sys.create_keyspace("reddit", "SimpleStrategy", {"replication_factor": "3"})
print "done"
if "permacache" not in sys.get_keyspace_column_families("reddit"):
print "creating column family 'permacache'"
sys.create_column_family("reddit", "permacache")
print "done"
| 30.933333 | 80 | 0.726293 |
cda9eb07b967369dac4f17bb21af05cd80acf296 | 1,472 | py | Python | Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py | vipmunot/Data-Analysis-using-Python | 34586d8cbbc336508c4a7a68abe14944f1096252 | [
"MIT"
] | null | null | null | Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py | vipmunot/Data-Analysis-using-Python | 34586d8cbbc336508c4a7a68abe14944f1096252 | [
"MIT"
] | null | null | null | Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py | vipmunot/Data-Analysis-using-Python | 34586d8cbbc336508c4a7a68abe14944f1096252 | [
"MIT"
] | null | null | null | ## 1. Data Structures ##
import pandas as pd
fandango = pd.read_csv('fandango_score_comparison.csv')
print(fandango.head(2))
## 2. Integer Indexes ##
fandango = pd.read_csv('fandango_score_comparison.csv')
series_film = fandango['FILM']
series_rt = fandango['RottenTomatoes']
print(series_film[:5])
print(series_rt[:5])
## 3. Custom Indexes ##
# Import the Series object from pandas
from pandas import Series
film_names = series_film.values
rt_scores = series_rt.values
series_custom=pd.Series(index = film_names, data = rt_scores)
## 4. Integer Index Preservation ##
series_custom = Series(rt_scores , index=film_names)
series_custom[['Minions (2015)', 'Leviathan (2014)']]
fiveten = series_custom[5:10]
print(fiveten)
## 5. Reindexing ##
original_index = series_custom.index.tolist()
sorted_by_index = series_custom.reindex(index = sorted(original_index))
## 6. Sorting ##
sc2 = series_custom.sort_index()
sc3 = series_custom.sort_values()
print(sc2.head(10))
print(sc3.head(10))
## 7. Transforming Columns With Vectorized Operations ##
series_normalized = series_custom/20
## 8. Comparing and Filtering ##
criteria_one = series_custom > 50
criteria_two = series_custom < 75
both_criteria = series_custom[criteria_one & criteria_two]
## 9. Alignment ##
rt_critics = Series(fandango['RottenTomatoes'].values, index=fandango['FILM'])
rt_users = Series(fandango['RottenTomatoes_User'].values, index=fandango['FILM'])
rt_mean =(rt_users + rt_critics) / 2 | 25.37931 | 81 | 0.754076 |
cdad55a9ce2a49755ae4b294972c1f2e61c115f9 | 425 | py | Python | problem/01000~09999/01058/1058.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/01058/1058.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/01058/1058.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | n=int(input())
link=[[100]*n for i in range(n)]
for i in range(n):
x=input()
for j in range(n):
if x[j]=='Y': link[i][j]=1
for i in range(n):
for j in range(n):
for k in range(n):
if link[j][i]+link[i][k]<link[j][k]:
link[j][k]=link[j][i]+link[i][k]
link[k][j]=link[j][k]
ans=0
for i in range(n):
t=0
for j in range(n):
if link[i][j]<=2 and i!=j: t+=1
ans=max(t,ans)
print(ans) | 20.238095 | 42 | 0.52 |
cdae861a30ba2bb3bd941147a704995ddbb3e7b8 | 4,894 | py | Python | pytest_ipynb/plugin.py | kevingerman/pytest-ipynb | 04b5fed4f280983f64254b01e3b24b7733e99224 | [
"BSD-3-Clause"
] | 104 | 2015-01-21T16:10:46.000Z | 2021-05-31T06:53:35.000Z | pytest_ipynb/plugin.py | kevingerman/pytest-ipynb | 04b5fed4f280983f64254b01e3b24b7733e99224 | [
"BSD-3-Clause"
] | 26 | 2015-04-09T04:12:48.000Z | 2018-12-22T18:41:33.000Z | pytest_ipynb/plugin.py | kevingerman/pytest-ipynb | 04b5fed4f280983f64254b01e3b24b7733e99224 | [
"BSD-3-Clause"
] | 21 | 2015-02-06T10:07:28.000Z | 2021-04-19T21:31:48.000Z | import pytest
import os,sys
import warnings
try:
from exceptions import Exception, TypeError, ImportError
except:
pass
from runipy.notebook_runner import NotebookRunner
wrapped_stdin = sys.stdin
sys.stdin = sys.__stdin__
sys.stdin = wrapped_stdin
try:
from Queue import Empty
except:
from queue import Empty
# code copied from runipy main.py
with warnings.catch_warnings():
try:
from IPython.utils.shimmodule import ShimWarning
warnings.filterwarnings('error', '', ShimWarning)
except ImportError:
try:
# IPython 3
from IPython.nbformat import reads, NBFormatError
except ShimWarning:
# IPython 4
from nbformat import reads, NBFormatError
except ImportError:
# IPython 2
from IPython.nbformat.current import reads, NBFormatError
finally:
warnings.resetwarnings()
def get_cell_description(cell_input):
"""Gets cell description
Cell description is the first line of a cell,
in one of this formats:
* single line docstring
* single line comment
* function definition
"""
try:
first_line = cell_input.split("\n")[0]
if first_line.startswith(('"', '#', 'def')):
return first_line.replace('"','').replace("#",'').replace('def ', '').replace("_", " ").strip()
except:
pass
return "no description"
| 32.845638 | 122 | 0.599918 |
cdaf411884a90226584098d678014eeaecc826d5 | 90 | py | Python | test.py | stpwin/fb-groub-sentiment | 0c0c860bf7b405e7cc4a7fac5a337b751dddb910 | [
"bzip2-1.0.6"
] | null | null | null | test.py | stpwin/fb-groub-sentiment | 0c0c860bf7b405e7cc4a7fac5a337b751dddb910 | [
"bzip2-1.0.6"
] | 7 | 2019-08-25T01:53:14.000Z | 2022-03-11T23:57:08.000Z | test.py | stpwin/fb-groub-sentiment | 0c0c860bf7b405e7cc4a7fac5a337b751dddb910 | [
"bzip2-1.0.6"
] | null | null | null | items = {"a": True, "b": False}
b = [v for k, v in items.items() if v == True]
print(b)
| 15 | 46 | 0.533333 |
cdb4d928fe81a97440ce0c56dea2317a5512f228 | 2,258 | py | Python | setup.py | vbrinnel/ztflc | b1ccab67e5e0e385d8406f179c1ad0c346afa129 | [
"Apache-2.0"
] | 1 | 2020-04-07T14:36:49.000Z | 2020-04-07T14:36:49.000Z | setup.py | vbrinnel/ztflc | b1ccab67e5e0e385d8406f179c1ad0c346afa129 | [
"Apache-2.0"
] | 3 | 2020-01-16T18:25:46.000Z | 2021-05-19T20:51:52.000Z | setup.py | vbrinnel/ztflc | b1ccab67e5e0e385d8406f179c1ad0c346afa129 | [
"Apache-2.0"
] | 1 | 2021-03-31T19:47:33.000Z | 2021-03-31T19:47:33.000Z | #! /usr/bin/env python
#
DESCRIPTION = "ztflc: Force photometry lc fitter"
LONG_DESCRIPTION = """ Force photometry lc fitter"""
DISTNAME = "ztflc"
AUTHOR = "Mickael Rigault"
MAINTAINER = "Mickael Rigault"
MAINTAINER_EMAIL = "m.rigault@ipnl.in2p3.fr"
URL = "https://github.com/MickaelRigault/ztflc/"
LICENSE = "BSD (3-clause)"
DOWNLOAD_URL = "https://github.com/MickaelRigault/ztflc/tarball/0.2"
VERSION = "0.2.3"
try:
from setuptools import setup, find_packages
_has_setuptools = True
except ImportError:
from distutils.core import setup
_has_setuptools = False
if __name__ == "__main__":
install_requires = check_dependencies()
if _has_setuptools:
packages = find_packages()
print(packages)
else:
# This should be updated if new submodules are added
packages = ["ztflc"]
setup(
name=DISTNAME,
author=AUTHOR,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
scripts=["bin/forcephoto.py"],
packages=packages,
include_package_data=True,
# package_data={'pysedm': ['data/*.*']},
classifiers=[
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Astronomy",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
],
)
| 26.880952 | 68 | 0.637733 |
cdb4fa7248d3772a040373832306d9f403d71783 | 311 | py | Python | ibm1.py | thovo/ibm1 | df00eca56827e294642d503972f29ab3e139caf0 | [
"MIT"
] | null | null | null | ibm1.py | thovo/ibm1 | df00eca56827e294642d503972f29ab3e139caf0 | [
"MIT"
] | null | null | null | ibm1.py | thovo/ibm1 | df00eca56827e294642d503972f29ab3e139caf0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
__author__ = 'thovo'
import sys
ibm1() | 18.294118 | 73 | 0.59164 |
cdb6e8d6090040ad0dc31239d89e99153192bd44 | 1,927 | py | Python | wordfinds/raw.py | GrandMoff100/WordFinds | 4b56532f399178e5f2b18b246084644061c5bfc2 | [
"MIT"
] | 2 | 2021-05-22T19:19:56.000Z | 2021-08-16T11:34:11.000Z | wordfinds/raw.py | GrandMoff100/WordFinds | 4b56532f399178e5f2b18b246084644061c5bfc2 | [
"MIT"
] | null | null | null | wordfinds/raw.py | GrandMoff100/WordFinds | 4b56532f399178e5f2b18b246084644061c5bfc2 | [
"MIT"
] | 1 | 2021-11-09T13:55:43.000Z | 2021-11-09T13:55:43.000Z | import random
from .utils import filler
from .array import RawWordFindArray, WordArray
| 32.661017 | 120 | 0.511676 |
cdb7047c417fa314c5e02129e1672265cc3318ba | 2,969 | py | Python | src/neon/frontend/aeon_shim.py | MUTTERSCHIFF/ngraph-neon | 762e8ea639cdc671311ee4929bd1ee8cdf83e8bb | [
"Apache-2.0"
] | 13 | 2018-03-17T00:27:18.000Z | 2020-06-18T01:36:34.000Z | src/neon/frontend/aeon_shim.py | MUTTERSCHIFF/ngraph-neon | 762e8ea639cdc671311ee4929bd1ee8cdf83e8bb | [
"Apache-2.0"
] | 20 | 2018-03-17T14:49:04.000Z | 2018-04-19T17:47:38.000Z | src/neon/frontend/aeon_shim.py | NervanaSystems/ngraph-neon | 8988ab90ee81c8b219ea5c374702e56d7f383302 | [
"Apache-2.0"
] | 5 | 2018-03-23T22:47:17.000Z | 2020-10-21T16:15:02.000Z | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, absolute_import
import logging
from builtins import object
import neon as ng
logger = logging.getLogger(__name__)
try:
from aeon import DataLoader
except ImportError:
msg = "\n".join(["",
"Unable to import Aeon module.",
"Please see installation instructions at:",
"*****************",
"https://github.com/NervanaSystems/aeon/blob/rc1-master/README.md",
"*****************",
""])
logger.error(msg)
raise ImportError(msg)
NAME_MAP = {"channels": "C",
"height": "H",
"width": "W",
"frames": "D"}
"""Converts aeon axis names to canonical ngraph axis types."""
| 35.345238 | 88 | 0.583025 |
cdb91795db8c176b9e6d1d2b0ffc0bc2b063adbd | 857 | py | Python | Lessons/Chapter9Exercise1.py | Luderio/Scientific-Computing-with-Python | c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | [
"MIT"
] | null | null | null | Lessons/Chapter9Exercise1.py | Luderio/Scientific-Computing-with-Python | c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | [
"MIT"
] | null | null | null | Lessons/Chapter9Exercise1.py | Luderio/Scientific-Computing-with-Python | c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | [
"MIT"
] | null | null | null | wordCounter = dict()
while True :
inputFile = input('Enter a file: ')
try :
fileName = open(inputFile)
except :
fileName = 'invalid'
if fileName == 'invalid' :
if inputFile == 'done' :
break
else :
print('Invalid Input')
continue
for lines in fileName :
lines = lines.rstrip()
words = lines.split()
for wordItems in words :
wordCounter[wordItems] = wordCounter.get(wordItems, 0) + 1
largestWordCount = None
largestWord = None
for word,count in wordCounter.items() :
if largestWordCount is None or count > largestWordCount :
largestWord = word
largestWordCount = count
print('Largest Word:', largestWord, 'Count:', largestWordCount)
print(wordCounter)
continue
| 25.969697 | 70 | 0.574096 |
cdb943e87aa2338b8600a2d1fc39c5fdf842f690 | 1,022 | py | Python | iris/cli/help.py | kpanic/lymph | 5681de5e65ee72efb96012608fc5189a48adafcd | [
"Apache-2.0"
] | null | null | null | iris/cli/help.py | kpanic/lymph | 5681de5e65ee72efb96012608fc5189a48adafcd | [
"Apache-2.0"
] | null | null | null | iris/cli/help.py | kpanic/lymph | 5681de5e65ee72efb96012608fc5189a48adafcd | [
"Apache-2.0"
] | null | null | null | from iris.cli.base import Command, format_docstring, get_command_class
HELP = format_docstring("""
Usage: iris [options] <command> [<args>...]
Iris is the personification of the rainbow and messenger of the gods.
{COMMON_OPTIONS}
Commands:
instance Run a single service instance (one process).
node Run a node service that manages a group of processes on the same
machine.
request Send a request message to some service and output the reply.
inspect Describe the available rpc methods of a service.
tail Stream the logs of one or more services.
discover Show available services.
help Display help information about iris.
""")
| 26.894737 | 77 | 0.671233 |
cdb9f5699b06eaa0f164fb54a701bb1fdb951c1f | 3,321 | py | Python | src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py | Bhaskers-Blu-Org2/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 15 | 2019-12-14T07:54:18.000Z | 2021-03-14T14:53:28.000Z | src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py | Lisiczka27/FeaturizersLibrary | dc7b42abd39589af0668c896666affb4abe8a622 | [
"MIT"
] | 30 | 2019-12-03T20:58:56.000Z | 2020-04-21T23:34:39.000Z | src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py | Lisiczka27/FeaturizersLibrary | dc7b42abd39589af0668c896666affb4abe8a622 | [
"MIT"
] | 13 | 2020-01-23T00:18:47.000Z | 2021-10-04T17:46:45.000Z | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Generates JSON files based on data previously pickled"""
import lzma
import os
import pickle
import sys
import json
# Note that this isn't used directly, but is required by the picked python content
import pandas as pd
import CommonEnvironment
from CommonEnvironment import CommandLine
from CommonEnvironment import FileSystem
from CommonEnvironment.StreamDecorator import StreamDecorator
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _holiday_data_loader(_path):
"""Load holiday data as a static initializer."""
with lzma.open(_path, "rb") as fr:
df = pickle.loads(fr.read())
df['countryRegionCode'] = df['countryRegionCode'] \
.apply(lambda x: x if type(x) == str else None)
df['isPaidTimeOff'] = df['isPaidTimeOff'] \
.apply(lambda x: x if type(x) == bool else None)
return df
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try:
sys.exit(CommandLine.Main())
except KeyboardInterrupt:
pass
| 34.957895 | 82 | 0.504065 |
cdba82790169d516d43e4d1c83b7c0a26c10e1fe | 7,152 | py | Python | fer.py | MahmoudSabra1/Emotion-recognition-song-recommendation | 5cad8413b6c98cee12798334009fe8942a420527 | [
"MIT"
] | 11 | 2020-11-11T14:52:05.000Z | 2022-03-11T11:37:42.000Z | fer.py | MahmoudSabra1/Emotion-recognition-song-recommendation | 5cad8413b6c98cee12798334009fe8942a420527 | [
"MIT"
] | 1 | 2021-06-21T06:42:59.000Z | 2021-06-21T06:42:59.000Z | fer.py | MahmoudSabra1/Emotion-recognition-song-recommendation | 5cad8413b6c98cee12798334009fe8942a420527 | [
"MIT"
] | 7 | 2021-01-26T03:40:12.000Z | 2021-12-20T12:24:34.000Z | # Two lines that remove tensorflow GPU logs
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.optimizers import Adam
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation
from keras.preprocessing.image import ImageDataGenerator
from sklearn import model_selection
from math import ceil
# Loads csv files and appends pixels to X and labels to y
run_model()
| 32.216216 | 115 | 0.65646 |
cdbc9720aa5c6f9258b24ce41fb35960b581a3f8 | 705 | py | Python | manual.py | Taschenbergerm/bgg_miner | f20057ec2e85e20ad08f92514ce38c699e8c85eb | [
"MIT"
] | null | null | null | manual.py | Taschenbergerm/bgg_miner | f20057ec2e85e20ad08f92514ce38c699e8c85eb | [
"MIT"
] | null | null | null | manual.py | Taschenbergerm/bgg_miner | f20057ec2e85e20ad08f92514ce38c699e8c85eb | [
"MIT"
] | null | null | null | from pprint import pprint
import requests
from lxml import etree
if __name__ == "__main__":
main()
| 27.115385 | 68 | 0.591489 |
cdbd2bded66eee36ec46ada4de75a010512f317b | 2,962 | py | Python | app/requests.py | gabrielcoder247/News-Highlight-v2 | 595f4ee9739b173142d1012bdda63526818930e4 | [
"Unlicense"
] | null | null | null | app/requests.py | gabrielcoder247/News-Highlight-v2 | 595f4ee9739b173142d1012bdda63526818930e4 | [
"Unlicense"
] | null | null | null | app/requests.py | gabrielcoder247/News-Highlight-v2 | 595f4ee9739b173142d1012bdda63526818930e4 | [
"Unlicense"
] | null | null | null | import urllib.request,json
from .models import Source,Article
from . import main
# Getting Api Key
api_Key = None
#Getting the base urls
sources_base_url = None
articles_base_url = None
def configure_request(app):
'''
Function to acquire the api key and base urls
'''
global api_Key,sources_base_url,articles_base_url
api_Key = app.config['NEWS_API_KEY']
sources_base_url = app.config['NEWS_SOURCES_BASE_URL']
articles_base_url = app.config['NEWS_ARTICLES_BASE_URL']
def get_sources(category):
'''
Function that gets the json response to our url request
'''
get_sources_url = sources_base_url.format(category)
with urllib.request.urlopen(get_sources_url,data=None) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
sources_results = None
if get_sources_response['sources']:
sources_results_list = get_sources_response['sources']
sources_results = process_sources(sources_results_list)
# print(sources_results)
return sources_results
def process_sources(sources_results):
'''
Function that processes the sources result and transform them to a list of Objects
Args:
sources_results: A list of dictionaries that contain sources details
Returns :
sources_list: A list of sources objects
'''
sources_list = []
for source_item in sources_results:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
source_object = Source(id,name,description,url,category)
sources_list.append(source_object)
return sources_list
def get_articles(source):
'''
Function that gets the json response to our url request
'''
get_articles_url = articles_base_url.format(source,api_Key)
with urllib.request.urlopen(get_articles_url,data=None) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
articles_results = None
if get_articles_response['articles']:
articles_results_list = get_articles_response['articles']
articles_results = process_articles(articles_results_list)
return articles_results
def process_articles(articles_results):
'''
Function that processes the articles result and transform them to a list of Objects
Args:
articles_results: A list of dictionaries that contain articles details
Returns :
articles_list: A list of articles objects
'''
articles_list = []
for article_item in articles_results:
name = article_item.get('name')
author = article_item.get('author')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
if publishedAt and author and urlToImage:
article_object = Article(name,author,title,description,url,urlToImage,publishedAt)
articles_list.append(article_object)
return articles_list | 30.854167 | 85 | 0.778528 |
cdc3ceae4eb0b0fc7a29f9482fb7047dcfef58b4 | 727 | py | Python | main.py | csmyth93/solo_scoring | 6c1a32a3430058aa7d51be604dcc02d11ce85edd | [
"MIT"
] | null | null | null | main.py | csmyth93/solo_scoring | 6c1a32a3430058aa7d51be604dcc02d11ce85edd | [
"MIT"
] | null | null | null | main.py | csmyth93/solo_scoring | 6c1a32a3430058aa7d51be604dcc02d11ce85edd | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
players = get_names()
print(players)
scores = get_player_scores(players)
print(scores)
| 22.71875 | 63 | 0.515818 |
cdc3dc53bc12e7691159632083c1b94dc1973dac | 74 | py | Python | tests/unit/conftest.py | fourTheorem/slic-slack | cffc870c2399feff67199050460abdcb3385ef17 | [
"Apache-2.0"
] | 4 | 2022-01-14T15:47:55.000Z | 2022-01-14T16:15:18.000Z | tests/unit/conftest.py | fourTheorem/slic-slack | cffc870c2399feff67199050460abdcb3385ef17 | [
"Apache-2.0"
] | null | null | null | tests/unit/conftest.py | fourTheorem/slic-slack | cffc870c2399feff67199050460abdcb3385ef17 | [
"Apache-2.0"
] | null | null | null | import os
os.environ['SLACK_WEBHOOK_URL'] = 'https://example.com/slack'
| 14.8 | 61 | 0.72973 |
cdc442d6b9ce4b9876165256e71bc1dbffd0f620 | 760 | py | Python | python/twisted/web_echo.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | python/twisted/web_echo.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | python/twisted/web_echo.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | from twisted.protocols import basic
from twisted.internet import protocol, reactor
reactor.listenTCP(8000, HTTPEchoFactory())
reactor.run()
| 27.142857 | 68 | 0.661842 |
cdc5a62dbf81299334c3372e259e1e6b484185cd | 2,804 | py | Python | app01/tools.py | xinxinliang/ksDjango | 0c0f4a5842cf225e77035b716979fcf9b8d03311 | [
"Apache-2.0"
] | 13 | 2021-03-11T12:35:29.000Z | 2022-02-25T02:22:47.000Z | app01/tools.py | xinxinliang/ksDjango | 0c0f4a5842cf225e77035b716979fcf9b8d03311 | [
"Apache-2.0"
] | 1 | 2021-11-04T03:02:10.000Z | 2021-11-04T03:02:10.000Z | app01/tools.py | xinxinliang/ksDjango | 0c0f4a5842cf225e77035b716979fcf9b8d03311 | [
"Apache-2.0"
] | 4 | 2021-06-12T19:27:01.000Z | 2022-02-04T05:13:54.000Z | import requests
import json
import os
import time
from app01.models import UserTitle
# idnaame
URL = "https://video.kuaishou.com/graphql"
headers = {
"accept":"*/*",
"Content-Length":"<calculated when request is sent>",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"content-type": "application/json",
"Cookie": r'kpf=PC_WEB; kpn=KUAISHOU_VISION; clientid=3; did=web_ec874916e390b9741609686125a0452e; didv=1613879531823; client_key=65890b29; ktrace-context=1|MS43NjQ1ODM2OTgyODY2OTgyLjc1MjgyMjUyLjE2MTU0NDI5NDQ0MzYuMTU2OTE=|MS43NjQ1ODM2OTgyODY2OTgyLjIxMjcxODY4LjE2MTU0NDI5NDQ0MzYuMTU2OTI=|0|graphql-server|webservice|false|NA; userId=427400950; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABUkHhV7V4kZgEsKH5ujlHNWEHV_KRDoBGhvSztjMMB54VfcpY6EJgzK_b3ZYFhM0obMSTVBDc7Csb-KuDKQpR8sobH5ozd82kEMIV5eb3S0QSJBxAemnSYimqR5IskD_IGA06cph50uA_oH2OftW2tSpaBuXl3vyYhFv6aS_24d8z0n9WILEo5JcTI0QpDdmDoRnXxHc_x7JHIR3s1pBlBhoSzFZBnBL4suA5hQVn0dPKLsMxIiDp66EsPPenAZG6MBgmJkQL2mrCKEDn1OPcTisxS6wffSgFMAE; kuaishou.server.web_ph=cb43dea88ab3a4c31dd231f2dc9cc29b8680',
"Host": "video.kuaishou.com",
"Origin": "https://video.kuaishou.com",
"Referer": "https://video.kuaishou.com/profile/3xsms2z7ft9fmhg",
"User-Agent": "PostmanRuntime/7.26.8"
}
payload = {"operationName":"visionProfileUserList","variables":{"ftype":1},"query":"query visionProfileUserList($pcursor: String, $ftype: Int) {\n visionProfileUserList(pcursor: $pcursor, ftype: $ftype) {\n result\n fols {\n user_name\n headurl\n user_text\n isFollowing\n user_id\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"}
if __name__ == "__main__":
start_data() | 43.8125 | 760 | 0.696148 |
cdc5fa09b3e8bd5d035d3ebb8b21feb4b7b64279 | 2,183 | py | Python | core/thirdparty/load_openpose.py | jshuhnow/OddEyeCam | ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | [
"MIT"
] | 8 | 2020-10-08T13:32:33.000Z | 2021-12-08T10:59:03.000Z | core/thirdparty/load_openpose.py | jshuhnow/OddEyeCam | ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | [
"MIT"
] | null | null | null | core/thirdparty/load_openpose.py | jshuhnow/OddEyeCam | ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | [
"MIT"
] | 1 | 2021-04-15T23:50:13.000Z | 2021-04-15T23:50:13.000Z | import os
import sys
this_dir = os.path.dirname(__file__)
import numpy as np
openpose_path = os.path.join(this_dir, 'openpose')
op_release_path = os.path.join(openpose_path, 'Release')
model_path = os.path.join(openpose_path, 'models')
print(op_release_path)
sys.path.append(op_release_path);
os.environ['PATH'] = os.environ['PATH'] + ';' + openpose_path + '/x64/Release;' + openpose_path + '/bin;'
import pyopenpose as op
opWrapper = op.WrapperPython()
params = dict()
params["model_folder"] = model_path
params["number_people_max"] = 1
params["net_resolution"]="-1x160"
params["body"] = 1
params["output_resolution"] = "-1x-1"
params["disable_multi_thread"] = True
opWrapper.configure(params)
opWrapper.start() | 35.209677 | 107 | 0.639487 |
cdc633f283f26d40a91533035d25cbe1abaa2d61 | 11,193 | py | Python | pybench/benchmarks/benchmark_ml.py | pentschev/pybench | 89d65a6c418a1fee39d447bd11b8a999835b74a9 | [
"Apache-2.0"
] | 14 | 2019-06-29T19:19:10.000Z | 2022-03-31T06:40:33.000Z | pybench/benchmarks/benchmark_ml.py | pentschev/pybench | 89d65a6c418a1fee39d447bd11b8a999835b74a9 | [
"Apache-2.0"
] | 2 | 2019-07-23T22:06:37.000Z | 2019-08-19T22:15:32.000Z | pybench/benchmarks/benchmark_ml.py | pentschev/pybench | 89d65a6c418a1fee39d447bd11b8a999835b74a9 | [
"Apache-2.0"
] | 5 | 2019-07-23T14:48:48.000Z | 2020-04-01T08:43:00.000Z | import pytest
import importlib
import numba, numba.cuda
import numpy as np
from pybench import run_benchmark
_shapes = {
"small": [(int(2 ** 14), 512), (int(2 ** 15), 512), (int(2 ** 16), 512)],
"large": [(int(2 ** 20), 512), (int(2 ** 21), 512), (int(2 ** 22), 512)],
}
| 26.841727 | 82 | 0.54132 |
cdc72216af29eaceb6c114484063fc2831f99596 | 420 | py | Python | ABC127C/resolve.py | staguchi0703/problems_easy | 82804b99b3ce8104762c3f6f5cc60b009a17bdc8 | [
"MIT"
] | null | null | null | ABC127C/resolve.py | staguchi0703/problems_easy | 82804b99b3ce8104762c3f6f5cc60b009a17bdc8 | [
"MIT"
] | null | null | null | ABC127C/resolve.py | staguchi0703/problems_easy | 82804b99b3ce8104762c3f6f5cc60b009a17bdc8 | [
"MIT"
] | null | null | null | def resolve():
'''
code here
'''
N , M = [int(item) for item in input().split()]
LRs = [[int(item) for item in input().split()] for _ in range(M)]
L_max = 0
R_min = N
for L, R in LRs:
L_max = max(L_max, L)
R_min = min(R_min, R)
delta = R_min - L_max
if delta >= 0:
print(delta + 1)
else:
print(0)
if __name__ == "__main__":
resolve()
| 16.8 | 69 | 0.490476 |
cdc9ffbc19062cc077e25fb215d33c0447db75e0 | 7,109 | py | Python | om10/plotting.py | drphilmarshall/OM10 | 009c16f0ef4e1c5f8f78c78df3c7711b7be24938 | [
"MIT"
] | 5 | 2017-02-17T19:43:54.000Z | 2021-05-19T09:30:53.000Z | om10/plotting.py | drphilmarshall/OM10 | 009c16f0ef4e1c5f8f78c78df3c7711b7be24938 | [
"MIT"
] | 55 | 2015-02-06T19:25:58.000Z | 2021-03-09T07:57:04.000Z | om10/plotting.py | drphilmarshall/OM10 | 009c16f0ef4e1c5f8f78c78df3c7711b7be24938 | [
"MIT"
] | 16 | 2015-01-29T23:55:45.000Z | 2021-04-16T03:06:38.000Z | # ======================================================================
# Globally useful modules, imported here and then accessible by all
# functions in this file:
from __future__ import print_function
# Fonts, latex:
import matplotlib
matplotlib.rc('font',**{'family':'serif', 'serif':['TimesNewRoman']})
matplotlib.rc('text', usetex=True)
import corner
import pylab, sys, numpy as np
# ======================================================================
def plot_sample(sample, saveImg=False, fig=None, color='black',
parameters=('MAGI','IMSEP','VELDISP','ZLENS','ZSRC')):
"""
Given an OM10 sample, make a corner plot of the required quantities.
Parameters
----------
parameters : str, tuple
Names of the lens parameters to plot
saveImg : bool
If true, save image with standardized name.
IQ : float
Image quality, for reference.
fig : matplotlib figure object
Overlay plot on an existing figure
Returns
-------
fig : matplotlib figure object
New or updated figure
"""
features, labels = extract_features(sample, parameters)
if fig is None:
fig = corner.corner(features, labels=labels, color=color, smooth=1.0)
else:
_ = corner.corner(features, labels=labels, color=color, smooth=1.0, fig=fig)
for ax in fig.axes:
for item in ([ax.xaxis.label, ax.yaxis.label]):
item.set_fontsize(20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(16)
if saveImg:
pngfile = "om10_sample.png"
pylab.savefig(pngfile)
print("OM10: Sample plot saved to file:", pngfile)
return fig
# ======================================================================
def extract_features(x, names):
"""
Given an OM10 table of lenses, extract the required parameters and
provide labels for them.
Parameters
----------
x : Table
OM10 lens sample.
names : str, tuple
Names of features required.
Returns
-------
features : float, ndarray
Values of requested features, for each lens in the Table
labels : str, list
Corresponding axis labels
"""
features = np.array([])
labels = []
p = len(names)
n = len(x)
for name in names:
features = np.append(features, x[name])
labels.append(axis_labels[name])
return features.reshape(p,n).transpose(), labels
# ======================================================================
def plot_lens(lens, saveImg=False, IQ=0.7):
"""
Given an OM10 lens, compute some basic quantities
and use them to plot a cartoon visualization of the lens.
Parameters
----------
saveImg : bool
If true, save image with standardized name.
IQ : float
Image quality, for reference.
"""
# # Force matplotlib to not use any Xwindows backend:
# if saveImg:
# try: matplotlib.use('Agg')
# except: pass
# else:
# try: matplotlib.use('TkAgg')
# except: pass
# Pull out data for ease of use:
id = lens['LENSID'][0]
xi = lens['XIMG'][0]
yi = lens['YIMG'][0]
nim = lens['NIMG'][0]
mui = lens['MAG'][0]
md = lens['APMAG_I'][0]
ms = lens['MAGI_IN'][0]
xs = lens['XSRC'][0]
ys = lens['YSRC'][0]
xd = 0.0
yd = 0.0
zd = lens['ZLENS'][0]
zs = lens['ZSRC'][0]
q = 1.0 - lens['ELLIP'][0]
phi = lens['PHIE'][0]
print("OM10: Plotting image configuration of lens ID ",id)
# Compute image magnitudes:
mi = np.zeros(nim)
lfi = np.zeros(nim)
for i in range(nim):
mi[i] = ms - 2.5*np.log10(np.abs(mui[i]))
lfi[i] = 0.4*(24-mi[i])
print("OM10: lens, image magnitudes:",md,mi)
lfd = 0.4*(24-md)
# print("om10.plot_lens: lens, image log fluxes:",lfd,lfi)
# ------------------------------------------------------------------
# Compute caustics and critical curves:
# ------------------------------------------------------------------
# Start figure:
fig = pylab.figure(figsize=(8,8))
# ,aspect='equal')
# Axes limits, useful sizes:
xmax = 1.99
dm = 1.0/10
# Plot command sets its own axes. 'bp' = blue pentagons
# pylab.plot(xi, yi, 'bp')
pylab.plot(xi, yi, color='blue', \
marker='+', markersize=10, markeredgewidth=2, \
linestyle='')
pylab.plot(xs, ys, color='lightblue', \
marker='+', markersize=10, markeredgewidth=2, \
linestyle='')
pylab.plot(xd, yd, color='orange', \
marker='+', markersize=10, markeredgewidth=2, \
linestyle='')
# Ellipse to represent lens brightness:
ell = matplotlib.patches.Ellipse((xd,yd), width=2*dm*lfd, height=2*q*dm*lfd, angle=phi, alpha=0.2, fc='orange')
pylab.gca().add_patch(ell)
# Circles to represent image brightness:
for i in range(nim):
cir = pylab.Circle((xi[i],yi[i]), radius=dm*lfi[i], alpha=0.2, fc='blue')
pylab.gca().add_patch(cir)
# Circle to represent seeing:
cir = pylab.Circle((1.5,-1.5), radius=IQ/2.0, alpha=0.1, fc='grey')
pylab.gca().add_patch(cir)
text = '{:3.1f}" seeing'.format(IQ)
pylab.annotate(text, (370,5), xytext=None, fontsize=14, \
xycoords='axes points',textcoords='axes points')
# Legend giving lens, source redshift:
text1 = "$z_d$ = %5.2f" % zd
text2 = "$z_s$ = %5.2f" % zs
pylab.annotate(text1, (10,430), xytext=None, fontsize=14, \
xycoords='axes points',textcoords='axes points')
pylab.annotate(text2, (10,410), xytext=None, fontsize=14, \
xycoords='axes points',textcoords='axes points')
# Plot title:
title = "OM10 lensed QSO, ID="+str(id)
pylab.title(title,fontsize=20)
# Set axes labels:
pylab.xlabel("x / arcsec",fontsize=20)
pylab.ylabel("y / arcsec",fontsize=20)
# Set axis limits:
pylab.axis([-xmax,xmax,-xmax,xmax])
# Add a grid:
pylab.grid(color='grey', linestyle='--', linewidth=0.5)
# Plot graph to file:
if saveImg:
pngfile = "om10_qso_ID="+str(id)+".png"
pylab.savefig(pngfile)
print("OM10: Lens plot saved to file:",pngfile)
# ======================================================================
axis_labels = {}
axis_labels['ZLENS'] = '$z_{\\rm d}$'
axis_labels['VELDISP'] = '$\sigma_{\\rm d}$ / km/s'
axis_labels['ELLIP'] = '$\epsilon_{\\rm d}$'
axis_labels['PHIE'] = '$\phi_{\\rm d}$ / km/s'
axis_labels['GAMMA'] = '$\gamma$'
axis_labels['PHIG'] = '$\phi_{\gamma}$'
axis_labels['ZSRC'] = '$z_{\\rm s}$'
axis_labels['MAGI'] = '$i_3$'
axis_labels['MAGI_IN'] = '$i_{\\rm s}$'
axis_labels['IMSEP'] = '$\Delta \\theta$ / arcsec'
axis_labels['i_SDSS_lens'] = '$i_{\\rm d}$ (AB mag)'
axis_labels['i_SDSS_quasar'] = '$i_{\\rm s}$ (AB mag)'
axis_labels['ug'] = '$u-g$ color'
axis_labels['gr'] = '$g-r$ color'
axis_labels['ri'] = '$r-i$ color'
axis_labels['iz'] = '$i-z$ color'
axis_labels['ug'] = '$u-g$ color'
| 30.251064 | 115 | 0.549163 |
cdcae84167fb352d7727b5d25c865135e36f6d5e | 25,254 | py | Python | applications/EsteEuQuero/models/produto.py | vgcarvpro/vgcarvpro | 16d720cb49f02e4f859c27901360b34681e986c0 | [
"BSD-3-Clause"
] | null | null | null | applications/EsteEuQuero/models/produto.py | vgcarvpro/vgcarvpro | 16d720cb49f02e4f859c27901360b34681e986c0 | [
"BSD-3-Clause"
] | null | null | null | applications/EsteEuQuero/models/produto.py | vgcarvpro/vgcarvpro | 16d720cb49f02e4f859c27901360b34681e986c0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
| 5,050.8 | 25,197 | 0.818405 |
cdcc403733cc344ed109e0132f133aabd50b5dc1 | 1,213 | py | Python | rpython/jit/backend/muvm/registers.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/jit/backend/muvm/registers.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/jit/backend/muvm/registers.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | """ Modified version of ../muvm/registers.py. Will update as needed.
"""
#from rpython.jit.backend.arm.locations import VFPRegisterLocation
#from rpython.jit.backend.arm.locations import SVFPRegisterLocation
#from rpython.jit.backend.arm.locations import RegisterLocation
from rpython.jit.metainterp.history import (Const, ConstInt, ConstFloat,
ConstPtr,
INT, REF, FLOAT)
registers = []
vfpregisters = []
svfpregisters = []
all_regs = []
all_vfp_regs = vfpregisters[]
argument_regs = caller_resp = []
callee_resp = []
callee_saved_registers = callee_resp
callee_restored_registers = callee_resp
vfp_argument_regs = caller_vfp_resp = []
svfp_argument_regs = []
callee_vfp_resp = []
callee_saved_vfp_registers = callee_vfp_resp
| 25.270833 | 72 | 0.650453 |
cdcc7612e16b3989892d1765ee8591ffd8c61843 | 1,536 | py | Python | src/database/CRUD/create.py | gregory-chekler/api | 11ecbea945e7eb6fa677a0c0bb32bda51ba15f28 | [
"MIT"
] | 2 | 2020-07-24T12:58:17.000Z | 2020-12-17T02:26:13.000Z | src/database/CRUD/create.py | gregory-chekler/api | 11ecbea945e7eb6fa677a0c0bb32bda51ba15f28 | [
"MIT"
] | 214 | 2019-06-26T17:33:54.000Z | 2022-03-26T00:02:34.000Z | src/database/CRUD/create.py | massenergize/portalBackEnd | 7ed971b2be13901667a216d8c8a46f0bed6d6ccd | [
"MIT"
] | 6 | 2020-03-13T20:29:06.000Z | 2021-08-20T16:15:08.000Z | """
This file contains code to post data from the database. This is meant to
centralize the insertion of data into the database so that multiple apps can
call on the methods in this file without having to define their own
and to prevent code redundancy.
"""
from ..models import *
from ..utils.common import ensure_required_fields
from ..utils.create_factory import CreateFactory
| 25.180328 | 76 | 0.76888 |
cdccadfab450a4e9a57ce9f5439e430bde2038d3 | 527 | py | Python | tfutils/losses/losses.py | njchiang/tf-keras-utils | 6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd | [
"MIT"
] | null | null | null | tfutils/losses/losses.py | njchiang/tf-keras-utils | 6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd | [
"MIT"
] | null | null | null | tfutils/losses/losses.py | njchiang/tf-keras-utils | 6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd | [
"MIT"
] | null | null | null | # this actually won't work with keras... not exactly a keras utility
import tensorflow as tf
# function is untested | 35.133333 | 68 | 0.660342 |
cdccf6c01653163bb8ca38561bfba641eb360f29 | 834 | py | Python | src/data_generator/vocab_builder.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
] | null | null | null | src/data_generator/vocab_builder.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
] | null | null | null | src/data_generator/vocab_builder.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
] | null | null | null | # coding:utf-8
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import os
from src.utils import utils
from src.data_generator import vocabulary
| 33.36 | 81 | 0.713429 |
cdd4efc2e23f55bb4467b8c8df8e0d1cdd72fa3d | 419 | py | Python | xas/queries/elastic.py | neuromore/msgxc | e74565a7991d80e5951ef22452521bcbca27fc92 | [
"Apache-2.0"
] | 1 | 2020-03-30T22:14:21.000Z | 2020-03-30T22:14:21.000Z | xas/queries/elastic.py | neuromore/msgxc | e74565a7991d80e5951ef22452521bcbca27fc92 | [
"Apache-2.0"
] | 12 | 2020-04-15T00:00:49.000Z | 2022-02-27T01:26:08.000Z | xas/queries/elastic.py | neuromore/msgxc | e74565a7991d80e5951ef22452521bcbca27fc92 | [
"Apache-2.0"
] | 4 | 2020-01-16T11:29:38.000Z | 2020-04-03T09:43:40.000Z | from elasticsearch import Elasticsearch
# TODO: Not implemented yet
es = Elasticsearch(["localhost"], sniff_on_connection_fail=True, sniffer_timeout=60) | 38.090909 | 84 | 0.649165 |
cdd5f8ad7b2f42d4bfe80a22a6bf9fc481e565ca | 2,750 | py | Python | U-NET/utils.py | HarshZ26/Object-Detection | 1d73f6aeb7452b0b26bd2713e69f340d129a5ba5 | [
"MIT"
] | 1 | 2022-03-23T15:49:02.000Z | 2022-03-23T15:49:02.000Z | U-NET/utils.py | HarshZ26/Object-Detection | 1d73f6aeb7452b0b26bd2713e69f340d129a5ba5 | [
"MIT"
] | null | null | null | U-NET/utils.py | HarshZ26/Object-Detection | 1d73f6aeb7452b0b26bd2713e69f340d129a5ba5 | [
"MIT"
] | null | null | null | from init import *
VOC_CLASSES = [
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"potted plant",
"sheep",
"sofa",
"train",
"tv/monitor",
]
VOC_COLORMAP = [
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
]
palette = np.array(VOC_COLORMAP)
custom_transforms = [transforms.Normalize(mean=[-0.485, -0.456,-0.406], std=[1/0.229, 1/0.224,1/0.225])]
inv_trans = torchvision.transforms.Compose(custom_transforms)
transform = A.Compose([A.Resize(512,512),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),ToTensorV2()
])
| 25.229358 | 105 | 0.577818 |
cdd753cdba3af6ee31cf0c550e2ee7e5c881ebc9 | 2,898 | py | Python | custom_components/skyq/classes/switchmaker.py | TomBrien/Home_Assistant_SkyQ_MediaPlayer | 50f9ad0d3b7a3bc2acc652415ff59740bf3ace10 | [
"MIT"
] | null | null | null | custom_components/skyq/classes/switchmaker.py | TomBrien/Home_Assistant_SkyQ_MediaPlayer | 50f9ad0d3b7a3bc2acc652415ff59740bf3ace10 | [
"MIT"
] | null | null | null | custom_components/skyq/classes/switchmaker.py | TomBrien/Home_Assistant_SkyQ_MediaPlayer | 50f9ad0d3b7a3bc2acc652415ff59740bf3ace10 | [
"MIT"
] | null | null | null | """
A utility function to generate yaml config for SkyQ media players.
To support easy usage with other home assistant integrations, e.g. google home
"""
import os.path as _path
import yaml
from ..const import CONST_ALIAS_FILENAME
| 30.1875 | 78 | 0.493444 |
cdd78b4b371ac658a03d1638d8afdbda0805a759 | 24,528 | py | Python | datawinners/accountmanagement/admin.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/accountmanagement/admin.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/accountmanagement/admin.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import datetime
import logging
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from django.core.exceptions import ValidationError
from django.forms import CharField
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.contrib.auth.models import User, Group
from django_digest.models import PartialDigest
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.contrib.admin.views.main import ChangeList
from datawinners.common.admin.utils import get_text_search_filter, get_admin_panel_filter
from datawinners.project.submission.export import create_excel_response
from datawinners.search.index_utils import get_elasticsearch_handle
from forms import forms
from datawinners.accountmanagement.models import OrganizationSetting, SMSC, PaymentDetails, MessageTracker, Organization, NGOUserProfile, OutgoingNumberSetting
from mangrove.form_model.field import ExcelDate
from mangrove.utils.types import is_empty, is_not_empty
from datawinners.countrytotrialnumbermapping.models import Country, Network
from datawinners.utils import get_database_manager_for_org
from datawinners.feeds.database import feeds_db_for
from django.db.models import Q
admin.site.disable_action('delete_selected')
def _remove_default_name_fields():
user_display_fields = list(UserAdmin.list_display)
user_display_fields.remove('first_name')
user_display_fields.remove('last_name')
return tuple(user_display_fields)
def export_user_list_to_excel(a,b,c):
#Custom Method to export user details.
list = []
for ngo_user in NGOUserProfile.objects.all():
try:
user = User.objects.get(id=ngo_user.user_id)
if is_required(user) and not user.is_superuser:
details = []
details.append(user.first_name + ' ' + user.last_name)
details.append(user.username)
org_id = ngo_user.org_id
organization = Organization.objects.get(org_id = org_id)
details.append(organization.name)
details.append(organization.status)
details.append(organization.language)
details.append(user_role(user))
list.append(details)
except Exception:
continue
headers = ['Name', 'email', 'Organization Name', 'Status', 'Account language','User Role']
response = create_excel_response(headers,list,'user_list')
return response
admin.site.unregister(Group)
admin.site.unregister(User)
admin.site.register(OrganizationSetting, OrganizationSettingAdmin)
admin.site.register(OutgoingNumberSetting, admin.ModelAdmin)
admin.site.register(SMSC, admin.ModelAdmin)
admin.site.register(MessageTracker, MessageTrackerAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(Network, NetworkAdmin)
admin.site.register(User, DWUserAdmin)
| 44.194595 | 218 | 0.677471 |
cddabebcaa2a91087a2f9f94dcd4f545a6f38cff | 283 | py | Python | tests/conftest.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
] | null | null | null | tests/conftest.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
] | 1 | 2022-03-12T20:41:21.000Z | 2022-03-13T06:34:30.000Z | tests/conftest.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
] | null | null | null | import pytest
from unittest import mock
import os
import pathlib
| 23.583333 | 72 | 0.713781 |
cddacc9ad1d4172e5208503da82fc4edfb83363e | 1,870 | py | Python | init/global_eq_FCtest/setup_fns.py | mattzett/GEMINI-examples | 9932cee67e88898bd00c34bab7ac0568e92e40ca | [
"Apache-2.0"
] | 1 | 2020-03-20T22:19:05.000Z | 2020-03-20T22:19:05.000Z | init/global_eq_FCtest/setup_fns.py | mattzett/GEMINI-examples | 9932cee67e88898bd00c34bab7ac0568e92e40ca | [
"Apache-2.0"
] | 3 | 2020-02-14T14:36:27.000Z | 2020-04-03T21:06:27.000Z | init/global_eq_FCtest/setup_fns.py | mattzett/GEMINI-examples | 9932cee67e88898bd00c34bab7ac0568e92e40ca | [
"Apache-2.0"
] | null | null | null | # This is mostly a repeat of model.setup from the pygemini repository except for that it setups up a periodic
# grid for us in full-globe simulations.
from __future__ import annotations
import argparse
from pathlib import Path
import typing as T
import shutil
import os
from gemini3d.config import read_nml
import gemini3d.model
def model_setup(path: Path | dict[str, T.Any], out_dir: Path, gemini_root: Path = None):
"""
top-level function to create a new simulation FROM A FILE config.nml
Parameters
----------
path: pathlib.Path
path (directory or full path) to config.nml
out_dir: pathlib.Path
directory to write simulation artifacts to
"""
# %% read config.nml
if isinstance(path, dict):
cfg = path
elif isinstance(path, (str, Path)):
cfg = read_nml(path)
else:
raise TypeError("expected Path to config.nml or dict with parameters")
if not cfg:
raise FileNotFoundError(f"no configuration found for {out_dir}")
cfg["dphi"]=90.0
cfg["out_dir"] = Path(out_dir).expanduser().resolve()
if gemini_root:
cfg["gemini_root"] = Path(gemini_root).expanduser().resolve(strict=True)
for k in {"indat_size", "indat_grid", "indat_file"}:
cfg[k] = cfg["out_dir"] / cfg[k]
# FIXME: should use is_absolute() ?
for k in {"eq_dir", "eq_archive", "E0dir", "precdir"}:
if cfg.get(k):
cfg[k] = (cfg["out_dir"] / cfg[k]).resolve()
# %% copy input config.nml to output dir
input_dir = cfg["out_dir"] / "inputs"
input_dir.mkdir(parents=True, exist_ok=True)
shutil.copy2(cfg["nml"], input_dir)
os.environ["GEMINI_ROOT"]="~/libs/bin/"
# %% is this equilibrium or interpolated simulation
if "eq_dir" in cfg:
gemini3d.model.interp(cfg)
else:
gemini3d.model.equilibrium(cfg)
| 29.68254 | 109 | 0.652941 |
cddc0485c396754b68315d1f0f82db760ff25dc5 | 2,580 | py | Python | floodfill_pathfinding.py | mnursey/Battlesnake-2021 | 884b9cf1b40c9b03cc49bd1594135e7caf41ee82 | [
"MIT"
] | null | null | null | floodfill_pathfinding.py | mnursey/Battlesnake-2021 | 884b9cf1b40c9b03cc49bd1594135e7caf41ee82 | [
"MIT"
] | null | null | null | floodfill_pathfinding.py | mnursey/Battlesnake-2021 | 884b9cf1b40c9b03cc49bd1594135e7caf41ee82 | [
"MIT"
] | null | null | null | import board
| 27.446809 | 117 | 0.445736 |
cddc0ce80665ce382edeabc67713697083130041 | 3,736 | py | Python | Gobot-Omni/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
] | null | null | null | Gobot-Omni/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
] | 2 | 2019-06-17T23:38:23.000Z | 2019-06-17T23:39:43.000Z | Gobot-Omni/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
] | null | null | null | import wpilib
import ctre
from wpilib.drive import DifferentialDrive
from wpilib.interfaces import GenericHID
#MOTOR PORTS
LEFT = 1
RIGHT = 3
CENTER1 = 2
CENTER2 = 4
#BALL MANIPULATOR
BALL_MANIP_ID = 5
GATHER_SPEED = 1.0
SPIT_SPEED = -1.0
STOP_SPEED = 0.0
LEFT_HAND = GenericHID.Hand.kLeft
RIGHT_HAND = GenericHID.Hand.kRight
def deadzone(val, deadzone):
if abs(val) < deadzone:
return 0
elif val < (0):
x = ((abs(val) - deadzone)/(1-deadzone))
return (-x)
else:
x = ((val - deadzone)/(1-deadzone))
return (x)
if __name__ == "__main__":
wpilib.run(MyRobot) | 27.470588 | 81 | 0.646413 |
cddcaaf10bf47f30133fae7ab0e9db139ac2e1cc | 1,789 | py | Python | src/tests/test_decorators.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
] | 11 | 2017-03-01T18:00:30.000Z | 2021-12-10T05:11:02.000Z | src/tests/test_decorators.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
] | 397 | 2016-07-08T14:39:46.000Z | 2022-03-30T12:45:09.000Z | src/tests/test_decorators.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
] | 14 | 2016-07-13T08:33:28.000Z | 2020-04-22T21:42:21.000Z | from . import base
from mock import patch
import decorators
| 38.06383 | 97 | 0.653438 |
cddea9a721eee8e3cc13555afb08ee013159480b | 2,158 | py | Python | integration/emulator/device.py | cvlabmiet/master-programming-example | 8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651 | [
"MIT"
] | null | null | null | integration/emulator/device.py | cvlabmiet/master-programming-example | 8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651 | [
"MIT"
] | null | null | null | integration/emulator/device.py | cvlabmiet/master-programming-example | 8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651 | [
"MIT"
] | null | null | null | import re, operator, array
from collections import namedtuple
| 33.2 | 123 | 0.561631 |
cde0f842eb62a19de3f38d4d8d1f8ff65a2ce325 | 10,538 | py | Python | Ramsey_RF_generator.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | Ramsey_RF_generator.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | Ramsey_RF_generator.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | """
Ramsay RSG1000B RF Signal Generator, controlled via RS-323 interface
See: Ramsay RSG1000B RF Signal Generator User Guide, p.10-11
Settings: 9600 baud, 8 bits, parity none, stop bits 1, flow control none
DB09 connector pin 2 = TxD, 3 = RxD, 5 = Ground
The controller accepts unterminate ASCII text commands and generates newline-
terminated ASCII text replies.
Commands:
{255 - Initiate communication by addressing device number 255 (default device
number). Reply "\r\n". (Before that command all command with be ignored.)
GO - Get " RF ON\r\n" or " RF OFF\r\n"
O - Toggle RF on/off, reply: " "
Cabling:
"Pico8" iMac -> Prolific USB-SErial 2303 cable -> DB--9 female gender changer ->
Ramsay RSG1000B RF Signal Generator, DB-9 male serial port
Authors: Friedrich Schotte
Date created: 2018-01-22
Date last modified: 2018-01-23
"""
from logging import error,warn,info,debug
__version__ = "1.0"
def __query__(self,command,count=None):
"""Send a command to the controller and return the reply"""
from time import time
from sleep import sleep
sleep(self.last_reply_time + self.wait_time - time())
self.write(command)
reply = self.read(count=count)
self.last_reply_time = time()
return reply
def write(self,command):
"""Send a command to the controller"""
if self.port is not None:
self.port.write(command)
debug("%s: Sent %r" % (self.port.name,command))
def read(self,count=None,port=None):
"""Read a reply from the controller,
count: from non-terminated replies: number of bytes expected
If count is None, a newline or carriage return is expected to
terminate the reply"""
##debug("read count=%r,port=%r" % (count,port))
if port is None: port = self.port
if port is not None:
port.timeout = self.timeout
if count:
#print("in wait:" + str(self.port.inWaiting()))
debug("Trying to read %r bytes from %s..." % (count,port.name))
reply = port.read(count)
else:
debug("Expecting newline terminated reply from %s..." % (port.name))
reply = port.readline()
debug("%s: Read %r" % (port.name,reply))
else: reply = ""
return reply
def init_communications(self):
"""To do before communncating with the controller"""
from os.path import exists
from serial import Serial
if self.port is not None:
try:
info("Checking whether device is still responsive...")
self.port.write(self.id_query)
debug("%s: Sent %r" % (self.port.name,self.id_query))
reply = self.read(count=self.id_reply_length)
if not self.id_reply_valid(reply):
debug("%s: %r: invalid reply %r" % (self.port.name,self.id_query,reply))
info("%s: lost connection" % self.port.name)
self.port = None
else: info("Device is still responsive.")
except Exception,msg:
debug("%s: %s" % (Exception,msg))
self.port = None
if self.port is None:
port_basenames = ["COM"] if not exists("/dev") \
else ["/dev/tty.usbserial","/dev/ttyUSB"]
for i in range(-1,50):
for port_basename in port_basenames:
port_name = port_basename+("%d" % i if i>=0 else "")
##debug("Trying port %s..." % port_name)
try:
port = Serial(port_name,baudrate=self.baudrate)
port.write(self.id_query)
debug("%s: Sent %r" % (port.name,self.id_query))
reply = self.read(count=self.id_reply_length,port=port)
if self.id_reply_valid(reply):
self.port = port
info("Discovered device at %s based on reply %r" % (self.port.name,reply))
break
except Exception,msg: debug("%s: %s" % (Exception,msg))
if self.port is not None: break
def get_RF_on(self):
"""Is radiofrequency output enabled?"""
debug("Reading radiofrequency output state")
reply = self.query("GO") # ' RF OFF\r\n'
value = "RF ON" in reply
if not "RF " in reply:
warn("Reading radiofrequency output state unreadable")
from numpy import nan
value = nan
return value
RF_on = property(get_RF_on,set_RF_on)
VAL = RF_on
Ramsey_RF_driver = RamseyRFDriver()
Ramsey_RF_IOC = RamseyRF_IOC()
def run_IOC():
"""Serve the Ensemble IPAQ up on the network as EPICS IOC"""
import logging
from tempfile import gettempdir
logfile = gettempdir()+"/Ramsey_RF.log"
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)s: %(message)s",
filename=logfile,
)
Ramsey_RF_IOC.run()
def alias(name):
    """Make property given by name be known under a different name.

    Returns a read/write property that forwards attribute access on the
    owning instance to the attribute called *name*.
    """
    # FIX: the original body was 'return property(get,set)' with 'get' and
    # 'set' undefined in this scope, raising NameError; define the
    # forwarding accessors as closures over *name*.
    def get(self):
        return getattr(self, name)

    def set(self, value):
        setattr(self, name, value)

    return property(get, set)
from EPICS_motor import EPICS_motor
Ramsey_RF_generator = RamseyRF(prefix="NIH:RF",name="Ramsey_RF")
def binstr(n):
    """binary number representation of n (no leading zeros; '' for 0)"""
    digits = []
    significant = False          # becomes True at the first 1-bit seen
    for bit_index in range(31, -1, -1):
        bit = (n >> bit_index) & 1
        if bit:
            significant = True
        if significant:
            digits.append("1" if bit else "0")
    return "".join(digits)
if __name__ == "__main__": # for testing
    from sys import argv
    # Start the EPICS IOC server if requested on the command line.
    if "run_IOC" in argv: run_IOC()
    from pdb import pm  # post-mortem debugger, handy in an interactive session
    import logging
    logging.basicConfig(level=logging.DEBUG,
        format="%(asctime)s %(levelname)s: %(message)s")
    self = Ramsey_RF_driver # for debugging
    # Sample expressions to copy/paste into the interactive prompt.
    print('Ramsey_RF_driver.init_communications()')
    print("Ramsey_RF_driver.port_name")
    print("Ramsey_RF_driver.RF_on")
    print("Ramsey_RF_IOC.run()")
    print("run_IOC()")
| 36.089041 | 101 | 0.584836 |
cde603f7c8844ec9a35ff25bb6a1c13b5f4bbf79 | 819 | py | Python | modules/file_helper.py | dada00321/ntust_moodle_resource_crawler | cc5d424ab9440d8e67bb072977fc58740d8bc968 | [
"MIT"
] | null | null | null | modules/file_helper.py | dada00321/ntust_moodle_resource_crawler | cc5d424ab9440d8e67bb072977fc58740d8bc968 | [
"MIT"
] | null | null | null | modules/file_helper.py | dada00321/ntust_moodle_resource_crawler | cc5d424ab9440d8e67bb072977fc58740d8bc968 | [
"MIT"
] | null | null | null | import json
def save_json(dict_, json_filepath):
    """Serialise *dict_* as JSON into the file at *json_filepath*.

    Parameters
    ----------
    dict_ : dict
        JSON-serialisable mapping to write.
    json_filepath : str
        Destination path; the file is created or overwritten.

    Example
    -------
    >>> save_json({"0": {"title": "test-A", "is-available": False}}, "dict_.txt")
    """
    # Pin the encoding so output does not depend on the platform default.
    with open(json_filepath, 'w', encoding='utf-8') as output_file:
        json.dump(dict_, output_file)
def load_json(json_filepath):
    """Read the JSON file at *json_filepath* and return the parsed object.

    Parameters
    ----------
    json_filepath : str
        Path of an existing JSON document.

    Returns
    -------
    dict
        The deserialised content (typically a mapping).
    """
    with open(json_filepath, 'r', encoding='utf-8') as json_file:
        dict_ = json.load(json_file)
    return dict_
cde6ca9c0b5b99aea51fe8a0efe3ed98163008e0 | 17,570 | py | Python | win/pywinauto/findbestmatch.py | sk8darr/BrowserRefresh-Sublime | daee0eda6480c07f8636ed24e5c555d24e088886 | [
"MIT",
"Unlicense"
] | 191 | 2015-01-02T12:17:07.000Z | 2021-05-26T09:26:05.000Z | win/pywinauto/findbestmatch.py | sk8darr/BrowserRefresh-Sublime | daee0eda6480c07f8636ed24e5c555d24e088886 | [
"MIT",
"Unlicense"
] | 48 | 2015-01-14T00:57:36.000Z | 2021-04-06T21:45:42.000Z | win/pywinauto/findbestmatch.py | sk8darr/BrowserRefresh-Sublime | daee0eda6480c07f8636ed24e5c555d24e088886 | [
"MIT",
"Unlicense"
] | 36 | 2015-01-14T18:54:25.000Z | 2021-07-18T10:54:42.000Z | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Module to find the closest match of a string in a list"
__revision__ = "$Revision: 679 $"
import re
import difflib
from . import fuzzydict
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance
# need to use sets.Set for python 2.3 compatability
# but 2.6 raises a deprecation warning about sets module
try:
set
except NameError:
import sets
set = sets.Set
find_best_control_match_cutoff = .6
#====================================================================
_cache = {}
# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
"Get the match ratio of how each item in texts compared to match_against"
# now time to figre out the matching
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
global cache
for text in texts:
if 0:
pass
if (text, match_against) in _cache:
ratios[text] = _cache[(text, match_against)]
elif(match_against, text) in _cache:
ratios[text] = _cache[(match_against, text)]
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(unicode(match_against), unicode(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text] = ratio
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
_cache[(match_against, text)] = ratios[text]
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
return ratios, best_ratio, best_text
#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
    """Return the item whose text best matches *search_text*.

    * **search_text** The text to search for
    * **item_texts** The list of texts to search through
    * **items** The list of items corresponding (1 to 1) to the
      list of texts to search through.
    * **limit_ratio** Minimum acceptable ratio for the best match;
      below it no item is considered a match and a MatchError is
      raised (default = .5).
    """
    search_text = _cut_at_tab(search_text)

    # Map each cleaned text to its item; UniqueDict disambiguates
    # texts that occur more than once.
    text_item_map = UniqueDict()
    for item_text, item in zip(item_texts, items):
        text_item_map[_cut_at_tab(item_text)] = item

    candidate_texts = list(text_item_map.keys())
    ratios, best_ratio, best_text = _get_match_ratios(candidate_texts, search_text)

    if best_ratio < limit_ratio:
        raise MatchError(items = candidate_texts, tofind = search_text)

    return text_item_map[best_text]
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)
def _cut_at_tab(text):
"Clean out non characters from the string and return it"
# remove anything after the first tab
return _after_tab.sub("", text)
def _clean_non_chars(text):
"Remove non word characters"
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
def IsAboveOrToLeft(ref_control, other_ctrl):
    "Return true if the other_ctrl is above or to the left of ref_control"
    other_rect = other_ctrl.Rectangle()
    ref_rect = ref_control.Rectangle()

    # Reject candidates that start to the right of the reference control.
    if other_rect.left >= ref_rect.right:
        return False

    # Reject candidates that start below the reference control.
    if other_rect.top >= ref_rect.bottom:
        return False

    # Reject candidates whose top-left corner is at/past the reference's
    # top-left corner - those are neither above nor to the left.
    if other_rect.top >= ref_rect.top and other_rect.left >= ref_rect.left:
        return False

    return True
#====================================================================
# Maximum distance (in the Manhattan metric used below) at which a text
# control is still considered a usable label.  (Name kept as-is - the
# misspelling 'cuttoff' may be relied on elsewhere.)
distance_cuttoff = 999
def GetNonTextControlName(ctrl, controls):
    """return the name for this control by finding the closest
    text control above and to its left

    Returns a list of candidate names, each built as
    label_text + ctrl.FriendlyClassName().  NOTE(review): the final
    best_name is appended unconditionally, so the list's last entry is
    '' + FriendlyClassName-suffix-less empty string when no label was
    found within distance_cuttoff.
    """
    names = []

    ctrl_index = controls.index(ctrl)
    # Special case: if the immediately preceding control is a visible
    # Static label above/left of us, use its text first.
    if ctrl_index != 0:
        prev_ctrl = controls[ctrl_index-1]

        if prev_ctrl.FriendlyClassName() == "Static" and \
            prev_ctrl.IsVisible() and prev_ctrl.WindowText() and \
            IsAboveOrToLeft(ctrl, prev_ctrl):

            names.append(
                prev_ctrl.WindowText() +
                    ctrl.FriendlyClassName())

    # get the visible text controls so that we can get
    # the closest text if the control has no text
    text_ctrls = [ctrl_ for ctrl_ in controls
        if ctrl_.IsVisible() and ctrl_.WindowText() and ctrl_.can_be_label]

    best_name = ''
    closest = distance_cuttoff

    # now for each of the visible text controls
    for text_ctrl in text_ctrls:

        # get aliases to the control rectangles
        text_r = text_ctrl.Rectangle()
        ctrl_r = ctrl.Rectangle()

        # skip controls where text win is to the right of ctrl
        if text_r.left >= ctrl_r.right:
            continue

        # skip controls where text win is below ctrl
        if text_r.top >= ctrl_r.bottom:
            continue

        # Distance heuristic: a label is either above or to the left, so
        # measure from the non-text control's top-left corner to both the
        # text control's bottom-left (label above) and top-right (label to
        # the left), and take the minimum.  Manhattan (L1) distance is used
        # instead of Euclidean - only the *comparative* ordering matters
        # for picking the closest label, and it is cheaper to compute.
        # (The original Euclidean version is kept below for reference.)
        # (x^2 + y^2)^.5
        #distance = (
        #    (text_r.left - ctrl_r.left) ** 2 +  # (x^2 + y^2)
        #    (text_r.bottom - ctrl_r.top) ** 2) \
        #    ** .5 # ^.5
        #distance2 = (
        #    (text_r.right - ctrl_r.left) ** 2 +  # (x^2 + y^2)
        #    (text_r.top - ctrl_r.top) ** 2) \
        #    ** .5 # ^.5
        distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
        distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)

        distance = min(distance, distance2)

        # if this distance was closer then the last one
        if distance < closest:
            closest = distance
            best_name = text_ctrl.WindowText() + ctrl.FriendlyClassName()

    names.append(best_name)

    return names
#====================================================================
def get_control_names(control, allcontrols):
    "Return the candidate names for this control (as a de-duplicated set)"
    friendly_class = control.FriendlyClassName()

    # Every control can at least be addressed by its friendly class name.
    names = [friendly_class]

    window_text = control.WindowText()
    if window_text and control.has_title:
        # Titled control: use its own text, and text + class name.
        names.append(window_text)
        names.append(window_text + friendly_class)
    else:
        # No usable text of its own - borrow the text of the nearest
        # visible text control (e.g. a Static label next to it).
        nearby_names = GetNonTextControlName(control, allcontrols)
        if nearby_names:
            names.extend(nearby_names)

    # make sure there are no duplicates
    return set(names)
#====================================================================
#====================================================================
def build_unique_dict(controls):
    """Build the disambiguated list of controls

    Separated out to a different function so that we can get
    the control identifiers for printing.
    """
    name_control_map = UniqueDict()
    # Register every candidate name of every control; UniqueDict takes
    # care of disambiguating names that occur more than once.
    for control in controls:
        for candidate_name in get_control_names(control, controls):
            name_control_map[candidate_name] = control
    return name_control_map
#====================================================================
def find_best_control_matches(search_text, controls):
    """Returns the controls that are the best match to search_text

    This is slightly different from find_best_match in that it builds
    up the list of text items to search through using information
    from each control. So for example if there is an OK Button
    then the following are all added to the search list:
    "OK", "Button", "OKButton"

    But if there is a ListView (which do not have visible 'text')
    then it will just add "ListView".

    Raises MatchError if the best ratio found is below
    find_best_control_match_cutoff.
    """
    name_control_map = build_unique_dict(controls)

    search_text = str(search_text)

    # Try four matching strategies and keep whichever scores highest:
    # exact text, case-insensitive, non-word-chars removed, and both.
    best_ratio, best_texts = name_control_map.FindBestMatches(search_text)

    best_ratio_ci, best_texts_ci = \
        name_control_map.FindBestMatches(search_text, ignore_case = True)

    best_ratio_clean, best_texts_clean = \
        name_control_map.FindBestMatches(search_text, clean = True)

    best_ratio_clean_ci, best_texts_clean_ci = \
        name_control_map.FindBestMatches(
            search_text, clean = True, ignore_case = True)

    # keep the strategy with the highest ratio (later ones win ties' strict >)
    if best_ratio_ci > best_ratio:
        best_ratio = best_ratio_ci
        best_texts = best_texts_ci

    if best_ratio_clean > best_ratio:
        best_ratio = best_ratio_clean
        best_texts = best_texts_clean

    if best_ratio_clean_ci > best_ratio:
        best_ratio = best_ratio_clean_ci
        best_texts = best_texts_clean_ci

    if best_ratio < find_best_control_match_cutoff:
        raise MatchError(items = list(name_control_map.keys()), tofind = search_text)

    # map every winning text back to its control
    return [name_control_map[best_text] for best_text in best_texts]
#
#def GetControlMatchRatio(text, ctrl):
# # get the texts for the control
# ctrl_names = get_control_names(ctrl)
#
# #get the best match for these
# matcher = UniqueDict()
# for name in ctrl_names:
# matcher[name] = ctrl
#
# best_ratio, unused = matcher.FindBestMatches(text)
#
# return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
# name_control_map = UniqueDict()
#
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
#
# match_ratios, best_ratio, best_text = \
# _get_match_ratios(name_control_map.keys(), search_text)
#
# return match_ratios, best_ratio, best_text,
| 32.657993 | 86 | 0.594878 |
cde9443d5f9dce44149feca0d10e665a2fbcf090 | 1,074 | py | Python | setup.py | boichee/fabricator | 33ad4fa615c153817b014d6b7fe9807f1752db25 | [
"MIT"
] | 11 | 2018-07-09T07:08:16.000Z | 2018-07-13T14:05:46.000Z | setup.py | boichee/fabricator | 33ad4fa615c153817b014d6b7fe9807f1752db25 | [
"MIT"
] | 3 | 2020-03-24T17:37:47.000Z | 2021-02-02T22:18:59.000Z | setup.py | boichee/fabricator | 33ad4fa615c153817b014d6b7fe9807f1752db25 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
exclude_dirs = ['ez_setup', 'examples', 'tests', 'venv']
# Runtime requirements
reqs = [
'requests',
'six',
'future',
'aenum'
]
# Requirements for testing
test_reqs = ['pytest', 'hypothesis', 'requests_mock']
# Requirements for setup
setup_reqs = ['flake8', 'pep8', 'pytest-runner']
setup(
name='fabricate-it',
version='1.1.0',
author='Brett Levenson',
author_email='blevenson@apple.com',
description='A library that makes creating API clients simple and declarative',
url='https://github.com/boichee/fabricator',
packages=find_packages(exclude=exclude_dirs),
install_requires=reqs,
tests_require=test_reqs,
setup_requires=setup_reqs,
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Topic :: Software Development',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers'
]
)
| 26.85 | 83 | 0.650838 |
cde9a03010ce87292ba1da645b7d397d96cc724e | 115 | py | Python | aitoolbox/cloud/__init__.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | [
"MIT"
] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | aitoolbox/cloud/__init__.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | aitoolbox/cloud/__init__.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | null | null | null | s3_available_options = ['s3', 'aws_s3', 'aws']
gcs_available_options = ['gcs', 'google_storage', 'google storage']
| 38.333333 | 67 | 0.713043 |
cde9dfcf27b3e92945a09440ebd5cd1eb09e8452 | 12,607 | py | Python | src/gan/ccgan/ccGAN.py | matkir/Master_programs | 70c4c399f9c9fc3e1643e78694223b24d7b94b18 | [
"MIT"
] | null | null | null | src/gan/ccgan/ccGAN.py | matkir/Master_programs | 70c4c399f9c9fc3e1643e78694223b24d7b94b18 | [
"MIT"
] | null | null | null | src/gan/ccgan/ccGAN.py | matkir/Master_programs | 70c4c399f9c9fc3e1643e78694223b24d7b94b18 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
if __name__=='__main__':
from cc_weights import Weight_model
else:
from . import Weight_model
from keras.models import load_model
import keras.backend as K
import plotload
import sys
from selector import Selector
#from masker import mask_from_template,mask_randomly_square,mask_green_corner,combine_imgs_with_mask
import masker as ms
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import cutter
import masker
if __name__ == '__main__':
cc = CCgan(256,256)
#cc.build_model()
#cc.train_model()
cc.load_model()
#cc.load_model_weights()
w=cc.build_wrapper()
root='/home/mathias/Documents/kvasir-dataset-v2/med/'
cc.sort_folder(w,path=root)
cc.sort_folder(w,path='/media/mathias/A_New_Hope/medico_test/')
| 41.199346 | 142 | 0.546601 |
cdea49e6abeeb4e6ceb631ab1583ede7c457b5ed | 6,585 | py | Python | synapse/server.py | uroborus/synapse | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | [
"Apache-2.0"
] | 1 | 2021-09-09T08:50:20.000Z | 2021-09-09T08:50:20.000Z | synapse/server.py | uroborus/synapse | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | [
"Apache-2.0"
] | null | null | null | synapse/server.py | uroborus/synapse | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides some classes for setting up (partially-populated)
# homeservers; either as a full homeserver as a real application, or a small
# partial one for unit test mocking.
# Imports required for the default HomeServer() implementation
from synapse.federation import initialize_http_replication
from synapse.api.events import serialize_event
from synapse.api.events.factory import EventFactory
from synapse.notifier import Notifier
from synapse.api.auth import Auth
from synapse.handlers import Handlers
from synapse.rest import RestServletFactory
from synapse.state import StateHandler
from synapse.storage import DataStore
from synapse.types import UserID, RoomAlias, RoomID
from synapse.util import Clock
from synapse.util.distributor import Distributor
from synapse.util.lockutils import LockManager
from synapse.streams.events import EventSources
from synapse.api.ratelimiting import Ratelimiter
def parse_roomalias(self, s):
    """Return the :class:`RoomAlias` represented by the string *s*,
    bound to this homeserver."""
    return RoomAlias.from_string(s, hs=self)
def parse_roomid(self, s):
    """Return the :class:`RoomID` represented by the string *s*,
    bound to this homeserver."""
    return RoomID.from_string(s, hs=self)
# Build magic accessors for every dependency
for depname in BaseHomeServer.DEPENDENCIES:
BaseHomeServer._make_dependency_method(depname)
| 31.658654 | 79 | 0.665907 |
cdea57f865285710bac46af78cc530224ae5efeb | 359 | py | Python | pythonforandroid/recipes/kivy/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | [
"MIT"
] | 1 | 2015-06-09T21:12:09.000Z | 2015-06-09T21:12:09.000Z | pythonforandroid/recipes/kivy/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | [
"MIT"
] | null | null | null | pythonforandroid/recipes/kivy/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | [
"MIT"
] | null | null | null |
from pythonforandroid.toolchain import CythonRecipe, shprint, current_directory, ArchAndroid
from os.path import exists, join
import sh
import glob
recipe = KivyRecipe()
| 21.117647 | 92 | 0.721448 |
cdeaa27ba25e454daf95595f163fae1a13887999 | 1,220 | py | Python | chat.py | Programmer-RD-AI/Learning-NLP-PyTorch | 5780598340308995c0b8436d3031aa58ee7b81da | [
"Apache-2.0"
] | null | null | null | chat.py | Programmer-RD-AI/Learning-NLP-PyTorch | 5780598340308995c0b8436d3031aa58ee7b81da | [
"Apache-2.0"
] | null | null | null | chat.py | Programmer-RD-AI/Learning-NLP-PyTorch | 5780598340308995c0b8436d3031aa58ee7b81da | [
"Apache-2.0"
] | null | null | null | import random
import json
import torch
from model import NeuralNet
from nltk_utils import *
# NOTE(review): assumes a CUDA-capable GPU is present - confirm, or fall
# back to "cpu" when torch.cuda.is_available() is False.
device = "cuda"
# Intent definitions (tags, patterns, canned responses).
with open('intents.json','r') as f:
    intents = json.load(f)
# Restore the trained model weights plus vocabulary/label metadata.
FILE = 'data.pth'
data = torch.load(FILE)
input_size = data['input_size']
output_size = data['output_size']
hidden_size = data['hidden_size']
all_words = data['all_words']
tags = data['tags']
model_state = data['model_state']
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()  # inference mode
bot_name = 'Programmer-RD-AI'
print('Lets chat ! type "quit" to exit')
# Interactive loop: classify each user sentence into an intent tag.
while True:
    sentence = input('You : ')
    if sentence == 'quit':
        break
    # Tokenise and encode the sentence as a bag-of-words vector,
    # then add a leading batch dimension of 1.
    sentence = tokenize(sentence)
    X = bag_of_words(sentence,all_words)
    X = X.reshape(1,X.shape[0])
    X = torch.from_numpy(X).to(device)
    pred = model(X)
    pred_ = pred.clone()
    # argmax over the logits -> predicted tag index
    _,pred = torch.max(pred,dim=1)
    tag = tags[pred.item()]
    # softmax over the raw logits gives a confidence for the top tag
    probs = torch.softmax(pred_,dim=1)
    prob = probs[0][pred.item()]
    if prob.item() > 0.75:
        # confident enough: answer with a random response for the tag
        for intent in intents['intents']:
            if tag == intent['tag']:
                print(f'{bot_name}: {random.choice(intent["responses"])}')
    else:
        print(f'{bot_name}: IDK..')
| 29.047619 | 74 | 0.648361 |
cdeb4f70b3a53f9cb3413cfdc25708394eec070f | 329 | py | Python | Jay_Redis/Jay_Redis/utils/GetProxyIp.py | kimsax/gratuation_project | 99f842753f3403d3fdcd62316cf6a8d0091a5871 | [
"MIT"
] | 1 | 2021-03-02T08:04:25.000Z | 2021-03-02T08:04:25.000Z | Jay_Redis/Jay_Redis/utils/GetProxyIp.py | kimsax/gratuation_project | 99f842753f3403d3fdcd62316cf6a8d0091a5871 | [
"MIT"
] | null | null | null | Jay_Redis/Jay_Redis/utils/GetProxyIp.py | kimsax/gratuation_project | 99f842753f3403d3fdcd62316cf6a8d0091a5871 | [
"MIT"
] | null | null | null | # -*- utf-8 -*-
import random
import redis
import requests
# GetIps()
| 20.5625 | 60 | 0.580547 |
cdeb9ce72d1bf949c4fdc2a94b43168b90c61e61 | 876 | py | Python | setup.py | tbz-pariv/ftpservercontext | 426e98dd4983cc6977c4d071a831874726c0fae2 | [
"Apache-2.0"
] | 2 | 2019-04-18T12:28:58.000Z | 2021-01-04T14:52:07.000Z | setup.py | tbz-pariv/ftpservercontext | 426e98dd4983cc6977c4d071a831874726c0fae2 | [
"Apache-2.0"
] | 1 | 2019-01-04T14:46:00.000Z | 2019-06-04T12:31:45.000Z | setup.py | tbz-pariv/ftpservercontext | 426e98dd4983cc6977c4d071a831874726c0fae2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import setuptools
setuptools.setup(
name='ftpservercontext',
version='2018.3.0',
license='commercial',
author='Thomas Guettler',
author_email='guettliml.ftpservercontext@thomas-guettler.de',
url='https://github.com/tbz-pariv/ftpservercontext',
long_description=open('README.rst').read(),
packages=setuptools.find_packages(),
zip_safe = False,
# https://www.tbz-pariv.lan/index.html/doku.php?id=python_packages#requirementstxt_vs_install_requires
# All reusable libraries use install_requires.
# Projects (containing only config) can use requirements.txt
install_requires=[
'pyftpdlib',
],
include_package_data=True,
entry_points={
'console_scripts': [
'serve_directory_via_ftp=ftpservercontext.console_scripts:serve_directory_via_ftp',
],
}
)
| 29.2 | 106 | 0.696347 |
cdecd7c4bafe572b5e961bd73c1a75878f9feaa8 | 3,428 | py | Python | zoomeye/cli.py | r0oike/zoomeye-python | b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | [
"Apache-2.0"
] | null | null | null | zoomeye/cli.py | r0oike/zoomeye-python | b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | [
"Apache-2.0"
] | null | null | null | zoomeye/cli.py | r0oike/zoomeye-python | b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
* Filename: cli.py
* Description: cli program entry
* Time: 2020.11.30
* Author: liuf5
*/
"""
import os
import sys
import argparse
module_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(1, module_path)
from zoomeye import core
def main():
    """
    Entry point: build the argument parser (info / search / init
    sub-commands), parse the user's args and dispatch to the matching
    handler in :mod:`core`.
    :return: None
    """
    parser = ZoomEyeParser()
    subparsers = parser.add_subparsers()
    # zoomeye account info
    parser_info = subparsers.add_parser("info", help="Show ZoomEye account info")
    parser_info.set_defaults(func=core.info)
    # query zoomeye data
    parser_search = subparsers.add_parser(
        "search",
        help="Search the ZoomEye database"
    )
    parser_search.add_argument(
        "dork",
        help="The ZoomEye search keyword or ZoomEye exported file"
    )
    parser_search.add_argument(
        "-num",
        default=20,
        help="The number of search results that should be returned",
        type=int,
        metavar='value'
    )
    # nargs='?' + const: "-facet" alone means "all fields"
    parser_search.add_argument(
        "-facet",
        default=None,
        nargs='?',
        const='app,device,service,os,port,country,city',
        type=str,
        help=('''
            Perform statistics on ZoomEye database,
            field: [app,device,service,os,port,country,city]
            '''),
        metavar='field'
    )
    parser_search.add_argument(
        "-filter",
        default=None,
        metavar='field=regexp',
        nargs='?',
        const='app',
        type=str,
        help=('''
            Output more clearer search results by set filter field,
            field: [app,version,device,port,city,country,asn,banner,*]
            ''')
    )
    parser_search.add_argument(
        '-stat',
        default=None,
        metavar='field',
        nargs='?',
        const='app,device,service,os,port,country,city',
        type=str,
        help=('''
            Perform statistics on search results,
            field: [app,device,service,os,port,country,city]
            ''')
    )
    parser_search.add_argument(
        "-save",
        default=None,
        metavar='field=regexp',
        help=('''
            Save the search results with ZoomEye json format,
            if you specify the field, it will be saved with JSON Lines
            '''),
        nargs='?',
        type=str,
        const='all'
    )
    parser_search.add_argument(
        "-count",
        help="The total number of results in ZoomEye database for a search",
        action="store_true"
    )
    parser_search.set_defaults(func=core.search)
    # initial account configuration related commands
    parser_init = subparsers.add_parser("init", help="Initialize the token for ZoomEye-python")
    parser_init.add_argument("-apikey", help="ZoomEye API Key", default=None, metavar='[api key]')
    parser_init.add_argument("-username", help="ZoomEye account username", default=None, metavar='[username]')
    parser_init.add_argument("-password", help="ZoomEye account password", default=None, metavar='[password]')
    parser_init.set_defaults(func=core.init)
    args = parser.parse_args()
    try:
        args.func(args)
    # no sub-command given -> the namespace has no .func attribute
    except AttributeError:
        parser.print_help()
if __name__ == '__main__':
main()
| 26.369231 | 110 | 0.606768 |
cdece39680fd28858374924d70a1d3ff5d97fb90 | 462 | py | Python | statapy/regression/tests.py | DhananjayAshok/PyStata | b592414d78b87d565d8c59ae9487478a792b8c84 | [
"Apache-2.0"
] | null | null | null | statapy/regression/tests.py | DhananjayAshok/PyStata | b592414d78b87d565d8c59ae9487478a792b8c84 | [
"Apache-2.0"
] | null | null | null | statapy/regression/tests.py | DhananjayAshok/PyStata | b592414d78b87d565d8c59ae9487478a792b8c84 | [
"Apache-2.0"
] | null | null | null | import scipy.stats as stats
def mannwhitneyu(sample_0, sample_1, one_sided=False):
    """
    Performs the Mann-Whitney U test
    :param sample_0: array of values
    :param sample_1: array of values
    :param one_sided: True iff you want to use the 'less' alternative
        hypothesis instead of the default two-sided one
    :return: statistic, pvalue
    """
    alternative = "less" if one_sided else "two-sided"
    result = stats.mannwhitneyu(sample_0, sample_1, alternative=alternative)
    return result.statistic, result.pvalue
cdeceab8b898ec021afc4aa90ddeda2bd76d683c | 862 | py | Python | 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | [
"MIT"
] | null | null | null | 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | [
"MIT"
] | null | null | null | 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | [
"MIT"
] | null | null | null | import cv2 as cv
if __name__ == "__main__":
    # 0 => first (default) webcam connected,
    # 1 => second webcam and so on.
    # cv.CAP_DSHOW selects the DirectShow capture backend (Windows).
    cap = cv.VideoCapture(0, cv.CAP_DSHOW)
    # cv.namedWindow("Window")
    if not cap.isOpened():
        raise IOError("Webcam could not be opened!")
    # Grab-and-show loop: one frame per iteration until ESC or read failure.
    while True:
        res, frame = cap.read() # returns (bool, ndarray)
        # in case any error occurs
        if not res:
            break
        # halve the frame in both dimensions for a lighter preview
        frame = cv.resize(frame, None, fx=.5, fy=.5)
        cv.imshow("Video Stream", frame)
        keyboardInput = cv.waitKey(1)
        if keyboardInput == 27: # ESC button ascii code
            break
    # release the camera and close the preview window
    cap.release()
    cv.destroyAllWindows()
    # you can also replace a normal video with webcam
    # in video capture object, just give it the address of
    # the video instead of 0 or number of your webcam
| 25.352941 | 61 | 0.597448 |
cdf16ad97ffce90e11c1fa4d69eb40752cd40a16 | 3,928 | py | Python | apps/sso/access_requests/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | 3 | 2021-05-16T17:06:57.000Z | 2021-05-28T17:14:05.000Z | apps/sso/access_requests/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | null | null | null | apps/sso/access_requests/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | null | null | null | import datetime
from current_user.models import CurrentUserField
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from sso.accounts.models import Application
from sso.models import AbstractBaseModel, AbstractBaseModelManager
from sso.organisations.models import is_validation_period_active, Organisation
| 39.28 | 111 | 0.649949 |
cdf35f3aa036ddd5079307083d76c1f9e474653b | 1,518 | py | Python | test/snr_test.py | AP-Atul/wavelets | cff71e777759844b35f8e96f14930b2c71a215a1 | [
"MIT"
] | 5 | 2021-02-01T07:43:39.000Z | 2022-03-25T05:01:31.000Z | test/snr_test.py | AP-Atul/wavelets | cff71e777759844b35f8e96f14930b2c71a215a1 | [
"MIT"
] | null | null | null | test/snr_test.py | AP-Atul/wavelets | cff71e777759844b35f8e96f14930b2c71a215a1 | [
"MIT"
] | null | null | null | import os
from time import time
import numpy as np
import soundfile
from matplotlib import pyplot as plt
from wavelet.fast_transform import FastWaveletTransform
from wavelet.util.utility import threshold, mad, snr, amp_to_db
INPUT_FILE = "/example/input/file.wav"
OUTPUT_DIR = "/example/output/"
info = soundfile.info(INPUT_FILE) # getting info of the audio
rate = info.samplerate
WAVELET_NAME = "coif1"
t = FastWaveletTransform(WAVELET_NAME)
outputFileName = os.path.join(OUTPUT_DIR, "_" + WAVELET_NAME + ".wav")
noiseRatios = list()
with soundfile.SoundFile(outputFileName, "w", samplerate=rate, channels=info.channels) as of:
start = time()
for block in soundfile.blocks(INPUT_FILE, int(rate * info.duration * 0.10)): # reading 10 % of duration
coefficients = t.waveDec(block)
# VISU Shrink
sigma = mad(coefficients)
thresh = sigma * np.sqrt(2 * np.log(len(block)))
# thresholding using the noise threshold generated
coefficients = threshold(coefficients, thresh)
# getting the clean signal as in original form and writing to the file
clean = t.waveRec(coefficients)
clean = np.asarray(clean)
of.write(clean)
noiseRatios.append(snr(amp_to_db(clean)))
end = time()
x = []
for i in range(len(noiseRatios)):
x.append(i)
plt.plot(x, np.array(noiseRatios).astype(float))
plt.show()
print(f"Finished processing with {WAVELET_NAME}")
print(f"Time taken :: {end - start} s")
| 29.192308 | 108 | 0.689065 |
cdf4a1acc53ac8000703136e7a930c389adce55b | 1,546 | py | Python | KeyHookThread.py | v2okimochi/Keasy | 0c4d480a4b9fc88f47bbc11ed4ca248cbdc488f2 | [
"MIT"
] | 1 | 2018-10-25T01:31:15.000Z | 2018-10-25T01:31:15.000Z | KeyHookThread.py | v2okimochi/Keasy | 0c4d480a4b9fc88f47bbc11ed4ca248cbdc488f2 | [
"MIT"
] | 1 | 2018-07-16T01:39:39.000Z | 2018-07-16T01:39:39.000Z | KeyHookThread.py | v2okimochi/Keasy | 0c4d480a4b9fc88f47bbc11ed4ca248cbdc488f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pynput import keyboard
from PyQt5.QtCore import QThread, pyqtSignal
| 29.730769 | 72 | 0.599612 |
cdf4c8116e9e2bbcc1b35461d7c08335801aaba4 | 2,243 | py | Python | nad_receiver/nad_commands.py | mindigmarton/nad_receiver | 9449699d076011caf560d8c4384a9b1bf2512080 | [
"MIT"
] | null | null | null | nad_receiver/nad_commands.py | mindigmarton/nad_receiver | 9449699d076011caf560d8c4384a9b1bf2512080 | [
"MIT"
] | null | null | null | nad_receiver/nad_commands.py | mindigmarton/nad_receiver | 9449699d076011caf560d8c4384a9b1bf2512080 | [
"MIT"
] | null | null | null | """
Commands and operators used by NAD.
CMDS[domain][function]
"""
def _spec(command, operators):
    """Build a single command descriptor with its own operator list copy."""
    return {'cmd': command, 'supported_operators': list(operators)}


# Operator sets shared by many commands; copied per entry by _spec so no
# two descriptors alias the same list object.
_FULL = ('+', '-', '=', '?')    # step up/down, assign, query
_STEP = ('+', '-')              # step up/down only

CMDS = {
    'main': {
        'dimmer': _spec('Main.Dimmer', _FULL),
        'mute': _spec('Main.Mute', _FULL),
        'power': _spec('Main.Power', _FULL),
        'volume': _spec('Main.Volume', _FULL),
        'ir': _spec('Main.IR', ('=',)),
        'listeningmode': _spec('Main.ListeningMode', _STEP),
        'sleep': _spec('Main.Sleep', _STEP),
        'source': _spec('Main.Source', _FULL),
        'version': _spec('Main.Version', ('?',)),
    },
    'tuner': {
        'am_frequency': _spec('Tuner.AM.Frequency', _STEP),
        'am_preset': _spec('Tuner.AM.Preset', _FULL),
        'band': _spec('Tuner.Band', _FULL),
        'fm_frequency': _spec('Tuner.FM.Frequency', _STEP),
        'fm_mute': _spec('Tuner.FM.Mute', _FULL),
        'fm_preset': _spec('Tuner.FM.Preset', _FULL),
    },
}
| 30.310811 | 60 | 0.296478 |
cdf8455878051f84938cb9a928fc16329abe82f4 | 7,846 | py | Python | ui/Ui_addsite.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
] | null | null | null | ui/Ui_addsite.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
] | 2 | 2015-06-12T09:28:29.000Z | 2015-06-12T09:34:16.000Z | ui/Ui_addsite.py | eufarn7sp/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'addsite.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 46.702381 | 115 | 0.672827 |
cdf8ea07b2a210313250ff1ccb9f4bf6d44a0053 | 1,772 | py | Python | testmile-setu/setu/dispatcher/broker.py | test-mile/setu | b273a11e7f9462e64a370bda16f1952ecdbfb5a5 | [
"Apache-2.0"
] | null | null | null | testmile-setu/setu/dispatcher/broker.py | test-mile/setu | b273a11e7f9462e64a370bda16f1952ecdbfb5a5 | [
"Apache-2.0"
] | null | null | null | testmile-setu/setu/dispatcher/broker.py | test-mile/setu | b273a11e7f9462e64a370bda16f1952ecdbfb5a5 | [
"Apache-2.0"
] | null | null | null | from enum import Enum, auto
| 23.012987 | 48 | 0.682844 |
cdf93d47f329e66522fe3776469675377c2e7349 | 1,758 | py | Python | leetcode/0566_reshape_the_matrix.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
] | null | null | null | leetcode/0566_reshape_the_matrix.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
] | null | null | null | leetcode/0566_reshape_the_matrix.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
] | null | null | null | """
In MATLAB, there is a very useful function called 'reshape', which can reshape a matrix into
a new one with different size but keep its original data. You're given a matrix represented
by a two-dimensional array, and two positive integers r and c representing the row number and
column number of the desired reshaped matrix, respectively. The reshaped matrix needs to be filled
with all the elements of the original matrix in the same row-traversing order as they were. If
the 'reshape' operation with given parameters is possible and legal, output the new reshaped
matrix; Otherwise, output the original matrix.
Example 1:
Input:
nums = [[1, 2], [3, 4]]
r = 1, c = 4
Output:
[[1, 2, 3, 4]]
Explanation:
The row-traversing of nums is [1, 2, 3, 4]. The new reshaped matrix is a 1 * 4 matrix, fill
it row by row by using the previous list.
Example 2:
Input:
nums = [[1, 2], [3, 4]]
r = 2, c = 4
Output:
[[1, 2], [3, 4]]
Explanation:
There is no way to reshape a 2 * 2 matrix to a 2 * 4 matrix. So output the original matrix.
Note:
1. The height and width of the given matrix is in range [1, 100].
2. The given r and c are all positive.
"""
| 33.807692 | 100 | 0.606371 |
cdfba4673ccb2b05e2ef7ddcaa8aeaa3095e7451 | 4,629 | py | Python | python/main.py | LaraProject/rnn2java | f35b1b98f74864d4310e7866ad5271ae5389292d | [
"MIT"
] | null | null | null | python/main.py | LaraProject/rnn2java | f35b1b98f74864d4310e7866ad5271ae5389292d | [
"MIT"
] | null | null | null | python/main.py | LaraProject/rnn2java | f35b1b98f74864d4310e7866ad5271ae5389292d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import socket
import select
from time import sleep
import message_pb2
from google.protobuf.internal import encoder
import tensorflow as tf
from tensorflow.keras import preprocessing
import pickle
import numpy as np
## RNN part
# Load the inference model
# Load the tokenizer
# Talking with our Chatbot
### END RNN PART ###
# Port used for the socket communication (socket/select are imported
# above); whether this side listens or connects is defined in code
# outside this excerpt -- confirm before reuse.
PORT = 9987
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt -- it is
    # presumably defined elsewhere in the full module; confirm before
    # running this file standalone.
    main()
| 32.598592 | 140 | 0.712249 |
cdfbbb1e16902c1d3761509ecf7d21633da2152a | 161,322 | py | Python | dlkit/json_/authorization/sessions.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | dlkit/json_/authorization/sessions.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | dlkit/json_/authorization/sessions.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | """JSON implementations of authorization sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from bson.objectid import ObjectId
from . import objects
from . import queries
from .. import utilities
from ..id.objects import IdList
from ..osid import sessions as osid_sessions
from ..osid.sessions import OsidSession
from ..primitives import DateTime
from ..primitives import Id
from ..primitives import Type
from ..utilities import JSONClientValidated
from ..utilities import PHANTOM_ROOT_IDENTIFIER
from ..utilities import overlap
from dlkit.abstract_osid.authorization import sessions as abc_authorization_sessions
from dlkit.abstract_osid.authorization.objects import AuthorizationForm as ABCAuthorizationForm
from dlkit.abstract_osid.authorization.objects import VaultForm as ABCVaultForm
from dlkit.abstract_osid.id.primitives import Id as ABCId
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.type.primitives import Type as ABCType
# Sort-direction flags; the values (-1 / 1) match the pymongo convention
# used by the JSONClientValidated backend imported above.
DESCENDING = -1
ASCENDING = 1
# Boolean markers; their consumers are not visible in this excerpt --
# NOTE(review): confirm intended semantics (both are simply ``True``).
CREATED = True
UPDATED = True
# Record ``Type`` identifying "enclosure" osid-objects.
ENCLOSURE_RECORD_TYPE = Type(
    identifier='enclosure',
    namespace='osid-object',
    authority='ODL.MIT.EDU')
# View flags -- presumably the OSID comparative/plenary record views;
# confirm against the dlkit osid session implementations.
COMPARATIVE = 0
PLENARY = 1
# NOTE(review): this assignment references ``get_authorizations``, which
# is not defined at module scope in this excerpt -- it most likely
# belongs inside a session class; confirm placement.
authorizations = property(fget=get_authorizations)
class AuthorizationQuerySession(abc_authorization_sessions.AuthorizationQuerySession, osid_sessions.OsidSession):
    """Search session over ``Authorization`` objects.

    Queries are built with an ``AuthorizationQuery``.  Two independent
    pairs of views control search behaviour:

    * federated vs. isolated -- whether results may come from child
      ``Vaults`` of this vault or only from this ``Vault`` itself
    * implicit vs. explicit -- whether authorizations derived from the
      ``Qualifier``, ``Function`` or ``Resource`` hierarchies are
      included, or only explicitly defined ones
    """

    def get_vault_id(self):
        """Return the ``Id`` of the ``Vault`` associated with this session.

        return: (osid.id.Id) - the ``Vault Id`` associated with this
                session
        *compliance: mandatory -- This method must be implemented.*
        """
        # Same shape as osid.resource.ResourceLookupSession.get_bin_id.
        return self._catalog_id

    vault_id = property(fget=get_vault_id)

    def get_vault(self):
        """Return the ``Vault`` associated with this session.

        return: (osid.authorization.Vault) - the ``Vault`` associated
                with this session
        raise: OperationFailed - unable to complete request
        raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
        """
        # Same shape as osid.resource.ResourceLookupSession.get_bin.
        return self._catalog

    vault = property(fget=get_vault)

    def can_search_authorizations(self):
        """Report whether this user may perform authorization searches.

        ``True`` does not guarantee that any given search succeeds;
        ``False`` means every search method would raise
        ``PermissionDenied``, letting an application hide its search UI.

        return: (boolean) - ``false`` if search methods are not
                authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        # Real authorization hints are expected from a service adapter
        # layered above this implementation, so access is always granted.
        return True

    def use_federated_vault_view(self):
        """Widen searches to include authorizations in child vaults.

        *compliance: mandatory -- This method is must be implemented.*
        """
        self._use_federated_catalog_view()

    def use_isolated_vault_view(self):
        """Restrict searches to authorizations in this vault only.

        *compliance: mandatory -- This method is must be implemented.*
        """
        self._use_isolated_catalog_view()

    def use_implicit_authorization_view(self):
        """Include implicit (derived) authorizations in results.

        Implicit authorizations arise from the ``Qualifier``,
        ``Function`` or ``Resource`` hierarchies.  Opposite of
        ``use_explicit_authorization_view``.

        *compliance: mandatory -- This method is must be implemented.*
        """
        raise errors.Unimplemented()

    def use_explicit_authorization_view(self):
        """Return only explicitly defined authorizations, never implied ones.

        Opposite of ``use_implicit_authorization_view``.

        *compliance: mandatory -- This method is must be implemented.*
        """
        raise errors.Unimplemented()

    def get_authorization_query(self):
        """Return a new ``AuthorizationQuery``.

        return: (osid.authorization.AuthorizationQuery) - the
                authorization query
        *compliance: mandatory -- This method must be implemented.*
        """
        query = queries.AuthorizationQuery(runtime=self._runtime)
        return query

    authorization_query = property(fget=get_authorization_query)

    def can_delete_authorizations(self):
        """Report whether this user may delete ``Authorizations``.

        ``True`` does not guarantee that any given deletion succeeds;
        ``False`` means deletions would raise ``PermissionDenied``,
        letting an application hide its delete affordances.

        return: (boolean) - ``false`` if ``Authorization`` deletion is
                not authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        # See can_search_authorizations for why this is unconditional.
        return True

    def can_manage_authorization_aliases(self):
        """Report whether this user may manage ``Id`` aliases for ``Authorizations``.

        ``True`` does not guarantee that any given aliasing succeeds;
        ``False`` means alias operations would raise
        ``PermissionDenied``.

        return: (boolean) - ``false`` if ``Authorization`` aliasing is
                not authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        # See can_search_authorizations for why this is unconditional.
        return True
# NOTE(review): this assignment references ``get_vaults``, which is not
# defined at module scope in this excerpt -- it most likely belongs
# inside a session class; confirm placement.
vaults = property(fget=get_vaults)
class VaultQuerySession(abc_authorization_sessions.VaultQuerySession, osid_sessions.OsidSession):
    """Search session over ``Vault`` objects.

    Queries are built with a ``VaultQuery``.  A vault may carry a query
    record, indicated by its record types and accessed through the
    ``VaultQuery``.
    """
    _session_namespace = 'authorization.VaultQuerySession'

    def can_search_vaults(self):
        """Report whether this user may perform ``Vault`` searches.

        ``True`` does not guarantee that any given search succeeds;
        ``False`` means every search method would raise
        ``PermissionDenied``, letting an application hide its search UI.

        return: (boolean) - ``false`` if search methods are not
                authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        # Real authorization hints are expected from a service adapter
        # layered above this implementation, so access is always granted.
        return True

    def get_vault_query(self):
        """Return a new ``VaultQuery``.

        return: (osid.authorization.VaultQuery) - a vault query
        *compliance: mandatory -- This method must be implemented.*
        """
        query = queries.VaultQuery(runtime=self._runtime)
        return query

    vault_query = property(fget=get_vault_query)

    def can_delete_vaults(self):
        """Report whether this user may delete vaults.

        ``True`` does not guarantee that any given deletion succeeds;
        ``False`` means deleting a ``Vault`` would raise
        ``PermissionDenied``, letting an application hide its delete
        affordances.

        return: (boolean) - ``false`` if ``Vault`` deletion is not
                authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        # Delegate to the configured catalog session when one exists;
        # otherwise grant access unconditionally (real hints are
        # expected from a service adapter above this implementation).
        if self._catalog_session is None:
            return True
        return self._catalog_session.can_delete_catalogs()

    def can_manage_vault_aliases(self):
        """Report whether this user may manage ``Id`` aliases for ``Vaults``.

        ``True`` does not guarantee that any given aliasing succeeds;
        ``False`` means alias operations would raise
        ``PermissionDenied``.

        return: (boolean) - ``false`` if ``Vault`` aliasing is not
                authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return True
| 45.960684 | 186 | 0.654709 |
cdfc9bf4504d5cc25bda0a98802b00001e0b1b9c | 1,031 | py | Python | LeetCode/516-longest-palindromic-subsequence.py | leaving-voider/LeetCode.cn-Record | 2922cbdab85556bc0625adc9e6ce44849232e4f4 | [
"MIT"
] | null | null | null | LeetCode/516-longest-palindromic-subsequence.py | leaving-voider/LeetCode.cn-Record | 2922cbdab85556bc0625adc9e6ce44849232e4f4 | [
"MIT"
] | null | null | null | LeetCode/516-longest-palindromic-subsequence.py | leaving-voider/LeetCode.cn-Record | 2922cbdab85556bc0625adc9e6ce44849232e4f4 | [
"MIT"
] | null | null | null | ###############################################################################################
# 5.
# dp
###########
# O(n^2)
# O(n^2)
###############################################################################################
| 44.826087 | 131 | 0.354995 |
cdfcd2a90ed7ec6257eb01c41e93f4909519bbec | 3,427 | py | Python | examples/vae.py | zhangyewu/edward | 8ec452eb0a3801df8bda984796034a9e945faec7 | [
"Apache-2.0"
] | 5,200 | 2016-05-03T04:59:01.000Z | 2022-03-31T03:32:26.000Z | examples/vae.py | zhangyewu/edward | 8ec452eb0a3801df8bda984796034a9e945faec7 | [
"Apache-2.0"
] | 724 | 2016-05-04T09:04:37.000Z | 2022-02-28T02:41:12.000Z | examples/vae.py | zhangyewu/edward | 8ec452eb0a3801df8bda984796034a9e945faec7 | [
"Apache-2.0"
] | 1,004 | 2016-05-03T22:45:14.000Z | 2022-03-25T00:08:08.000Z | """Variational auto-encoder for MNIST data.
References
----------
http://edwardlib.org/tutorials/decoder
http://edwardlib.org/tutorials/inference-networks
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import os
import tensorflow as tf
from edward.models import Bernoulli, Normal
from edward.util import Progbar
from observations import mnist
from scipy.misc import imsave
# Command-line flags: data/output locations and model hyperparameters.
tf.flags.DEFINE_string("data_dir", default="/tmp/data", help="")
tf.flags.DEFINE_string("out_dir", default="/tmp/out", help="")
tf.flags.DEFINE_integer("M", default=100, help="Batch size during training.")
tf.flags.DEFINE_integer("d", default=2, help="Latent dimension.")
tf.flags.DEFINE_integer("n_epoch", default=100, help="")
FLAGS = tf.flags.FLAGS
# Ensure the output directory exists before anything writes into it.
if not os.path.exists(FLAGS.out_dir):
  os.makedirs(FLAGS.out_dir)
if __name__ == "__main__":
  # tf.app.run() parses flags and dispatches to a module-level main()
  # defined outside this excerpt -- confirm before running standalone.
  tf.app.run()
| 31.731481 | 77 | 0.676685 |
cdff5880102eb2ba8d22b6cbec2e9bb5407da963 | 2,196 | py | Python | backup.py | BigBlueHat/copy-couch | ab4759540faecae8239c94e8045f7fce1f4a4914 | [
"Apache-2.0"
] | null | null | null | backup.py | BigBlueHat/copy-couch | ab4759540faecae8239c94e8045f7fce1f4a4914 | [
"Apache-2.0"
] | null | null | null | backup.py | BigBlueHat/copy-couch | ab4759540faecae8239c94e8045f7fce1f4a4914 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""copy-couch makes copies of couches. no joke.
License: Apache 2.0 - http://opensource.org/licenses/Apache-2.0
"""
import argparse
import base64
import ConfigParser
import datetime
import json
import requests
argparser = argparse.ArgumentParser()
argparser.add_argument('config_file', type=file,
help="Config INI file. See `config.sample.ini` for info.")
args = argparser.parse_args()
config = ConfigParser.RawConfigParser({
'protocol': 143,
'host': 'localhost:5984'
})
config.readfp(args.config_file)
local_couch = config._sections['local']
local_couch['password'] = base64.b64decode(local_couch['password'])
local_url = local_couch['protocol'] + '://' + local_couch['host'] + '/'
remote_couch = config._sections['remote']
remote_couch['password'] = base64.b64decode(remote_couch['password'])
remote_url = remote_couch['protocol'] + '://' + remote_couch['host'] + '/'
# setup local db session
local_db = requests.Session()
local_db.auth = (local_couch['user'], local_couch['password'])
# setup remote db session
remote_db = requests.Session()
remote_db.auth = (remote_couch['user'], remote_couch['password'])
rv = local_db.get(local_url).json()
uuid = rv['uuid']
rv = local_db.get(local_url + '_all_dbs').json()
# TODO: make which DB's configurable
dbs = [db for db in rv if db[0] != '_']
# create & store one rep_doc per database
for db in dbs:
# create _replicator docs for each DB on local; target remote
rep_doc = {
"_id": "backup~" + datetime.datetime.now().isoformat(),
"source": local_url,
"target": remote_couch['protocol'] + '://' \
+ remote_couch['user'] + ':' + remote_couch['password'] \
+ '@' + remote_couch['host'] + '/backup%2F' + uuid + '%2F',
"create_target": True
}
rep_doc['source'] += db;
rep_doc['target'] += db;
# TODO: make the backup db name configurable / reusable
print 'Copying ' + db
print ' from: ' + local_url
print ' to: ' + remote_url + 'backup%2F' + uuid + '%2F' + db
rv = local_db.post(local_url + '_replicate', json=rep_doc, headers = {
'Content-Type': 'application/json'})
print rv.json()
| 29.675676 | 74 | 0.659836 |
a8042d0c00c4fb676c0f0e3967070e3d72d5ef12 | 3,568 | py | Python | import_scripts/gpcc2gcmon.py | Jozelito/Raster2TXT | 337c87298ffc6227ca952e9e5cd17a54979e2224 | [
"MIT"
] | null | null | null | import_scripts/gpcc2gcmon.py | Jozelito/Raster2TXT | 337c87298ffc6227ca952e9e5cd17a54979e2224 | [
"MIT"
] | null | null | null | import_scripts/gpcc2gcmon.py | Jozelito/Raster2TXT | 337c87298ffc6227ca952e9e5cd17a54979e2224 | [
"MIT"
] | 1 | 2018-05-29T21:16:44.000Z | 2018-05-29T21:16:44.000Z | #Script para la importacion de datos netCDF de un mes del GPCC en PostGIS.
#Autor: Jos I. lvarez Francoso
import sys
from osgeo import gdal, ogr, osr
from osgeo.gdalconst import GA_ReadOnly, GA_Update
# Funcion para sobreescribir el mensaje de porcentaje completado
if __name__ == '__main__':
# El usuario tiene que definir al menos un parametro: la cadena de conexion Postgis GDAL
if len(sys.argv) < 4 or len(sys.argv) > 4:
print "uso: <GDAL PostGIS connection string> <mes> <agno>"
raise SystemExit
pg_connection_string = sys.argv[1]
mes = sys.argv[2]
agno = sys.argv[3]
gpcc2gcm_win(pg_connection_string, mes, agno)
raise SystemExit
| 36.783505 | 107 | 0.711323 |
a804975ed4327041257e7e887706be1ffc7b7803 | 2,829 | py | Python | app.py | Raisler/Brazil_HDI_DataVisualization | 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | [
"MIT"
] | null | null | null | app.py | Raisler/Brazil_HDI_DataVisualization | 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | [
"MIT"
] | null | null | null | app.py | Raisler/Brazil_HDI_DataVisualization | 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | [
"MIT"
] | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
# ``load_data`` is defined elsewhere in this module (not visible in this
# excerpt); ``df`` appears to hold one row per Brazilian state ("UF")
# with per-year HDI columns -- TODO confirm schema against hdi.csv.
df = load_data('hdi.csv')
st.title('Human Development Index in Brazil')
# Top-level navigation: the sidebar selectbox picks the page to render.
select = st.sidebar.selectbox('Choose', ['Home', 'Analysis by Year', 'Analysis by State'])
if select == 'Home':
    # Static landing page with author info and contact links.
    st.write('That is a dashboard to see the HDI of all states in Brazil, you can see graphics and values!')
    st.write('In soon, more improvements. #Version 1')
    st.write('In the sidebar, choose your option for the better view for you!')
    st.write('Author: Raisler Voigt | suggestions? raisler.dev@gmail.com')
    st.markdown('''<p align="center">
    <a href="https://www.instagram.com/raislervoigt/" target="_blank" rel="noopener noreferrer">Instagram</a>
    <a href="https://twitter.com/VoigtRaisler" target="_blank" rel="noopener noreferrer">Twitter</a>
    <a href="https://www.linkedin.com/in/raisler-voigt7/" target="_blank" rel="noopener noreferrer">Linkedin</a>
    <a href="https://github.com/Raisler" target="_blank" rel="noopener noreferrer">GitHub</a>
    </p>''', unsafe_allow_html=True)
if select == 'Analysis by Year':
    # Pick a census year; dataframe column names are suffixed with it.
    # NOTE(review): the label 'Anlise por Ano' looks like mojibake for
    # the Portuguese 'Análise por Ano' -- confirm the intended text.
    select1 = st.sidebar.selectbox('Anlise por Ano', [2017, 2010, 2000, 1991])
    # Scatter: health vs. education, bubble size = overall HDI, colour = state.
    fig1 = px.scatter(df, x="HDI Health {0}".format(select1), y="HDI Education {0}".format(select1), size="HDI {0}".format(select1), color="UF")
    # Ranked (descending) histograms of each HDI component by state.
    fig2 = px.histogram(df, x="UF", y = "HDI {0}".format(select1)).update_xaxes(categoryorder='total descending')
    fig3 = px.histogram(df, x="UF", y = "HDI Education {0}".format(select1)).update_xaxes(categoryorder='total descending')
    fig4 = px.histogram(df, x="UF", y = "HDI Health {0}".format(select1)).update_xaxes(categoryorder='total descending')
    fig5 = px.histogram(df, x="UF", y = "HDI Wealth {0}".format(select1)).update_xaxes(categoryorder='total descending')
    # Raw component table shown at the bottom of the page.
    fig6 = df[['UF', "HDI Education {0}".format(select1), "HDI Health {0}".format(select1), "HDI Wealth {0}".format(select1)]]
    st.write(fig1)
    st.write(fig2)
    st.subheader('HDI Education')
    st.write(fig3)
    st.subheader('HDI Health')
    st.write(fig4)
    st.subheader('HDI Wealth')
    st.write(fig5)
    st.write(fig6)
if select == 'Analysis by State':
    select2 = st.sidebar.selectbox('Choose the State', df['UF'])
    # NOTE(review): ``cdf = df`` aliases (does not copy) the dataframe,
    # so setting the index below mutates ``df`` too -- and, if load_data
    # caches its result, the cached object; confirm this is intended.
    cdf = df
    cdf.index = cdf['UF']
    # Boolean mask selecting the chosen state's row(s).
    state = cdf.index == '{}'.format(select2)
    state = cdf[state]
    # Transpose so indicators/years become rows for the histogram below.
    trans = state.transpose()
    trans = trans.sort_index(ascending = False)
    fig1 = px.histogram(x = trans.index, y = trans['{}'.format(select2)]).update_xaxes(categoryorder='total descending')
    fig2 = state.transpose()
    st.write(fig1)
    st.write(fig2)
| 40.414286 | 144 | 0.679392 |
a804e02acc0b6d5ed28538bc5bf647eab91b6259 | 657 | py | Python | Examples/pycomBlink/main.py | sophie-bernier/RemoteOceanAcidificationMonitor | 6a8b799826a2eb9b1d5064883193c61eea0ee310 | [
"Unlicense"
] | 1 | 2021-06-22T23:07:31.000Z | 2021-06-22T23:07:31.000Z | Examples/pycomBlink/main.py | sophie-bernier/RemoteOceanAcidificationMonitor | 6a8b799826a2eb9b1d5064883193c61eea0ee310 | [
"Unlicense"
] | null | null | null | Examples/pycomBlink/main.py | sophie-bernier/RemoteOceanAcidificationMonitor | 6a8b799826a2eb9b1d5064883193c61eea0ee310 | [
"Unlicense"
] | null | null | null | # main.py
import pycom
import time
# Drive the board's RGB LED manually: disable the default heartbeat
# blinker, then repeatedly apply the current colour.
pycom.heartbeat(False)

# Per-channel intensities; start with a dim red.
red = 0x08
blue = 0x00
green = 0x00
sleepTime = 0.01  # seconds between updates

while True:
    # NOTE(review): setRgb() is not defined in this excerpt; it is
    # presumably defined elsewhere in the module -- confirm before
    # running standalone.  (A commented-out colour-cycling experiment
    # that mutated red/green/blue each iteration was removed here.)
    setRgb(red, green, blue)
    time.sleep(sleepTime)
| 16.425 | 49 | 0.464231 |
a8054920242ac3e7b7e99120e329e53db3f718af | 1,891 | py | Python | dsn/pp/construct.py | expressionsofchange/nerf0 | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | [
"MIT"
] | 2 | 2019-04-30T05:42:05.000Z | 2019-08-11T19:17:20.000Z | dsn/pp/construct.py | expressionsofchange/nerf0 | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | [
"MIT"
] | null | null | null | dsn/pp/construct.py | expressionsofchange/nerf0 | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | [
"MIT"
] | null | null | null | from spacetime import get_s_address_for_t_address
from s_address import node_for_s_address
from dsn.s_expr.structure import TreeText
from dsn.pp.structure import PPNone, PPSingleLine, PPLispy, PPAnnotatedSExpr
from dsn.pp.clef import PPUnset, PPSetSingleLine, PPSetLispy
def construct_pp_tree(tree, pp_annotations):
"""Because pp notes take a t_address, they can be applied on future trees (i.e. the current tree).
The better (more general, more elegant and more performant) solution is to build the pp_tree in sync with the
general tree, and have construct_pp_tree be a function over notes from those clefs rather than on trees.
"""
annotated_tree = build_annotated_tree(tree, PPNone())
for annotation in pp_annotations:
pp_note = annotation.annotation
s_address = get_s_address_for_t_address(tree, pp_note.t_address)
if s_address is None:
continue # the node no longer exists
annotated_node = node_for_s_address(annotated_tree, s_address)
if isinstance(pp_note, PPUnset):
new_value = PPNone()
elif isinstance(pp_note, PPSetSingleLine):
new_value = PPSingleLine()
elif isinstance(pp_note, PPSetLispy):
new_value = PPLispy()
else:
raise Exception("Unknown PP Note")
# let's just do this mutably first... this is the lazy approach (but that fits with the caveats mentioned at the
# top of this method)
annotated_node.annotation = new_value
return annotated_tree
| 35.679245 | 120 | 0.710206 |
a8058f52d55c838079c4a938e2376efb1f6aa6ab | 3,704 | py | Python | geneparse/__init__.py | legaultmarc/geneparse | 5a844df77ded5adc765a086a8d346fce6ba01f3d | [
"MIT"
] | 4 | 2018-11-09T11:10:24.000Z | 2021-07-23T22:17:58.000Z | geneparse/__init__.py | legaultmarc/geneparse | 5a844df77ded5adc765a086a8d346fce6ba01f3d | [
"MIT"
] | 5 | 2017-05-02T15:28:01.000Z | 2018-04-16T18:29:15.000Z | geneparse/__init__.py | legaultmarc/geneparse | 5a844df77ded5adc765a086a8d346fce6ba01f3d | [
"MIT"
] | 1 | 2017-05-12T17:58:32.000Z | 2017-05-12T17:58:32.000Z | """A module to parse genetics file formats."""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from .readers import plink, impute2, dataframe, bgen, dict_based, vcf
from .core import (Genotypes, Variant, ImputedVariant, SplitChromosomeReader,
Chromosome)
from .extract.extractor import Extractor
# The version module may be generated at build time; fall back to None
# when it is absent (e.g. running from a bare source tree).
try:
    from .version import geneparse_version as __version__
except ImportError:
    __version__ = None
__author__ = "Marc-Andre Legault"
__copyright__ = "Copyright 2014, Beaulieu-Saucier Pharmacogenomics Centre"
__credits__ = ["Louis-Philippe Lemieux Perreault", "Marc-Andre Legault"]
__license__ = "MIT"
__maintainer__ = "Louis-Philippe Lemieux Perreault"
__email__ = "louis-philippe.lemieux.perreault@statgen.org"
__status__ = "Development"
# TODO:
# 1. Warn and show last exception if no reader correctly initialized.
# 2. Could also make it async to load faster.
# Registry mapping a format name to the reader class that parses it.
# NOTE(review): ``_SplitChromosomeReaderFactory`` is defined elsewhere in
# this module (not visible in this excerpt); given the
# SplitChromosomeReader import above, it presumably wraps a reader class
# to present per-chromosome files as one dataset -- confirm.
parsers = {
    "plink": plink.PlinkReader,
    "bgen": bgen.BGENReader,
    "vcf": vcf.VCFReader,
    "chrom-split-plink": _SplitChromosomeReaderFactory(plink.PlinkReader),
    "impute2": impute2.Impute2Reader,
    "chrom-split-impute2": _SplitChromosomeReaderFactory(
        impute2.Impute2Reader
    ),
    "chrom-split-bgen": _SplitChromosomeReaderFactory(bgen.BGENReader),
    "dataframe": dataframe.DataFrameReader,
    "dict-based": dict_based.DictBasedReader,
    "pickle": dict_based.PickleBasedReader,
}
| 37.04 | 79 | 0.687365 |
a8065cec94c9ac0bb277d2b7b2c4a7aa013dd5ba | 3,285 | py | Python | pallet.py | sprightlyManifesto/cadQuery2 | 207a1ff2420210460539400dfd1945e8b7245497 | [
"MIT"
] | 1 | 2021-05-31T00:08:02.000Z | 2021-05-31T00:08:02.000Z | pallet.py | sprightlyManifesto/cadQuery2 | 207a1ff2420210460539400dfd1945e8b7245497 | [
"MIT"
] | null | null | null | pallet.py | sprightlyManifesto/cadQuery2 | 207a1ff2420210460539400dfd1945e8b7245497 | [
"MIT"
] | null | null | null | from cadquery import *
from math import sin,cos,acos,asin,pi,atan2
if __name__== "__main__":
p = Pallet()
ks = list(p.torx6.keys())
ks.reverse()
a = cq.Workplane().circle(12).extrude(-3)
for k in ks:
a = a.union(p.torx(a.faces(">Z").workplane(),k).extrude(1))
| 48.308824 | 101 | 0.497717 |
a8071813703c97e154c1a58b74d953608becaf8d | 235 | py | Python | old-regressions/python/tst6.py | muchang/z3test | e3e7739f98b7aa85427fcb8a39a4c675132a896e | [
"MIT"
] | 23 | 2015-04-20T08:51:00.000Z | 2021-11-15T12:20:59.000Z | old-regressions/python/tst6.py | muchang/z3test | e3e7739f98b7aa85427fcb8a39a4c675132a896e | [
"MIT"
] | 18 | 2016-03-02T15:17:42.000Z | 2021-12-16T22:10:05.000Z | old-regressions/python/tst6.py | muchang/z3test | e3e7739f98b7aa85427fcb8a39a4c675132a896e | [
"MIT"
] | 30 | 2015-05-30T15:29:17.000Z | 2022-02-25T15:58:58.000Z |
# Copyright (c) 2015 Microsoft Corporation
from z3 import *
print(simplify(Sqrt(2)).sexpr())
set_option(":pp-decimal-precision", 50, pp_decimal=True)
print(simplify(Sqrt(2)).sexpr())
set_option(precision=20)
print(simplify(Sqrt(2)))
| 23.5 | 56 | 0.744681 |
a808c833e4004773a8618ea9f6a2827bf0e5f1ca | 2,044 | py | Python | data-detective-airflow/tests/dag_generator/test_tdag.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 5 | 2021-12-01T09:55:23.000Z | 2021-12-21T16:23:33.000Z | data-detective-airflow/tests/dag_generator/test_tdag.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 1 | 2021-11-16T15:55:34.000Z | 2021-11-16T15:55:34.000Z | data-detective-airflow/tests/dag_generator/test_tdag.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 2 | 2021-11-03T09:43:09.000Z | 2021-11-17T10:16:29.000Z | import pytest
import allure
from data_detective_airflow.constants import PG_CONN_ID, S3_CONN_ID
from data_detective_airflow.dag_generator.results import PgResult, PickleResult
from data_detective_airflow.dag_generator import ResultType, WorkType
| 41.714286 | 88 | 0.611546 |
a8094575efb5f9d3bcb611dcb83074209e70f07f | 478 | py | Python | Algorithms/Easy/830. Positions of Large Groups/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/830. Positions of Large Groups/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/830. Positions of Large Groups/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
if __name__ == "__main__":
s = Solution()
result = s.largeGroupPositions("abc")
print(result)
| 22.761905 | 61 | 0.493724 |
a80a22c9f777e08edf7fe7ed83b93c4fd1e307bc | 1,727 | py | Python | imu.py | aume1/SatelliteTracker | 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | [
"MIT"
] | 2 | 2021-06-19T17:17:30.000Z | 2021-06-19T17:17:39.000Z | imu.py | aume1/SatelliteTracker | 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | [
"MIT"
] | null | null | null | imu.py | aume1/SatelliteTracker | 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | [
"MIT"
] | 1 | 2021-06-19T17:18:32.000Z | 2021-06-19T17:18:32.000Z | import time
import math
import py_qmc5883l
import pigpio
import adafruit_bmp280
from i2c_ADXL345 import ADXL345
import numpy as np
from i2c_ITG3205 import Gyro
if __name__ == "__main__":
pi = pigpio.pi('192.168.178.229')
imu = IMU(pi)
while True:
print(imu.get_roll_pitch_yaw())
| 28.311475 | 104 | 0.579618 |