Dataset schema (column, dtype, observed range):

  hexsha                                      string    length 40–40
  size                                        int64     7 – 1.04M
  ext                                         string    10 distinct values
  lang                                        string    1 distinct value
  max_stars_repo_path                         string    length 4–247
  max_stars_repo_name                         string    length 4–125
  max_stars_repo_head_hexsha                  string    length 40–78
  max_stars_repo_licenses                     sequence  length 1–10
  max_stars_count                             int64     1 – 368k
  max_stars_repo_stars_event_min_datetime     string    length 24–24
  max_stars_repo_stars_event_max_datetime     string    length 24–24
  max_issues_repo_path                        string    length 4–247
  max_issues_repo_name                        string    length 4–125
  max_issues_repo_head_hexsha                 string    length 40–78
  max_issues_repo_licenses                    sequence  length 1–10
  max_issues_count                            int64     1 – 116k
  max_issues_repo_issues_event_min_datetime   string    length 24–24
  max_issues_repo_issues_event_max_datetime   string    length 24–24
  max_forks_repo_path                         string    length 4–247
  max_forks_repo_name                         string    length 4–125
  max_forks_repo_head_hexsha                  string    length 40–78
  max_forks_repo_licenses                     sequence  length 1–10
  max_forks_count                             int64     1 – 105k
  max_forks_repo_forks_event_min_datetime     string    length 24–24
  max_forks_repo_forks_event_max_datetime     string    length 24–24
  content                                     string    length 1 – 1.04M
  avg_line_length                             float64   1.77 – 618k
  max_line_length                             int64     1 – 1.02M
  alphanum_fraction                           float64   0 – 1
  original_content                            string    length 7 – 1.04M
  filtered:remove_function_no_docstring       int64     -102 – 942k
  filtered:remove_class_no_docstring          int64     -354 – 977k
  filtered:remove_delete_markers              int64     0 – 60.1k

Each sample row below lists one value per column, in the order above; the code-bearing
columns (content, original_content) hold an entire source file as a single value.
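For readers who want to work with this dump programmatically, the sketch below shows one way the columns above might be loaded and sanity-checked. It is only an illustration under stated assumptions: the Parquet filename is hypothetical, pandas is assumed as the reader, and the exact definitions of the stored per-file statistics (for example whether avg_line_length counts newline characters) are not given here, so the recomputation is approximate.

import pandas as pd

# Hypothetical local copy of the dump; the filename and the choice of
# Parquet/pandas are assumptions, not something stated by this dump.
df = pd.read_parquet("code_dump.parquet")

# The columns mirror the schema above: repo metadata for the max-stars,
# max-issues and max-forks variants, the filtered `content`, the raw
# `original_content`, and per-file statistics.
print(df.columns.tolist())

# Roughly recompute two of the stored statistics for the first row.
row = df.iloc[0]
text = row["content"]
lines = text.splitlines()
approx_avg_line_length = sum(len(line) for line in lines) / max(len(lines), 1)
approx_alphanum_fraction = sum(ch.isalnum() for ch in text) / max(len(text), 1)
print(approx_avg_line_length, row["avg_line_length"])
print(approx_alphanum_fraction, row["alphanum_fraction"])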
626e79d1097e3ee7dcfeaedcec4d5a17f8a45607
3,033
py
Python
networkmonitor/monitor.py
luther38/NetworkMonitor
bd5e9336342e7af71c19ca484f8d386b6b79ad69
[ "MIT" ]
null
null
null
networkmonitor/monitor.py
luther38/NetworkMonitor
bd5e9336342e7af71c19ca484f8d386b6b79ad69
[ "MIT" ]
null
null
null
networkmonitor/monitor.py
luther38/NetworkMonitor
bd5e9336342e7af71c19ca484f8d386b6b79ad69
[ "MIT" ]
null
null
null
import datetime
import typing

from networkmonitor import OldConfig
from networkmonitor.src.configuration import IConfig, ContextConfig
from networkmonitor.src.protocols import IProtocols, ContextProtocols
from networkmonitor.src import Configuration
from networkmonitor.src import Nodes
from networkmonitor.src import InvalidProtocol, InvalidNodeConfiguration
from networkmonitor.src import RefreshTimer


class Monitor():
    """
    Monitor is the working class that checks all the requested nodes.
    """
35.267442
104
0.589186
import datetime import typing from networkmonitor import OldConfig from networkmonitor.src.configuration import IConfig, ContextConfig from networkmonitor.src.protocols import IProtocols, ContextProtocols from networkmonitor.src import Configuration from networkmonitor.src import Nodes from networkmonitor.src import InvalidProtocol, InvalidNodeConfiguration from networkmonitor.src import RefreshTimer class Monitor(): """ Monitor is the working class that checks all the requested nodes. """ def __init__(self, iconfig:IConfig): self.__iconfig__:IConfig = iconfig self.__config__:ContextConfig = ContextConfig(self.__iconfig__) self.__config__.GetWorkingConfigClass(True) self.__config__.ReadConfig() self.__configuration__:Configuration = Configuration() self.__configuration__ = self.__config__.configuration self.refresh = RefreshTimer(self.__iconfig__) self.report = [] self.LastRefresh = datetime.datetime.now() self.NextRefresh = datetime.datetime.now() pass def Start(self, force:bool=False) -> None: if force == False: res = self.refresh.CheckRefreshTimer() #res = self.__CheckRefreshTimer() if res == True: self.__Worker() else: self.__Worker() def __ReadConfig__(self) -> None: self.__config__.ReadConfig() self.__configuration__ = Configuration() self.__configuration__ = self.__config__.configuration def __Worker(self)->None: self.__ReadConfig__() report = self.__configuration__.nodes requirement:bool = True for node in report: if requirement == True: np = node.protocol.lower() if np == "icmp": cp = ContextProtocols(IProtocols(node.address, 'ICMP')) cp.GetWorkingClass(True) cp.configuration = self.__configuration__ cp.Start() elif np == "http:get": cp = ContextProtocols(IProtocols(node.address, "HTTP:GET")) cp.GetWorkingClass(True) cp.Start() elif np == "http:post": cp = ContextProtocols(IProtocols(node.address, "Http:Post")) cp.GetWorkingClass(True) cp.Start() else: raise InvalidProtocol(f"{node.protocol} is invalid. Use ICMP, HTTP:Get, HTTP:POST.") node.ms = cp.MS node.status = cp.Status if node.required == True and node.status == "Offline": requirement = False else: node.status = "Offline" # Work has been finished #Copy our local version to the global scope self.LastRefresh = datetime.datetime.now() self.report = report
2,406
0
116
a43b1675c8e19994c0be2bd6717197a732815f09
336
py
Python
welltory/admin.py
vasmedvedev/welltory_test
9dd1ea35850916a2203241798d0acd9415d762b7
[ "MIT" ]
null
null
null
welltory/admin.py
vasmedvedev/welltory_test
9dd1ea35850916a2203241798d0acd9415d762b7
[ "MIT" ]
null
null
null
welltory/admin.py
vasmedvedev/welltory_test
9dd1ea35850916a2203241798d0acd9415d762b7
[ "MIT" ]
null
null
null
from django.contrib import admin

from welltory import models

admin.site.register(models.Sleep, SleepAdmin)
admin.site.register(models.Steps, StepsAdmin)
admin.site.register(models.Geo, GeoAdmin)
16.8
45
0.779762
from django.contrib import admin

from welltory import models


class SleepAdmin(admin.ModelAdmin):
    pass


class StepsAdmin(admin.ModelAdmin):
    pass


class GeoAdmin(admin.ModelAdmin):
    pass


admin.site.register(models.Sleep, SleepAdmin)
admin.site.register(models.Steps, StepsAdmin)
admin.site.register(models.Geo, GeoAdmin)
0
67
69
b95fcac1f93d1bd02d1dcd14dd2453199e1bd50c
394
py
Python
pymmcore_plus/client/callbacks/basic.py
ianhi/pymmcore-remote
54f93985ab73898b0cfc484e5ca43bf16d35c522
[ "BSD-3-Clause" ]
8
2021-07-15T10:35:08.000Z
2022-03-02T23:36:34.000Z
pymmcore_plus/client/callbacks/basic.py
fdrgsp/pymmcore-remote
929f62cca703a8a6437240d0c46446c0a524ed9a
[ "BSD-3-Clause" ]
99
2021-07-03T02:16:14.000Z
2022-03-29T16:11:59.000Z
pymmcore_plus/client/callbacks/basic.py
fdrgsp/pymmcore-remote
929f62cca703a8a6437240d0c46446c0a524ed9a
[ "BSD-3-Clause" ]
2
2021-05-19T19:59:27.000Z
2021-07-01T19:08:35.000Z
from Pyro5 import api

from ...core._signals import _CMMCoreSignaler
28.142857
78
0.690355
from Pyro5 import api

from ...core._signals import _CMMCoreSignaler


class SynchronousCallback(_CMMCoreSignaler):
    def __init__(self) -> None:
        super().__init__()

    @api.expose
    def receive_core_callback(self, signal_name: str, args: tuple) -> None:
        """Will be called by server with name of signal, and tuple of args."""
        getattr(self, signal_name).emit(*args)
33
268
23
14fe2fcea35b1656d97913ad5e54df7ffd928511
125
py
Python
app/recipies/admin.py
sourabhsinha396/Rest-api-recipie
a9937d5119c706d1193654ece280ed46b599a344
[ "MIT" ]
null
null
null
app/recipies/admin.py
sourabhsinha396/Rest-api-recipie
a9937d5119c706d1193654ece280ed46b599a344
[ "MIT" ]
9
2021-03-30T14:10:47.000Z
2021-09-22T19:29:50.000Z
app/recipies/admin.py
sourabhsinha396/Rest-api-recipie
a9937d5119c706d1193654ece280ed46b599a344
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import Tag,Ingredient

admin.site.register(Tag)
admin.site.register(Ingredient)
25
34
0.832
from django.contrib import admin

from .models import Tag,Ingredient

admin.site.register(Tag)
admin.site.register(Ingredient)
0
0
0
1471e53db5b729dc1c33d31786115ca692ce7cf8
671
py
Python
RunProject.py
badeaadi/Colorization
b18d4267aa611b7e87ddfe8a32fef834259e8a45
[ "MIT" ]
null
null
null
RunProject.py
badeaadi/Colorization
b18d4267aa611b7e87ddfe8a32fef834259e8a45
[ "MIT" ]
null
null
null
RunProject.py
badeaadi/Colorization
b18d4267aa611b7e87ddfe8a32fef834259e8a45
[ "MIT" ]
null
null
null
""" PROIECT Colorarea imaginilor folosind autoencoder si invatarea automata Badea Adrian Catalin, grupa 334, anul III, FMI """ import pdb from DataSet import * from AeModel import * data_set: DataSet = DataSet() data_set.scene_name = 'forest' ae_model: AeModel = AeModel(data_set) ae_model.define_the_model() ae_model.compile_the_model() ae_model.train_the_model() ae_model.evaluate_the_model() data_set: DataSet = DataSet() data_set.scene_name = 'coast' ae_model: AeModel = AeModel(data_set) ae_model.define_the_model() ae_model.compile_the_model() ae_model.train_the_model() ae_model.evaluate_the_model()
19.735294
68
0.730253
""" PROIECT Colorarea imaginilor folosind autoencoder si invatarea automata Badea Adrian Catalin, grupa 334, anul III, FMI """ import pdb from DataSet import * from AeModel import * data_set: DataSet = DataSet() data_set.scene_name = 'forest' ae_model: AeModel = AeModel(data_set) ae_model.define_the_model() ae_model.compile_the_model() ae_model.train_the_model() ae_model.evaluate_the_model() data_set: DataSet = DataSet() data_set.scene_name = 'coast' ae_model: AeModel = AeModel(data_set) ae_model.define_the_model() ae_model.compile_the_model() ae_model.train_the_model() ae_model.evaluate_the_model()
0
0
0
379ff9e7c9ab8025f20aee92eea805b694ff479d
1,492
py
Python
lib/nodes/net.py
velexi-corporation/rsi-2020-bemish
3454ecdff66802494d5c3c1678767fddc42d72b6
[ "Apache-2.0" ]
1
2021-07-11T00:50:22.000Z
2021-07-11T00:50:22.000Z
lib/nodes/net.py
velexi-corporation/rsi-2020-bemish
3454ecdff66802494d5c3c1678767fddc42d72b6
[ "Apache-2.0" ]
null
null
null
lib/nodes/net.py
velexi-corporation/rsi-2020-bemish
3454ecdff66802494d5c3c1678767fddc42d72b6
[ "Apache-2.0" ]
null
null
null
from nodes.core import *
from nodes.connect import *
from nodes.util import *

import numpy as np
import math
27.62963
61
0.55563
from nodes.core import * from nodes.connect import * from nodes.util import * import numpy as np import math class FoldiakShapedNet(net): def connect_foldiak(self, layerin, layerout): cg1 = ShapedCGroup(layerin, layerout) cg1.mkconnects(HebbianConnect) cg1.normbiases() self.cgroups.append(cg1) cg2 = ShapedCGroup(layerout, layerout) cg2.mkconnects(AntiHebbianConnect) self.cgroups.append(cg2) self.inihblayers.append([layerout,cg1,cg2]) def __init__(self): self.layers = [] self.cgroups = [] self.connects = [] self.meta_timing = [] self.valdict = dict() self.inihblayers = [] self.diffeqs = [] self.isinit = False def setup(self): for i in self.layers: i.setup() self.meta_timing = [] self.diffeqs = [] for i in self.inihblayers: odesolver = FoldiakShapedDiffEq(i[0], i[1], i[2]) self.diffeqs.append(odesolver) self.isinit = True def update(self): for i in self.diffeqs: i.update() for i in self.cgroups: i.update() for i in self.connects: i.update() for i in self.layers: i.update(self.connects) def updatethresonly(self): for i in self.diffeqs: i.update() for i in self.layers: i.update(self.connects)
1,218
7
157
8f2f930b5a2dacd7576259cb4a0ce498b4941c73
359
py
Python
ch02/similarity.py
YaGiNA/DLfS2
3dbaba7a62c198b50849de2e3b74d92897a4cae7
[ "MIT" ]
1
2019-05-15T09:17:23.000Z
2019-05-15T09:17:23.000Z
ch02/similarity.py
YaGiNA/DLfS2
3dbaba7a62c198b50849de2e3b74d92897a4cae7
[ "MIT" ]
null
null
null
ch02/similarity.py
YaGiNA/DLfS2
3dbaba7a62c198b50849de2e3b74d92897a4cae7
[ "MIT" ]
null
null
null
import sys
sys.path.append("..")
from common.util import preprocess, create_co_matrix, cos_similarity

text = "You say goobye and I say hello."
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)

c0 = C[word_to_id["you"]]
c1 = C[word_to_id["i"]]
print(cos_similarity(c0, c1))
25.642857
69
0.721448
import sys
sys.path.append("..")
from common.util import preprocess, create_co_matrix, cos_similarity

text = "You say goobye and I say hello."
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)

c0 = C[word_to_id["you"]]
c1 = C[word_to_id["i"]]
print(cos_similarity(c0, c1))
0
0
0
69b5c654c37a2e2d72fe9be3cc0958d62171dbd6
9,814
py
Python
pydht/pydht.py
scottcunningham/pydht
9a2ecfc8da3794b2dc6587d17b8d51337a8e7df4
[ "BSD-2-Clause-FreeBSD" ]
1
2015-01-04T07:02:54.000Z
2015-01-04T07:02:54.000Z
pydht/pydht.py
scottcunningham/pydht
9a2ecfc8da3794b2dc6587d17b8d51337a8e7df4
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
pydht/pydht.py
scottcunningham/pydht
9a2ecfc8da3794b2dc6587d17b8d51337a8e7df4
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
import math
import json
import random
import uuid
import SocketServer
import threading
import time

import key_derivation

from .bucketset import BucketSet
from .hashing import hash_function, random_id
from .peer import Peer
from .shortlist import Shortlist

k = 20
alpha = 3
id_bits = 128
iteration_sleep = 1
keysize = 2048

DEFAULT_TTL = 604800  # = 7 days, in seconds.
39.099602
151
0.635011
import math import json import random import uuid import SocketServer import threading import time import key_derivation from .bucketset import BucketSet from .hashing import hash_function, random_id from .peer import Peer from .shortlist import Shortlist k = 20 alpha = 3 id_bits = 128 iteration_sleep = 1 keysize = 2048 DEFAULT_TTL = 604800 # = 7 days, in seconds. class DHTRequestHandler(SocketServer.BaseRequestHandler): def handle(self): try: message = json.loads(self.request[0].strip()) message_type = message["message_type"] print "Received message of type", message_type, "from", message["peer_id"] if message_type == "ping": self.handle_ping(message) elif message_type == "pong": self.handle_pong(message) elif message_type == "find_node": self.handle_find(message) elif message_type == "find_value": self.handle_find(message, find_value=True) elif message_type == "found_nodes": self.handle_found_nodes(message) elif message_type == "found_value": self.handle_found_value(message) elif message_type == "store": print "Request to store" self.handle_store(message) elif message_type == "downvote": print "Asked to downvote an item" self.handle_downvote(message) except KeyError, ValueError: pass client_host, client_port = self.client_address peer_id = message["peer_id"] new_peer = Peer(client_host, client_port, peer_id) self.server.dht.buckets.insert(new_peer) def handle_ping(self, message): client_host, client_port = self.client_address id = message["peer_id"] peer = Peer(client_host, client_port, id) peer.pong(socket=self.server.socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock) def handle_pong(self, message): pass def handle_find(self, message, find_value=False): key = message["id"] id = message["peer_id"] client_host, client_port = self.client_address peer = Peer(client_host, client_port, id) response_socket = self.request[1] if find_value and (key in self.server.dht.data): value = self.server.dht.data[key] peer.found_value(id, value, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock) else: nearest_nodes = self.server.dht.buckets.nearest_nodes(id) if not nearest_nodes: nearest_nodes.append(self.server.dht.peer) nearest_nodes = [nearest_peer.astriple() for nearest_peer in nearest_nodes] peer.found_nodes(id, nearest_nodes, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock) def handle_found_nodes(self, message): rpc_id = message["rpc_id"] shortlist = self.server.dht.rpc_ids[rpc_id] del self.server.dht.rpc_ids[rpc_id] nearest_nodes = [Peer(*peer) for peer in message["nearest_nodes"]] shortlist.update(nearest_nodes) def handle_found_value(self, message): rpc_id = message["rpc_id"] shortlist = self.server.dht.rpc_ids[rpc_id] del self.server.dht.rpc_ids[rpc_id] shortlist.set_complete(message["value"]) def handle_store(self, message): key = message["id"] print "Asked to store data for id", key print "Ciphertext is", message["value"] self.server.dht.data[key] = message["value"] self.server.dht.ttls[key] = DEFAULT_TTL def handle_downvote(self, message): key = message["id"] print "Downvote for key", key, " -- uuid is ", message["uid"] self.server.dht.handle_downvote(key, uuid) class DHTServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer): def __init__(self, host_address, handler_cls): SocketServer.UDPServer.__init__(self, host_address, handler_cls) self.send_lock = threading.Lock() class DHT(object): def __init__(self, host, port, id=None, boot_host=None, boot_port=None): if 
not id: id = random_id() self.id = id self.peer = Peer(unicode(host), port, id) # Data and data decay data structures self.data = {} self.recent_downvotes = [] self.downvotes = {} self.ttls = {} self.pending_replies = {} self.buckets = BucketSet(k, id_bits, self.peer.id) self.rpc_ids = {} # should probably have a lock for this self.server = DHTServer(self.peer.address(), DHTRequestHandler) self.server.dht = self self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() self.bootstrap(unicode(boot_host), boot_port) def iterative_find_nodes(self, key, boot_peer=None): shortlist = Shortlist(k, key) shortlist.update(self.buckets.nearest_nodes(key, limit=alpha)) if boot_peer: rpc_id = random.getrandbits(id_bits) self.rpc_ids[rpc_id] = shortlist boot_peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) while (not shortlist.complete()) or boot_peer: nearest_nodes = shortlist.get_next_iteration(alpha) for peer in nearest_nodes: shortlist.mark(peer) rpc_id = random.getrandbits(id_bits) self.rpc_ids[rpc_id] = shortlist peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) ###### time.sleep(iteration_sleep) boot_peer = None return shortlist.results() def iterative_find_value(self, key): shortlist = Shortlist(k, key) shortlist.update(self.buckets.nearest_nodes(key, limit=alpha)) while not shortlist.complete(): nearest_nodes = shortlist.get_next_iteration(alpha) for peer in nearest_nodes: shortlist.mark(peer) rpc_id = random.getrandbits(id_bits) self.rpc_ids[rpc_id] = shortlist peer.find_value(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) ##### time.sleep(iteration_sleep) return shortlist.completion_result() def bootstrap(self, boot_host, boot_port): if boot_host and boot_port: boot_peer = Peer(boot_host, boot_port, 0) self.iterative_find_nodes(self.peer.id, boot_peer=boot_peer) def __getitem__(self, key): hashed_key = hash_function(key) if hashed_key in self.data: return self.data[hashed_key] result = self.iterative_find_value(hashed_key) if result: return result raise KeyError def __setitem__(self, key, value): hashed_key = hash_function(key) nearest_nodes = self.iterative_find_nodes(hashed_key) if not nearest_nodes: self.data[hashed_key] = value for node in nearest_nodes: node.store(hashed_key, value, socket=self.server.socket, peer_id=self.peer.id) def publish(self, value): key = str(uuid.uuid4()) print "Publishing content under new key:", key hashed_key = hash_function(key) print "Hashed key is:", hashed_key # need to encrypt value ciphertext = key_derivation.do_encrypt(key, value) print "Cyphertext is:", ciphertext nearest_nodes = self.iterative_find_nodes(hashed_key) if not nearest_nodes: print "Storing data for key {} locally".format(key) self.data[hashed_key] = ciphertext for node in nearest_nodes: print "Sending data for key {} to closer nodes.".format(key) node.store(hashed_key, ciphertext, socket=self.server.socket, peer_id=self.peer.id) return key def retrieve(self, key): # Retrieve result print "Looking up key:", key hashed_key = hash_function(key) print "Hashed key is", hashed_key result = None if hashed_key in self.data: print "Data for key", "stored locally" result = self.data[hashed_key] else: print "Data stored somewhere else: forwarding request" result = self.iterative_find_value(hashed_key) if not result: print "Key", key, "not found" raise KeyError # result is encrypted + hmac'd # Can throw ValueError if HMAC fails print "Ciphertext is", result 
plaintext = key_derivation.do_decrypt(key, result) return plaintext def downvote(self, key): uid = str(uuid.uuid4()) hashed_key = hash_function(key) nearest_nodes = self.iterative_find_nodes(hashed_key) print "Downvoting", key if not nearest_nodes: print "Asked myself to downvote a key: {}".format(key) for node in nearest_nodes: print "Asking another node to downvote", key node.downvote(hashed_key, uid, socket=self.server.socket, peer_id=self.peer.id) def handle_downvote(self, key, uuid): if uuid in self.recent_downvotes: return if key not in self.data: return self.downvotes[key] += 1 self.recent_downvotes.append(uuid) def tick(self): for (uuid, downvotes) in self.downvotes.items(): downvote_val = math.log(downvotes, 2) self.ttls[uuid] -= downvote_val for (uuid, ttl) in self.ttls.items(): if ttl <= 0: print "UUID", uuid, " past TTL - deleting"
8,755
81
607
60baa5af0a80d786e5af5dc6c9b0c70a33113f15
913
py
Python
mutability/mutability_exercise2.py
dipeshbabu/python-labs-codio
40ec0f398dc239b41d4105a8f95b35a22024b63e
[ "MIT" ]
2
2021-11-18T01:42:00.000Z
2021-11-28T14:55:31.000Z
mutability/mutability_exercise2.py
dipeshbabu/python-labs-codio
40ec0f398dc239b41d4105a8f95b35a22024b63e
[ "MIT" ]
null
null
null
mutability/mutability_exercise2.py
dipeshbabu/python-labs-codio
40ec0f398dc239b41d4105a8f95b35a22024b63e
[ "MIT" ]
null
null
null
""" Exercise 2 Using the same CelestialBody class, write a static method closer_to_sun that compares two CelectialBody objects and returns the name of the object that is closes to the sun. Expected Output If the objects mercury and venus are compared, then the method would return Mercury. """ class CelestialBody: """Represents a celestial body""" @staticmethod def closer_to_sun(body1, body2): """ Returns the name of the body that is closest to the sun """ if body1.distance < body2.distance: return body1.name else: return body2.name mercury = CelestialBody("Mercury", 4879.4, 57909000, 0) venus = CelestialBody("Venus", 12103.6, 108160000, 0)
27.666667
87
0.665936
""" Exercise 2 Using the same CelestialBody class, write a static method closer_to_sun that compares two CelectialBody objects and returns the name of the object that is closes to the sun. Expected Output If the objects mercury and venus are compared, then the method would return Mercury. """ class CelestialBody: """Represents a celestial body""" def __init__(self, name, diameter, distance, moons): self.name = name self.diameter = diameter self.distance = distance self.moons = moons @staticmethod def closer_to_sun(body1, body2): """ Returns the name of the body that is closest to the sun """ if body1.distance < body2.distance: return body1.name else: return body2.name mercury = CelestialBody("Mercury", 4879.4, 57909000, 0) venus = CelestialBody("Venus", 12103.6, 108160000, 0)
149
0
27
4b6841333d077341a6f1d6dc8abf6907abc1a552
6,442
py
Python
source/clusters/train_cluster.py
microsoft/Aura
d95ae0067bcd82e5952e8eed0e46b1a5eaaa7031
[ "MIT" ]
1
2022-03-02T00:21:33.000Z
2022-03-02T00:21:33.000Z
source/clusters/train_cluster.py
microsoft/Aura
d95ae0067bcd82e5952e8eed0e46b1a5eaaa7031
[ "MIT" ]
null
null
null
source/clusters/train_cluster.py
microsoft/Aura
d95ae0067bcd82e5952e8eed0e46b1a5eaaa7031
[ "MIT" ]
2
2022-03-15T03:12:02.000Z
2022-03-20T20:49:02.000Z
import numpy as np import os from azureml.core.run import Run from scipy.stats import entropy from ..utils.tfrecords import resize, parse_tfrecord from .kmeans import * from ..models import * run = Run.get_context() class ClusterFeatureMap(tf.keras.Model): """" This is a clustering class with methods to allow batch clustering of the latent representation generated by classifier """ class SaveCluster(tf.keras.callbacks.Callback): """ A callback class for saving clusters """ class UpdateCluster(tf.keras.callbacks.Callback): """ A callback class for updating centroid coordinates """ def get_data_from_tfrecords(args, num_replicas): """ Create a tf.data from tf records in args.train_dir/args.validation_dir :param args: :param num_replicas: :return: """ num_frames = args.num_frames num_mel = args.num_mel num_labels = args.num_labels batch_size = args.batch_size * num_replicas autotune = tf.data.AUTOTUNE train_filenames = tf.io.gfile.glob(f'{args.train_dir}/*.tfrec') train_dataset = tf.data.TFRecordDataset(train_filenames, num_parallel_reads=autotune) \ .map(lambda example: parse_tfrecord(example, num_mel=num_mel, num_frames=num_frames, snr=args.snr, labels=args.labels), num_parallel_calls=autotune) \ .map(lambda example: resize(example, num_frames=num_frames, num_mel=num_mel, num_labels=args.num_labels, labels=args.labels, snr=args.snr), num_parallel_calls=autotune) \ .shuffle(10 * batch_size) \ .batch(batch_size) \ .prefetch(autotune) \ .cache() return train_dataset def get_model(args, num_replicas): """ Construct tensorflow model from checkpoint in args.path_model_tf and data loader from args.data_dir """ model = globals()[args.model_name](nclass=args.num_labels) if args.path_model_tf is not None: model.load_weights(tf.train.latest_checkpoint(args.path_model_tf)).expect_partial() cluster_algorithm = globals()[args.clustering_name](args.num_clusters, args.embed_dim) clus = ClusterFeatureMap(cluster_algorithm, model, batch_size=args.batch_size * num_replicas) clus.compile() print('Compiling model done') return clus def train(args): """ Iterate over the batch in the dataset and learn the cluster centers using args.clustering_name and args.model_name feature map. :param args: :return: """ if run._run_id.startswith("OfflineRun"): run.number = 0 strategy = tf.distribute.MirroredStrategy() save_dir = args.save_dir save_dir = f'{save_dir}/{args.experiment_name}_{run.number}' os.makedirs(save_dir, exist_ok=True) with strategy.scope(): model = get_model(args, strategy.num_replicas_in_sync) train_loader = get_data_from_tfrecords(args, strategy.num_replicas_in_sync) model.fit(train_loader, epochs=args.num_epochs, callbacks=[SaveCluster(save_dir), UpdateCluster()])
34.084656
132
0.658025
import numpy as np import os from azureml.core.run import Run from scipy.stats import entropy from ..utils.tfrecords import resize, parse_tfrecord from .kmeans import * from ..models import * run = Run.get_context() class ClusterFeatureMap(tf.keras.Model): """" This is a clustering class with methods to allow batch clustering of the latent representation generated by classifier """ def __init__(self, clustering, classifier, batch_size=16): super().__init__() self.clustering = clustering self.classifier = classifier self.batch_size = batch_size def train_step(self, data): noisy1, label = data[0], data[1] _, latent = self.classifier.estimate(noisy1) latent = tf.reduce_mean(latent, axis=(1)) def get_assign(): return self.clustering.assign(latent) def get_initialize(): return self.clustering.initialize(latent) centroid_assignment = tf.cond(self.clustering.initialized, get_assign, lambda: tf.zeros_like(latent[:, 0], dtype=tf.int64)) def get_update(): return self.clustering.update(latent, centroid_assignment, label) l2_adjustment = self.clustering.compute_distance(latent, centroid_assignment) labels_distance = self.clustering.compute_distance_labels(label, centroid_assignment) tf.cond(self.clustering.initialized, get_update, get_initialize) results = {'cluster_dispersion': tf.reduce_sum(l2_adjustment) / self.batch_size, 'cluster_label_distance': tf.reduce_sum(labels_distance) / self.batch_size} return results def call(self, data): noisy1, label = data[0], data[1] _, latent = self.classifier(noisy1) latent = tf.reduce_mean(latent, axis=(1)) centroid_assignment = self.cluster.assign(latent) return centroid_assignment class SaveCluster(tf.keras.callbacks.Callback): """ A callback class for saving clusters """ def __init__(self, save_dir): super().__init__() self.save_dir = save_dir def on_epoch_end(self, epoch, logs={}): centroids = self.model.clustering.centroids.numpy() labels = self.model.clustering.cluster_labels.numpy() if hasattr(self.model.clustering, 'centroids_covariance'): centroids_covariance = self.model.clustering.centroids_covariance.numpy() np.savez(f'{self.save_dir}/centroids.npz', centroids=centroids, centroid_labels=labels, covariance=centroids_covariance) else: np.savez(f'{self.save_dir}/centroids.npz', centroids=centroids, centroid_labels=labels) # -- label entropy per cluster labels_without_zeros = labels[labels.sum(-1) > 0] prob_labels = labels_without_zeros / labels_without_zeros.sum(-1)[:, None] entropy_clusters = entropy(prob_labels, axis=1) run.log('entropy_label', entropy_clusters.mean()) class UpdateCluster(tf.keras.callbacks.Callback): """ A callback class for updating centroid coordinates """ def __init__(self): super().__init__() def on_epoch_end(self, epoch, logs={}): tf.cond(self.model.clustering.initialized, self.model.clustering.reset_centroids, lambda: None) ch_index = self.model.clustering.compute_calinski_harabasz() db_index = self.model.clustering.compute_davies_bouldin() db_labels_index = self.model.clustering.compute_davies_bouldin_labels() run.log('Calinski-Harabasz Index', float(ch_index)) run.log('Davies-Bouldin Index', float(db_index)) run.log('Davies-Bouldin Labels-Based Index', float(db_labels_index)) def get_data_from_tfrecords(args, num_replicas): """ Create a tf.data from tf records in args.train_dir/args.validation_dir :param args: :param num_replicas: :return: """ num_frames = args.num_frames num_mel = args.num_mel num_labels = args.num_labels batch_size = args.batch_size * num_replicas autotune = tf.data.AUTOTUNE train_filenames = 
tf.io.gfile.glob(f'{args.train_dir}/*.tfrec') train_dataset = tf.data.TFRecordDataset(train_filenames, num_parallel_reads=autotune) \ .map(lambda example: parse_tfrecord(example, num_mel=num_mel, num_frames=num_frames, snr=args.snr, labels=args.labels), num_parallel_calls=autotune) \ .map(lambda example: resize(example, num_frames=num_frames, num_mel=num_mel, num_labels=args.num_labels, labels=args.labels, snr=args.snr), num_parallel_calls=autotune) \ .shuffle(10 * batch_size) \ .batch(batch_size) \ .prefetch(autotune) \ .cache() return train_dataset def get_model(args, num_replicas): """ Construct tensorflow model from checkpoint in args.path_model_tf and data loader from args.data_dir """ model = globals()[args.model_name](nclass=args.num_labels) if args.path_model_tf is not None: model.load_weights(tf.train.latest_checkpoint(args.path_model_tf)).expect_partial() cluster_algorithm = globals()[args.clustering_name](args.num_clusters, args.embed_dim) clus = ClusterFeatureMap(cluster_algorithm, model, batch_size=args.batch_size * num_replicas) clus.compile() print('Compiling model done') return clus def train(args): """ Iterate over the batch in the dataset and learn the cluster centers using args.clustering_name and args.model_name feature map. :param args: :return: """ if run._run_id.startswith("OfflineRun"): run.number = 0 strategy = tf.distribute.MirroredStrategy() save_dir = args.save_dir save_dir = f'{save_dir}/{args.experiment_name}_{run.number}' os.makedirs(save_dir, exist_ok=True) with strategy.scope(): model = get_model(args, strategy.num_replicas_in_sync) train_loader = get_data_from_tfrecords(args, strategy.num_replicas_in_sync) model.fit(train_loader, epochs=args.num_epochs, callbacks=[SaveCluster(save_dir), UpdateCluster()])
2,881
0
189
500a827b7b2d2907dc4668b184fb3c0816a2b1e1
4,987
py
Python
curriculumBuilder/testDetails.py
code-dot-org/curriculumbuilder
e40330006145b8528f777a8aec2abff5b309d1c7
[ "Apache-2.0" ]
3
2019-10-22T20:21:15.000Z
2022-01-12T19:38:48.000Z
curriculumBuilder/testDetails.py
code-dot-org/curriculumbuilder
e40330006145b8528f777a8aec2abff5b309d1c7
[ "Apache-2.0" ]
67
2019-09-27T17:04:52.000Z
2022-03-21T22:16:23.000Z
curriculumBuilder/testDetails.py
code-dot-org/curriculumbuilder
e40330006145b8528f777a8aec2abff5b309d1c7
[ "Apache-2.0" ]
1
2019-10-18T16:06:31.000Z
2019-10-18T16:06:31.000Z
# pylint: disable=missing-docstring,invalid-name,line-too-long
from django.test import TestCase

import markdown


class TestDetails(TestCase):
    """
    Test details extension.
    """
41.214876
166
0.600361
# pylint: disable=missing-docstring,invalid-name,line-too-long from django.test import TestCase import markdown class TestDetails(TestCase): """ Test details extension. """ def setUp(self): self.markdown = markdown.Markdown(extensions=['curriculumBuilder.details:DetailsExtension']) def test_details_can_render(self): source = '::: details [summary-content]\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::' expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_can_span_multiple_blocks(self): source = '::: details [summary-content]\n' + \ '\n' + \ 'contents, which are sometimes further block elements\n' + \ '\n' + \ ':::' expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_can_have_a_variable_number_of_opening_colons(self): source = ':::::::: details [summary-content]\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::::::::::::' expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_can_render_markdown_syntax_in_the_summary(self): source = '::: details [**summary** _content_]\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::' expected = '<details><summary><p><strong>summary</strong> <em>content</em></p></summary><p>contents, which are sometimes further block elements</p></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_can_render_markdown_syntax_in_the_body(self): source = '::: details [summary-content]\n' + \ '\n' + \ '# Contents\n' + \ '- can\n' + \ '- be\n' + \ '- markdown\n' + \ '\n' + \ ':::' expected = '<details><summary><p>summary-content</p></summary><h1>Contents</h1><ul>' + \ '<li>can</li>' + \ '<li>be</li>' + \ '<li>markdown</li>' + \ '</ul></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_ignores_trailing_colons(self): # Look how pretty this can be! 
source = '::::::::::::: details [summary-content] :::::::::::::\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::::::::::::::::::::::::::::::::::::::::::::::::::::' expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_ignores_excess_whitespace(self): source = '::: details [summary-content] \n' + \ '\n' + \ 'contents, which are sometimes further block elements\n' + \ '\n' + \ ':::' expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_can_nest(self): source = ':::: details [outer]\n' + \ '::: details [inner]\n' + \ 'innermost content\n' + \ ':::\n' + \ '::::' expected = '<details><summary><p>outer</p></summary><details><summary><p>inner</p></summary><p>innermost content</p></details></details>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_requires_a_summary_block(self): source = '::: details\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::' expected = '<p>::: details\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::</p>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected) def test_details_requires_at_least_three_opening_colons(self): source = ':: details [summary-content]\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::' expected = '<p>:: details [summary-content]\n' + \ 'contents, which are sometimes further block elements\n' + \ ':::</p>' rendered = self.markdown.convert(source) self.assertEqual(rendered, expected)
4,511
0
297
88eef38159455835c84e3f2649e3a65d58cd13ef
5,079
py
Python
gtunrealdevice/config.py
Geeks-Trident-LLC/gtunrealdevice
a691f16ed031c342002472fd7c12c96c0e94be45
[ "BSD-3-Clause" ]
null
null
null
gtunrealdevice/config.py
Geeks-Trident-LLC/gtunrealdevice
a691f16ed031c342002472fd7c12c96c0e94be45
[ "BSD-3-Clause" ]
null
null
null
gtunrealdevice/config.py
Geeks-Trident-LLC/gtunrealdevice
a691f16ed031c342002472fd7c12c96c0e94be45
[ "BSD-3-Clause" ]
null
null
null
"""Module containing the attributes for gtunrealdevice.""" import yaml from os import path from textwrap import dedent from gtunrealdevice.utils import File __version__ = '0.2.8' version = __version__ __edition__ = 'Community' edition = __edition__ __all__ = [ 'Data', 'version', 'edition' ]
35.027586
86
0.607403
"""Module containing the attributes for gtunrealdevice.""" import yaml from os import path from textwrap import dedent from gtunrealdevice.utils import File __version__ = '0.2.8' version = __version__ __edition__ = 'Community' edition = __edition__ __all__ = [ 'Data', 'version', 'edition' ] class Data: message = '' # app yaml files app_directory = File.get_path('.geekstrident', 'gtunrealdevice', is_home=True) devices_info_filename = File.get_path(app_directory, 'devices_info.yaml') serialized_filename = File.get_path(app_directory, 'serialized_data.yaml') # app sample data sample_devices_info_text = dedent(""" #################################################################### # sample devices info # # Note: name, login, and configs nodes are optional # #################################################################### host_address_1: name: host_name (optional) description: (optional) login: |- output_of_login (optional) cmdlines: cmdline_1: |- line 1 output_of_cmdline_1 ... line n output_of_cmdline_1 cmdline_k_for_multiple_output: - |- line 1 - output_of_cmdline_k ... line n - output_of_cmdline_k - |- line 1 - other_output_of_cmdline_k ... line n - other_output_of_cmdline_k configs: cfg_1_reference: |- line 1 of cfg_1 ... line n of cfg_1 """).strip() # main app main_app_text = 'gtunrealdevice v{}'.format(version) # company company = 'Geeks Trident LLC' company_url = 'https://www.geekstrident.com/' # URL repo_url = 'https://github.com/Geeks-Trident-LLC/gtunrealdevice' # TODO: Need to update wiki page for documentation_url instead of README.md. documentation_url = path.join(repo_url, 'blob/develop/README.md') license_url = path.join(repo_url, 'blob/develop/LICENSE') # License years = '2022-2040' license_name = 'BSD 3-Clause License' copyright_text = 'Copyright @ {}'.format(years) license = dedent( """ BSD 3-Clause License Copyright (c) {}, {} All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""".format(years, company) ).strip() @classmethod def get_app_info(cls): from platform import uname as u, python_version as v lst = [cls.main_app_text, 'Project : {}'.format(cls.repo_url), 'License : {}'.format(cls.license_name), 'Platform: {0.system} {0.release} - Python {1}'.format(u(), v()), ] app_info = '\n'.join(lst) return app_info @classmethod def is_devices_info_file_exist(cls): return File.is_exist(cls.devices_info_filename) @classmethod def create_devices_info_file(cls): is_created = File.create(cls.devices_info_filename) cls.message = File.message return is_created @classmethod def get_dependency(cls): dependencies = dict( pyyaml=dict( package='pyyaml v{}'.format(yaml.__version__), url='https://pypi.org/project/PyYAML/' ), ) return dependencies
794
3,952
23
260c878ed68bb23823d5096c9467185ce233fe09
2,117
py
Python
semester5/num-methods/lab2/lab2.py
gardenappl/uni
5bc7110946caf16aae2a0c1ddae4e88bfbb25aa8
[ "WTFPL" ]
null
null
null
semester5/num-methods/lab2/lab2.py
gardenappl/uni
5bc7110946caf16aae2a0c1ddae4e88bfbb25aa8
[ "WTFPL" ]
null
null
null
semester5/num-methods/lab2/lab2.py
gardenappl/uni
5bc7110946caf16aae2a0c1ddae4e88bfbb25aa8
[ "WTFPL" ]
null
null
null
import numpy as np

a = np.array([
    (1, 3, 5, 7),
    (3, 5, 7, 1),
    (5, 7, 1, 3),
    (7, 1, 1, 5)
], dtype=np.float64)
b = np.array([12, 0, 4, 16], dtype=np.float64)

MAX_STEPS = 100

print("Gauss (with selection of main element):", solve_gauss_m(a, b))
print("numpy.linalg.solve:", np.linalg.solve(a, b))

a = np.array([
    [3, -1, 1],
    [-1, 2, 0.5],
    [1, 0.5, 3]
], dtype=np.float64)
b = np.array([1, 1.75, 2.5], dtype=np.float64)

print("Seidel:", solve_seidel(a, b, epsilon=0.0001))
print("numpy.linalg.solve:", np.linalg.solve(a, b))
23.786517
76
0.505432
import numpy as np a = np.array([ (1, 3, 5, 7), (3, 5, 7, 1), (5, 7, 1, 3), (7, 1, 1, 5) ], dtype=np.float64) b = np.array([12, 0, 4, 16], dtype=np.float64) def solve_gauss_m(a, b): a = a.copy() b = b.copy() for i in range(0, a.shape[0]): main_row_index = i + np.argmax(a[range(i, a.shape[1]), i]) main_row_el = a[main_row_index, i] # Swap main row with ith row if main_row_index != i: a[[main_row_index, i], :] = a[[i, main_row_index], :] b[[main_row_index, i]] = b[[i, main_row_index]] # print(a) # print(b) a[i, :] /= main_row_el b[i] /= main_row_el for row_index in range(i + 1, a.shape[1]): multiplier = a[row_index, i] a[row_index, :] -= a[i, :] * multiplier b[row_index] -= b[i] * multiplier # print(a) # print(b) x = np.empty_like(b) for i in range(0, a.shape[0]): row_index = x.size - 1 - i x[row_index] = b[row_index] for j in range(0, i): x[row_index] -= a[row_index, a.shape[1]-1 - j] * x[x.size-1 - j] return x MAX_STEPS = 100 def solve_seidel(a, b, max_steps=MAX_STEPS, epsilon=1e-04): x = np.zeros_like(b) for step in range(max_steps): x_new = np.zeros_like(x) for i in range(x.size): for j in range(0, i): x_new[i] -= a[i, j] / a[i, i] * x_new[j] for j in range(i + 1, x.size): x_new[i] -= a[i, j] / a[i, i] * x[j] x_new[i] += b[i] / a[i, i] print("Step", step, ":", x_new) if np.allclose(x, x_new, atol=epsilon, rtol=0): return x_new x = x_new return x_new print("Gauss (with selection of main element):", solve_gauss_m(a, b)) print("numpy.linalg.solve:", np.linalg.solve(a, b)) a = np.array([ [3, -1, 1], [-1, 2, 0.5], [1, 0.5, 3] ], dtype=np.float64) b = np.array([1, 1.75, 2.5], dtype=np.float64) print("Seidel:", solve_seidel(a, b, epsilon=0.0001)) print("numpy.linalg.solve:", np.linalg.solve(a, b))
1,511
0
46
ea9ab1b534d31c154176811f60556189131b810f
51
py
Python
haystack/modeling/training/__init__.py
kamilpz/haystack
947549fcccc9dea79a621eea9703a1bb0897c009
[ "Apache-2.0" ]
1
2021-11-19T15:01:00.000Z
2021-11-19T15:01:00.000Z
haystack/modeling/training/__init__.py
jaehyeongAN/haystack
b63669d1bc60b6c773b8b89d631afdd0ebbf4c4c
[ "Apache-2.0" ]
null
null
null
haystack/modeling/training/__init__.py
jaehyeongAN/haystack
b63669d1bc60b6c773b8b89d631afdd0ebbf4c4c
[ "Apache-2.0" ]
1
2021-02-16T10:52:38.000Z
2021-02-16T10:52:38.000Z
from haystack.modeling.training.base import Trainer
51
51
0.882353
from haystack.modeling.training.base import Trainer
0
0
0
429b84c47c7bb1d0236a511a637bb935635a7405
8,460
py
Python
ctseg/config.py
sandialabs/mcdn-3d-seg
6b6a234719d37a11f3f997568c32ca04d62a4b18
[ "MIT" ]
4
2021-01-21T21:28:12.000Z
2021-09-27T19:39:34.000Z
ctseg/config.py
sandialabs/mcdn-3d-seg
6b6a234719d37a11f3f997568c32ca04d62a4b18
[ "MIT" ]
null
null
null
ctseg/config.py
sandialabs/mcdn-3d-seg
6b6a234719d37a11f3f997568c32ca04d62a4b18
[ "MIT" ]
2
2021-09-01T11:44:49.000Z
2022-01-04T02:01:28.000Z
""" """ import os from sacred.observers import FileStorageObserver from sacred import Experiment from ctseg.ctutil.utils import read_json def initialize_experiment(): """Initialize the Sacred Experiment This method reads a JSON config from mcdn-3d-seg/sacred_config.json with the following entries: experiment_name: the name of the sacred experiment file_observer_base_dir: the directory where run logs are saved to. If relative, it is assumed relative to mcdn-3d-seg/ """ # parse the sacred config repo_dir = os.path.dirname(os.path.dirname(__file__)) sacred_config = read_json(os.path.join(repo_dir, "sacred_config.json")) # initialize the experiment ex = Experiment(sacred_config["experiment_name"]) # create a file-based observer to log runs file_observer_base_dir = os.path.expanduser(sacred_config["file_observer_base_dir"]) if not file_observer_base_dir.startswith("/"): file_observer_base_dir = os.path.join(repo_dir, file_observer_base_dir) ex.observers.append(FileStorageObserver.create(file_observer_base_dir)) return ex ex = initialize_experiment() DEFAULT_CONFIG = { "num_gpus": 1, # the number of output segmentation classes "num_classes": 4, # the method used to normalize the data # options include: ZeroMeanUnitVar, NormLog, MagicNormLog "normalization": "", # continuously checks for new inference files and deletes completed files "production_mode": False, "check_alignment": -1, # model architecture "model_config": { # specifies the architecture of a new model "architecture_config": { # the size of model's input window when sampling volumes (x, y, z) "input_shape": [240, 240, 240], "kernel_initializer": "lecun_normal", "activation": "relu", "dropout_rate": 0.1, }, # specifies loading a pre-trained model "load_config": { # whether or not to drop the last layer when loading a model "drop_last_layer": False, # "best", "latest" or "/PATH/TO/MODEL/CHECKPOINT" to resume training from. # Leave empty to not resume "resume_from": "", # path to a weights file to load the model from. takes precedent over # `resume_from` if set "load_weights_from": "", }, }, # data preprocessing "data_config": { # mirrors input chunks in the corresponding dimension "flip_x": False, "flip_y": False, "flip_z": False, # Flip Validation Axis: None or int or tuple of ints, optional # Axis or axes along which to flip over. The default, # axis=None, will flip over all of the axes of the input array. # If axis is negative it counts from the last to the first axis. # If axis is a tuple of ints, flipping is performed on all of the axes # specified in the tuple. "flip_validation_axis": None, "sampler_config": { # the chunk sampling class for during training. one of "OverlapSampler", # "RandomSampler", "BattleShipSampler" "sampler_class": "RandomSampler", # Number of random samples taken from the training data dir when performing # training. Not used in "overlap" mode. "n_samples_per_epoch": 3, # Number of chunks taken from each sample when performing training. Not # used in "overlap" mode. "n_chunks_per_sample": 100, # the amount the input window is translated in the x, y, and z dimensions. # Used during inference but also during training if sampler_class is # "OverlapSampler" "overlap_stride": 240, } }, # configuration specific to training "train_config": { "inputs": { # dir containing training the `.npy` data files "data_dir": "/PATH/TO/TRAIN/DATA", # dir containing the `.npy` training labels. 
files are matched by name to # data, so this dir can have targets for both training and testing "targets_dir": "/PATH/TO/TRAIN/TARGETS" }, "outputs": { # where cached normalized data is saved to "normalized_data_dir": "/PATH/TO/NORMALIZED/DATA", "csv_log_dir": "/PATH/TO/CSV/LOGS", "tensorboard_log_dir": "/PATH/TO/TENSORBOARD/LOGS", "models_dir": "/PATH/TO/SAVED/MODELS", # where normalizer metadata is saved to "preprocessor_dir": "/PATH/TO/SAVED/PREPROCESSORS", }, "compilation": { # name of the optimizer to use "optimizer": "Adadelta", # the name of the loss function. Valid names include all Keras defaults # as well as fully-qualified function names "loss": "ctseg.ctutil.losses.weighted_categorical_crossentropy", # kwargs passed to the loss function. replace this kwargs dict with `false` # to not use "loss_kwargs": { "beta": 0.9, }, # the names of the metrics to track. Valid names include all Keras defaults # as well as fully-qualified function names "metrics": [ "accuracy", "ctseg.ctutil.metrics.per_class_accuracy", "ctseg.ctutil.metrics.jaccard_index", ], # indicates whether or not to recompile with the above specified optimizer, # loss and metrics if a compiled model is loaded. # Warning: doing this may slow training as it will discard the current state # of the optimizer "recompile": False }, # the max number of epochs to train for "epochs": 1000, # Epoch at which to start training # (useful for resuming a previous training run). "initial_epoch": 0, # the training batch size "batch_size": 1, }, # configuration specific to testing "test_config": { "inputs": { # dir containing the `.npy` test data files "data_dir": "/PATH/TO/TEST/DATA", # dir containing the `.npy` test labels. files are matched by name to data, # so this dir can have targets for both training and testing "targets_dir": "/PATH/TO/TEST/TARGETS" }, "outputs": { # where cached normalized data is saved to "normalized_data_dir": "/PATH/TO/NORMALIZED/DATA" } }, # configuration specific to inference "inference_config": { "inputs": { # where the `.npy` files to be processed live "unprocessed_queue_dir": "/PATH/TO/UNPROCESSED/DATA", }, "outputs": { # where files from `unprocessed_queue_dir` are moved to once processed "processed_data_dir": "/PATH/TO/PROCESSED/DATA", # where cached normalized data is saved to "normalized_data_dir": "/PATH/TO/NORMALIZED/DATA", # where predictions are written to "predictions_dir": "/PATH/TO/INFERENCE/PREDICTIONS" }, # the number of iterations of inference performed per chunk, the results of which # are averaged and standard deviations are calculated "inference_iters": 5, }, # configuration specific to plotting "plot_config": { "inputs": { # dir containing the `.npy` data files "data_dir": "/PATH/TO/DATA", # dir containing the `.npy` labels, if available. Leave empty if not. Files # are matched by name to data "targets_dir": "/PATH/TO/TARGETS", # dir containing the `.npy` predictions "predictions_dir": "/PATH/TO/PREDICTIONS" }, "outputs": { "plots_dir": "/PATH/TO/OUTPUT/PLOTS" } }, } ex.add_config(DEFAULT_CONFIG) @ex.named_config @ex.named_config @ex.named_config @ex.named_config
36.943231
89
0.610757
""" """ import os from sacred.observers import FileStorageObserver from sacred import Experiment from ctseg.ctutil.utils import read_json def initialize_experiment(): """Initialize the Sacred Experiment This method reads a JSON config from mcdn-3d-seg/sacred_config.json with the following entries: experiment_name: the name of the sacred experiment file_observer_base_dir: the directory where run logs are saved to. If relative, it is assumed relative to mcdn-3d-seg/ """ # parse the sacred config repo_dir = os.path.dirname(os.path.dirname(__file__)) sacred_config = read_json(os.path.join(repo_dir, "sacred_config.json")) # initialize the experiment ex = Experiment(sacred_config["experiment_name"]) # create a file-based observer to log runs file_observer_base_dir = os.path.expanduser(sacred_config["file_observer_base_dir"]) if not file_observer_base_dir.startswith("/"): file_observer_base_dir = os.path.join(repo_dir, file_observer_base_dir) ex.observers.append(FileStorageObserver.create(file_observer_base_dir)) return ex ex = initialize_experiment() DEFAULT_CONFIG = { "num_gpus": 1, # the number of output segmentation classes "num_classes": 4, # the method used to normalize the data # options include: ZeroMeanUnitVar, NormLog, MagicNormLog "normalization": "", # continuously checks for new inference files and deletes completed files "production_mode": False, "check_alignment": -1, # model architecture "model_config": { # specifies the architecture of a new model "architecture_config": { # the size of model's input window when sampling volumes (x, y, z) "input_shape": [240, 240, 240], "kernel_initializer": "lecun_normal", "activation": "relu", "dropout_rate": 0.1, }, # specifies loading a pre-trained model "load_config": { # whether or not to drop the last layer when loading a model "drop_last_layer": False, # "best", "latest" or "/PATH/TO/MODEL/CHECKPOINT" to resume training from. # Leave empty to not resume "resume_from": "", # path to a weights file to load the model from. takes precedent over # `resume_from` if set "load_weights_from": "", }, }, # data preprocessing "data_config": { # mirrors input chunks in the corresponding dimension "flip_x": False, "flip_y": False, "flip_z": False, # Flip Validation Axis: None or int or tuple of ints, optional # Axis or axes along which to flip over. The default, # axis=None, will flip over all of the axes of the input array. # If axis is negative it counts from the last to the first axis. # If axis is a tuple of ints, flipping is performed on all of the axes # specified in the tuple. "flip_validation_axis": None, "sampler_config": { # the chunk sampling class for during training. one of "OverlapSampler", # "RandomSampler", "BattleShipSampler" "sampler_class": "RandomSampler", # Number of random samples taken from the training data dir when performing # training. Not used in "overlap" mode. "n_samples_per_epoch": 3, # Number of chunks taken from each sample when performing training. Not # used in "overlap" mode. "n_chunks_per_sample": 100, # the amount the input window is translated in the x, y, and z dimensions. # Used during inference but also during training if sampler_class is # "OverlapSampler" "overlap_stride": 240, } }, # configuration specific to training "train_config": { "inputs": { # dir containing training the `.npy` data files "data_dir": "/PATH/TO/TRAIN/DATA", # dir containing the `.npy` training labels. 
files are matched by name to # data, so this dir can have targets for both training and testing "targets_dir": "/PATH/TO/TRAIN/TARGETS" }, "outputs": { # where cached normalized data is saved to "normalized_data_dir": "/PATH/TO/NORMALIZED/DATA", "csv_log_dir": "/PATH/TO/CSV/LOGS", "tensorboard_log_dir": "/PATH/TO/TENSORBOARD/LOGS", "models_dir": "/PATH/TO/SAVED/MODELS", # where normalizer metadata is saved to "preprocessor_dir": "/PATH/TO/SAVED/PREPROCESSORS", }, "compilation": { # name of the optimizer to use "optimizer": "Adadelta", # the name of the loss function. Valid names include all Keras defaults # as well as fully-qualified function names "loss": "ctseg.ctutil.losses.weighted_categorical_crossentropy", # kwargs passed to the loss function. replace this kwargs dict with `false` # to not use "loss_kwargs": { "beta": 0.9, }, # the names of the metrics to track. Valid names include all Keras defaults # as well as fully-qualified function names "metrics": [ "accuracy", "ctseg.ctutil.metrics.per_class_accuracy", "ctseg.ctutil.metrics.jaccard_index", ], # indicates whether or not to recompile with the above specified optimizer, # loss and metrics if a compiled model is loaded. # Warning: doing this may slow training as it will discard the current state # of the optimizer "recompile": False }, # the max number of epochs to train for "epochs": 1000, # Epoch at which to start training # (useful for resuming a previous training run). "initial_epoch": 0, # the training batch size "batch_size": 1, }, # configuration specific to testing "test_config": { "inputs": { # dir containing the `.npy` test data files "data_dir": "/PATH/TO/TEST/DATA", # dir containing the `.npy` test labels. files are matched by name to data, # so this dir can have targets for both training and testing "targets_dir": "/PATH/TO/TEST/TARGETS" }, "outputs": { # where cached normalized data is saved to "normalized_data_dir": "/PATH/TO/NORMALIZED/DATA" } }, # configuration specific to inference "inference_config": { "inputs": { # where the `.npy` files to be processed live "unprocessed_queue_dir": "/PATH/TO/UNPROCESSED/DATA", }, "outputs": { # where files from `unprocessed_queue_dir` are moved to once processed "processed_data_dir": "/PATH/TO/PROCESSED/DATA", # where cached normalized data is saved to "normalized_data_dir": "/PATH/TO/NORMALIZED/DATA", # where predictions are written to "predictions_dir": "/PATH/TO/INFERENCE/PREDICTIONS" }, # the number of iterations of inference performed per chunk, the results of which # are averaged and standard deviations are calculated "inference_iters": 5, }, # configuration specific to plotting "plot_config": { "inputs": { # dir containing the `.npy` data files "data_dir": "/PATH/TO/DATA", # dir containing the `.npy` labels, if available. Leave empty if not. Files # are matched by name to data "targets_dir": "/PATH/TO/TARGETS", # dir containing the `.npy` predictions "predictions_dir": "/PATH/TO/PREDICTIONS" }, "outputs": { "plots_dir": "/PATH/TO/OUTPUT/PLOTS" } }, } ex.add_config(DEFAULT_CONFIG) @ex.named_config def use_8_gpus(): num_gpus=8 batch_size=8 @ex.named_config def use_2_gpus(): num_gpus=2 batch_size=2 @ex.named_config def small_chunks(): name="sm33_small_chunk" x_max=192 y_max=192 z_max=192 overlap_stride=192 @ex.named_config def small_testing(): num_chunks_per_training_img=20 num_training_imgs_per_epoch=1
215
0
88
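The record above wires a Sacred Experiment to a JSON config file and a FileStorageObserver. As a hedged illustration (not part of that repository), the sketch below shows how such an experiment is typically driven; the experiment name, the "runs" directory and the run_training function are assumptions, and only num_gpus / train_config mirror keys from the DEFAULT_CONFIG above.

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment("example_experiment")
ex.observers.append(FileStorageObserver.create("runs"))  # each run logged under ./runs/<id>/
ex.add_config({"num_gpus": 1, "train_config": {"batch_size": 1}})


@ex.named_config
def use_2_gpus():
    num_gpus = 2  # selected on the CLI as: python script.py with use_2_gpus


@ex.automain
def run_training(num_gpus, train_config):
    # Sacred injects config entries by parameter name
    print("training on %d GPU(s), batch_size=%d" % (num_gpus, train_config["batch_size"]))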
f0b634f5ff4f75ca1711e6e717af315fda15fb61
1,290
py
Python
src/sage/features/pdf2svg.py
kliem/sage-test-27122
cc60cfebc4576fed8b01f0fc487271bdee3cefed
[ "BSL-1.0" ]
null
null
null
src/sage/features/pdf2svg.py
kliem/sage-test-27122
cc60cfebc4576fed8b01f0fc487271bdee3cefed
[ "BSL-1.0" ]
null
null
null
src/sage/features/pdf2svg.py
kliem/sage-test-27122
cc60cfebc4576fed8b01f0fc487271bdee3cefed
[ "BSL-1.0" ]
1
2020-07-23T10:29:56.000Z
2020-07-23T10:29:56.000Z
# -*- coding: utf-8 -*-
r"""
Check for pdf2svg
"""
# ****************************************************************************
#       Copyright (C) 2021 Sebastien Labbe <slabqc@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#                  https://www.gnu.org/licenses/
# ****************************************************************************

from . import Executable


class pdf2svg(Executable):
    r"""
    A :class:`sage.features.Feature` describing the presence of ``pdf2svg``

    EXAMPLES::

        sage: from sage.features.pdf2svg import pdf2svg
        sage: pdf2svg().is_present()    # optional: pdf2svg
        FeatureTestResult('pdf2svg', True)
    """
    def __init__(self):
        r"""
        TESTS::

            sage: from sage.features.pdf2svg import pdf2svg
            sage: isinstance(pdf2svg(), pdf2svg)
            True
        """
        Executable.__init__(self, "pdf2svg",
                            executable="pdf2svg",
                            spkg='pdf2svg',
                            url="http://www.cityinthesky.co.uk/opensource/pdf2svg/")
33.947368
84
0.531008
# -*- coding: utf-8 -*-
r"""
Check for pdf2svg
"""
# ****************************************************************************
#       Copyright (C) 2021 Sebastien Labbe <slabqc@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#                  https://www.gnu.org/licenses/
# ****************************************************************************

from . import Executable


class pdf2svg(Executable):
    r"""
    A :class:`sage.features.Feature` describing the presence of ``pdf2svg``

    EXAMPLES::

        sage: from sage.features.pdf2svg import pdf2svg
        sage: pdf2svg().is_present()    # optional: pdf2svg
        FeatureTestResult('pdf2svg', True)
    """
    def __init__(self):
        r"""
        TESTS::

            sage: from sage.features.pdf2svg import pdf2svg
            sage: isinstance(pdf2svg(), pdf2svg)
            True
        """
        Executable.__init__(self, "pdf2svg",
                            executable="pdf2svg",
                            spkg='pdf2svg',
                            url="http://www.cityinthesky.co.uk/opensource/pdf2svg/")
0
0
0
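The feature class above only reports whether the pdf2svg executable is available. Outside of Sage, that check reduces to a PATH lookup; the sketch below is a hedged, Sage-independent illustration (the helper name is invented and this is not Sage's actual implementation).

import shutil


def executable_is_present(name):
    """Return True if the named program can be found on the current PATH."""
    return shutil.which(name) is not None


if __name__ == "__main__":
    if executable_is_present("pdf2svg"):
        print("pdf2svg found; the optional doctests above would be run")
    else:
        print("pdf2svg missing; install it to enable the optional doctests")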
81823ab9baefc07843a37683b2ffb1db1f4fa33b
1,670
py
Python
test.py
IrekPrzybylo/DogBreedDNN
7c429694c648351cd23544b9b1321665c1866c7c
[ "MIT" ]
null
null
null
test.py
IrekPrzybylo/DogBreedDNN
7c429694c648351cd23544b9b1321665c1866c7c
[ "MIT" ]
12
2021-04-08T16:08:27.000Z
2021-06-23T15:10:41.000Z
test.py
IrekPrzybylo/DogBreedDNN
7c429694c648351cd23544b9b1321665c1866c7c
[ "MIT" ]
2
2021-04-08T14:55:04.000Z
2021-04-22T10:59:28.000Z
import os

import cv2
import numpy as np
import pandas as pd
import tensorflow as tf


def read_image(path, size):
    """
    Load image from local storage
    :param path: image path
    :param size: image size
    :return: loaded image
    """
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = cv2.resize(image, (size, size))
    image = image / 255.0
    image = image.astype(np.float32)
    return image


def recognize_image(img_path):
    """
    Recognize image
    Takes an image and predicts the breed based on the trained model
    :param img_path: Image Path
    :return: most similar breed, top 4 matching breeds
    """
    path = "input/"
    train_path = os.path.join(path, "train/*")
    test_path = os.path.join(path, "test/*")
    labels_path = os.path.join(path, "labels.csv")
    labels_df = pd.read_csv(labels_path)
    breed = labels_df["breed"].unique()
    id2breed = {i: name for i, name in enumerate(breed)}

    # Model
    model = tf.keras.models.load_model("model.h5")

    image = read_image(img_path, 224)
    image = np.expand_dims(image, axis=0)
    pred = model.predict(image)[0]
    label_idx = np.argmax(pred)
    top4 = np.argsort(pred)[-4:][::-1]  # indices of the four highest-probability breeds
    possible_breed = [str(id2breed[i]).replace("_", " ") for i in top4]
    print(possible_breed[0])
    return str(id2breed[label_idx]).replace("_", " "), possible_breed


if __name__ == "__main__":
    # recognize_image() requires an image path; this file name is only an illustrative example
    print(recognize_image("sample_dog.jpg"))
28.793103
69
0.649102
import os import cv2 import numpy as np import pandas as pd import tensorflow as tf def read_image(path, size): """ Load image from local storage :param path: image path :param size: image size :return: loaded image """ image = cv2.imread(path, cv2.IMREAD_COLOR) image = cv2.resize(image, (size, size)) image = image / 255.0 image = image.astype(np.float32) return image def recognize_image(img_path): """ Recognize image Taking image and predicting breed basing on trained model :param img_path: Image Path :return: top 4 matching breeds, most similar breed """ path = "input/" train_path = os.path.join(path, "train/*") test_path = os.path.join(path, "test/*") labels_path = os.path.join(path, "labels.csv") labels_df = pd.read_csv(labels_path) breed = labels_df["breed"].unique() id2breed = {i: name for i, name in enumerate(breed)} ## Model model = tf.keras.models.load_model("model.h5") image = read_image(img_path, 224) image = np.expand_dims(image, axis=0) pred = model.predict(image)[0] label_idx = np.argmax(pred) top3 = np.argsort(pred)[-4:][::-1] possible_breed = list() print(str(id2breed[top3[0]]).replace("_", " ")) possible_breed.append(str(id2breed[top3[0]]).replace("_", " ")) possible_breed.append(str(id2breed[top3[1]]).replace("_", " ")) possible_breed.append(str(id2breed[top3[2]]).replace("_", " ")) possible_breed.append(str(id2breed[top3[3]]).replace("_", " ")) return str(id2breed[label_idx]).replace("_", " "), possible_breed if __name__ == "__main__": print(recognize_image())
0
0
0
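A hedged usage sketch for recognize_image() from the record above: the module name dog_breeds and the image path are hypothetical, and the call assumes model.h5 and input/labels.csv exist exactly as the function expects.

from dog_breeds import recognize_image  # hypothetical module name for the file above

best_match, top_matches = recognize_image("samples/golden_retriever.jpg")  # hypothetical path
print("most likely breed:", best_match)
print("other candidates:", ", ".join(top_matches))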
52ace3880da10107b1f510d9ac03ca28cec40035
412
py
Python
examples/animations/timer.py
colinmford/coldtype
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
[ "Apache-2.0" ]
142
2020-06-12T17:01:58.000Z
2022-03-16T23:21:37.000Z
examples/animations/timer.py
colinmford/coldtype
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
[ "Apache-2.0" ]
35
2020-04-15T15:34:54.000Z
2022-03-19T20:26:47.000Z
examples/animations/timer.py
colinmford/coldtype
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
[ "Apache-2.0" ]
14
2020-06-23T18:56:46.000Z
2022-03-31T15:54:56.000Z
from coldtype import *

@animation((1080, 1080), timeline=Timeline(3500, 24))
34.333333
82
0.536408
from coldtype import *

@animation((1080, 1080), timeline=Timeline(3500, 24))
def timer(f):
    e = f.a.progress(f.i, easefn="linear").e
    c = f.a.r.inset(50).take(d:=150, "mxx").take(d, "mny")
    cpen = DP().oval(c)
    return DPS([
        (DP(f.a.r.inset(0)).f(None).s(hsl(0.6)).sw(10)),
        (cpen.copy().f(hsl(0.3))),
        (cpen.copy().subsegment(0, e).f(hsl(0.9, 1)).s(hsl(0.9, 1, 0.3)).sw(2)),
    ])
312
0
22
0302eb3a4c90c0f51ea5fb46f2c400e93479d849
236
py
Python
bitbots_misc/bitbots_bringup/scripts/launch_warning.py
MosHumanoid/bitbots_thmos_meta
f45ccc362dc689b69027be5b0d000d2a08580de4
[ "MIT" ]
null
null
null
bitbots_misc/bitbots_bringup/scripts/launch_warning.py
MosHumanoid/bitbots_thmos_meta
f45ccc362dc689b69027be5b0d000d2a08580de4
[ "MIT" ]
57
2019-03-02T10:59:05.000Z
2021-12-09T18:57:34.000Z
bitbots_misc/bitbots_bringup/scripts/launch_warning.py
MosHumanoid/bitbots_thmos_meta
f45ccc362dc689b69027be5b0d000d2a08580de4
[ "MIT" ]
1
2019-07-28T11:26:47.000Z
2019-07-28T11:26:47.000Z
#!/usr/bin/env python3
import rospy

rospy.logerr("###\n###\n###\n###\n###\nYou didn't specify which robot you want to start!\nPlease add minibot:=true or wolfgang:=true or davros:=true behind your roslaunch.\n###\n###\n###\n###\n###")
47.2
198
0.648305
#!/usr/bin/env python3
import rospy

rospy.logerr("###\n###\n###\n###\n###\nYou didn't specify which robot you want to start!\nPlease add minibot:=true or wolfgang:=true or davros:=true behind your roslaunch.\n###\n###\n###\n###\n###")
0
0
0
395e3450ce5675d25cb67b1cbba1201ead7d4bd1
612
py
Python
cms_redirects/admin.py
mllrsohn/django-cms-redirects
3398528e44594adb708aa090d5b7867f619db10e
[ "BSD-3-Clause" ]
8
2015-02-10T20:30:26.000Z
2020-05-31T20:20:51.000Z
cms_redirects/admin.py
mllrsohn/django-cms-redirects
3398528e44594adb708aa090d5b7867f619db10e
[ "BSD-3-Clause" ]
5
2017-04-10T07:41:45.000Z
2021-12-20T08:49:35.000Z
cms_redirects/admin.py
mllrsohn/django-cms-redirects
3398528e44594adb708aa090d5b7867f619db10e
[ "BSD-3-Clause" ]
8
2015-04-16T21:25:55.000Z
2018-09-27T11:15:12.000Z
from django.contrib import admin

from cms_redirects.models import CMSRedirect

admin.site.register(CMSRedirect, CMSRedirectAdmin)
32.210526
97
0.625817
from django.contrib import admin

from cms_redirects.models import CMSRedirect


class CMSRedirectAdmin(admin.ModelAdmin):
    list_display = ('old_path', 'new_path', 'page', 'page_site', 'site', 'actual_response_code',)
    list_filter = ('site',)
    search_fields = ('old_path', 'new_path', 'page__title_set__title')
    radio_fields = {'site': admin.VERTICAL}

    fieldsets = [
        ('Source', {
            "fields": ('site', 'old_path',)
        }),
        ('Destination', {
            "fields": ('new_path', 'page', 'response_code',)
        }),
    ]


admin.site.register(CMSRedirect, CMSRedirectAdmin)
0
459
23
9f217f8a02a0f671c29167ca02a91879f9fdb7e4
1,347
py
Python
ccfwidget/treemap_widget.py
NeurodataWithoutBorders/ccf-widget
482b2ecb2d9b227425cf09fa00a4d15857f0de26
[ "MIT" ]
5
2020-06-17T02:40:02.000Z
2020-11-18T17:18:04.000Z
ccfwidget/treemap_widget.py
NeurodataWithoutBorders/ccf-widget
482b2ecb2d9b227425cf09fa00a4d15857f0de26
[ "MIT" ]
18
2020-06-13T18:43:23.000Z
2020-08-27T16:43:34.000Z
ccfwidget/treemap_widget.py
NeurodataWithoutBorders/ccf-widget
482b2ecb2d9b227425cf09fa00a4d15857f0de26
[ "MIT" ]
null
null
null
import plotly.graph_objects as go

@register
28.659574
74
0.669636
import plotly.graph_objects as go
# The following imports are likely required for the classes below but are missing
# from this record as shown: ipytree provides Node/Tree, ipywidgets provides @register.
import ipytree
from ipywidgets import register


class StructureNodeWidget(ipytree.Node):
    allen_id = 0
    # Allen ID of the parent in the Allen Ontology StructureGraph
    parent_structure_id = 0
    acronym = 'Allen ontology acronym'


@register
class IPyTreeWidget(ipytree.Tree):
    def __init__(self, structure_graph):
        super(IPyTreeWidget, self).__init__(animation=100)
        self.layout.width = '40%'
        self.allen_id_to_node = dict()
        self.acronym_to_allen_id = dict()
        with self.hold_sync():
            for node in structure_graph['children']:
                self._process_node(self, node)

    def _node_to_widget(self, node):
        node_widget = StructureNodeWidget(node['name'])
        allen_id = int(node['id'])
        node_widget.allen_id = allen_id
        self.allen_id_to_node[allen_id] = node_widget
        node_widget.parent_structure_id = int(node['parent_structure_id'])
        acronym = node['acronym']
        node_widget.acronym = acronym
        self.acronym_to_allen_id[acronym] = allen_id
        return node_widget

    def _process_node(self, parent_widget, node):
        node_widget = self._node_to_widget(node)
        node_widget.opened = False
        parent_widget.add_node(node_widget)
        for child in node['children']:
            self._process_node(node_widget, child)
992
182
126
06aa52314a9c965d93128b5579494aaf803987c3
646
py
Python
cybox/test/objects/win_network_route_entry_test.py
siemens/python-cybox
b692a98c8a62bd696e2a0dda802ada7359853482
[ "BSD-3-Clause" ]
null
null
null
cybox/test/objects/win_network_route_entry_test.py
siemens/python-cybox
b692a98c8a62bd696e2a0dda802ada7359853482
[ "BSD-3-Clause" ]
null
null
null
cybox/test/objects/win_network_route_entry_test.py
siemens/python-cybox
b692a98c8a62bd696e2a0dda802ada7359853482
[ "BSD-3-Clause" ]
1
2019-04-16T18:37:32.000Z
2019-04-16T18:37:32.000Z
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

import unittest

from cybox.objects.win_network_route_entry_object import WinNetworkRouteEntry
from cybox.test import EntityTestCase, round_trip
from cybox.test.objects import ObjectTestCase


if __name__ == "__main__":
    unittest.main()
26.916667
77
0.752322
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

import unittest

from cybox.objects.win_network_route_entry_object import WinNetworkRouteEntry
from cybox.test import EntityTestCase, round_trip
from cybox.test.objects import ObjectTestCase


class TestWinNetworkRouteEntry(ObjectTestCase, unittest.TestCase):
    object_type = "WindowsNetworkRouteEntryObjectType"
    klass = WinNetworkRouteEntry

    _full_dict = {
        'nl_route_protocol': u"A protocol",
        'nl_route_origin': u"An origin",
        'xsi:type': object_type,
    }


if __name__ == "__main__":
    unittest.main()
0
277
23
b8dab34e2ddba5fa52d18ceed2c8f8efbaf24b94
3,710
py
Python
external/evolver_gff_featurestats2.py
dentearl/evolverSimControl
b3236debbc8d945a99aecb0988bd1f48f25913c3
[ "MIT" ]
4
2018-12-01T13:49:12.000Z
2021-02-18T17:55:46.000Z
external/evolver_gff_featurestats2.py
dentearl/evolverSimControl
b3236debbc8d945a99aecb0988bd1f48f25913c3
[ "MIT" ]
null
null
null
external/evolver_gff_featurestats2.py
dentearl/evolverSimControl
b3236debbc8d945a99aecb0988bd1f48f25913c3
[ "MIT" ]
1
2021-04-10T15:05:11.000Z
2021-04-10T15:05:11.000Z
#!/usr/bin/env python # Copyright (C) 2008-2011 by # George Asimenos, Robert C. Edgar, Serafim Batzoglou and Arend Sidow. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################## import sys import evolverSimControl.lib.evolver_gff as gff FileName1 = sys.argv[1] FileName2 = sys.argv[2] Name1 = FileName1 Name2 = FileName2 GenomeLength1 = -1 GenomeLength2 = -1 if len(sys.argv) > 3: Name1 = sys.argv[3] if len(sys.argv) > 4: Name2 = sys.argv[4] if len(sys.argv) > 6: GenomeLength1 = int(sys.argv[5]) GenomeLength2 = int(sys.argv[6]) ConstrainedFeatures = [ "CDS", "UTR", "NXE", "NGE" ] Counts1, Bases1 = GetCounts(FileName1) Counts2, Bases2 = GetCounts(FileName2) Features = [ "CDS", "UTR", "NXE", "NGE", "island", "tandem", "Constrained" ] Keys = Counts1.keys() Keys.extend(Counts2.keys()) for Feature in Keys: if Feature not in Features: Features.append(Feature) if GenomeLength1 != -1: Features.append("Neutral") Features.append("Total") Counts1["Neutral"] = 0 Counts2["Neutral"] = 0 Counts1["Total"] = 0 Counts2["Total"] = 0 Bases1["Neutral"] = GenomeLength1 - Bases1["Constrained"] Bases2["Neutral"] = GenomeLength2 - Bases2["Constrained"] Bases1["Total"] = GenomeLength1 Bases2["Total"] = GenomeLength2 print " Feature 1=%8.8s 2=%8.8s Nr2-1 2-1 Pct Bases1 Bases2 Bases2-1 2-1 Pct" % (Name1, Name2) print "================ ========== ========== ========== ======== ========== ========== ========== ========" for Feature in Features: n1 = Get(Counts1, Feature) n2 = Get(Counts2, Feature) dn = n2 - n1 b1 = Get(Bases1, Feature) b2 = Get(Bases2, Feature) db = b2 - b1 pn = PctChg(n1, n2) pb = PctChg(b1, b2) s = "" s += "%16.16s" % Feature s += " %10u" % n1 s += " %10u" % n2 s += " %+10d" % (n2 - n1) s += " %7.7s%%" % pn s += " %10u" % b1 s += " %10u" % b2 s += " %+10d" % (b2-b1) s += " %7.7s%%" % pb print s
28.106061
127
0.646092
#!/usr/bin/env python # Copyright (C) 2008-2011 by # George Asimenos, Robert C. Edgar, Serafim Batzoglou and Arend Sidow. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################## import sys import evolverSimControl.lib.evolver_gff as gff FileName1 = sys.argv[1] FileName2 = sys.argv[2] Name1 = FileName1 Name2 = FileName2 GenomeLength1 = -1 GenomeLength2 = -1 if len(sys.argv) > 3: Name1 = sys.argv[3] if len(sys.argv) > 4: Name2 = sys.argv[4] if len(sys.argv) > 6: GenomeLength1 = int(sys.argv[5]) GenomeLength2 = int(sys.argv[6]) ConstrainedFeatures = [ "CDS", "UTR", "NXE", "NGE" ] def Die(s): print >> sys.stderr, sys.argv[0], "***ERROR***", s sys.exit(1) def DoRec(): global Counts, Bases Length = gff.End - gff.Start + 1 Feature = gff.Feature if Feature not in Bases.keys(): Bases[Feature] = Length Counts[Feature] = 1 else: Bases[Feature] += Length Counts[Feature] += 1 if Feature in ConstrainedFeatures: Bases["Constrained"] += Length Counts["Constrained"] += 1 def Get(L, k): if k in L.keys(): return L[k] return 0 def PctChg(x, y): if x == 0: if y == 0: return "100" else: return "--" else: return str(100*(y-x)/x) def GetCounts(FileName): global Bases, Counts Bases = {} Counts = {} Bases["Constrained"] = 0 Counts["Constrained"] = 0 gff.GetRecs(FileName, DoRec) return Counts, Bases Counts1, Bases1 = GetCounts(FileName1) Counts2, Bases2 = GetCounts(FileName2) Features = [ "CDS", "UTR", "NXE", "NGE", "island", "tandem", "Constrained" ] Keys = Counts1.keys() Keys.extend(Counts2.keys()) for Feature in Keys: if Feature not in Features: Features.append(Feature) if GenomeLength1 != -1: Features.append("Neutral") Features.append("Total") Counts1["Neutral"] = 0 Counts2["Neutral"] = 0 Counts1["Total"] = 0 Counts2["Total"] = 0 Bases1["Neutral"] = GenomeLength1 - Bases1["Constrained"] Bases2["Neutral"] = GenomeLength2 - Bases2["Constrained"] Bases1["Total"] = GenomeLength1 Bases2["Total"] = GenomeLength2 print " Feature 1=%8.8s 2=%8.8s Nr2-1 2-1 Pct Bases1 Bases2 Bases2-1 2-1 Pct" % (Name1, Name2) print "================ ========== ========== ========== ======== ========== ========== ========== ========" for Feature in Features: n1 = Get(Counts1, Feature) n2 = Get(Counts2, Feature) dn = n2 - n1 b1 = Get(Bases1, Feature) b2 = Get(Bases2, Feature) db = b2 - b1 pn = PctChg(n1, n2) pb = PctChg(b1, b2) s = "" s += "%16.16s" % Feature s += " %10u" % n1 s += " %10u" % n2 s += " %+10d" % (n2 - n1) s += " %7.7s%%" % pn s += " %10u" % b1 s += " %10u" % b2 s += " %+10d" % (b2-b1) s += " %7.7s%%" % pb print s
649
0
115
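The script above is Python 2 (print statements, list-returning dict.keys()), and its percent-change helper inherits Python 2 integer division. As a hedged illustration only, here is a Python 3 rewrite of that helper with the same special cases; it is not a drop-in replacement for the file above.

def pct_chg(old, new):
    """Percent change from old to new, keeping the original's special cases for old == 0."""
    if old == 0:
        return "100" if new == 0 else "--"
    return "%.1f" % (100.0 * (new - old) / old)


assert pct_chg(0, 0) == "100"
assert pct_chg(0, 5) == "--"
assert pct_chg(50, 75) == "50.0"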
62ff44c7a0f8ffe85637457045ee365bbf8e42f1
4,116
py
Python
LearningSafeSets/Model/SafeSet.py
alexliniger/AdversarialRoadModel
14157760687c22acc8b91c39128875005ada7563
[ "Apache-2.0" ]
20
2020-07-17T06:32:32.000Z
2022-03-27T03:24:26.000Z
LearningSafeSets/Model/SafeSet.py
alexliniger/AdversarialRoadModel
14157760687c22acc8b91c39128875005ada7563
[ "Apache-2.0" ]
null
null
null
LearningSafeSets/Model/SafeSet.py
alexliniger/AdversarialRoadModel
14157760687c22acc8b91c39128875005ada7563
[ "Apache-2.0" ]
7
2020-07-19T07:16:01.000Z
2022-01-22T22:58:02.000Z
## Copyright 2020 Alexander Liniger
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##     http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
###########################################################################
###########################################################################

import torch
import torch.nn as nn
import torch.nn.functional as F
36.75
75
0.49757
## Copyright 2020 Alexander Liniger ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## http://www.apache.org/licenses/LICENSE-2.0 ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ########################################################################### ########################################################################### import torch import torch.nn as nn import torch.nn.functional as F class SafeSet(nn.Module): def __init__(self,config): super(SafeSet, self).__init__() self.n_neurons = config['n_neurons'] self.n_batch = config['n_batch'] self.n_inputs = config['n_states'] if config['activation'] == "Softplus": self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.Softplus(beta=1), nn.Linear(self.n_neurons, self.n_neurons), nn.Softplus(beta=1), nn.Linear(self.n_neurons, self.n_neurons), nn.Softplus(beta=1), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) elif config['activation'] == "Tanh": self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.Tanh(), nn.Linear(self.n_neurons, self.n_neurons), nn.Tanh(), nn.Linear(self.n_neurons, self.n_neurons), nn.Tanh(), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) elif config['activation'] == "ReLU": self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.ReLU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ReLU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ReLU(), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) elif config['activation'] == "ELU": self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.Dropout(0.5), nn.ELU(), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) elif config['activation'] == "ELU2": self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) elif config['activation'] == "ELU6": self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, self.n_neurons), nn.ELU(), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) else: self.model = nn.Sequential( nn.Linear(self.n_inputs, self.n_neurons), nn.Linear(self.n_neurons, 1), nn.Sigmoid() ) def forward(self, input): return self.model(input)
3,228
4
76
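The activation selection in SafeSet above is a long if/elif chain. As a hedged design sketch (not the repository's code), the same idea can be table-driven; the branches with extra depth or Dropout ('ELU', 'ELU2', 'ELU6') are not reproduced here, only the plain three-hidden-layer cases.

import torch.nn as nn

_ACTIVATIONS = {
    "Softplus": lambda: nn.Softplus(beta=1),
    "Tanh": nn.Tanh,
    "ReLU": nn.ReLU,
}


def make_safe_set_mlp(n_inputs, n_neurons, activation="Tanh"):
    # three hidden layers followed by a sigmoid output, as in the original branches
    act = _ACTIVATIONS[activation]
    layers = [nn.Linear(n_inputs, n_neurons), act()]
    for _ in range(2):
        layers += [nn.Linear(n_neurons, n_neurons), act()]
    layers += [nn.Linear(n_neurons, 1), nn.Sigmoid()]
    return nn.Sequential(*layers)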
920fe3da4f1f82c7e5f82cc9d08809d75841703f
12,412
py
Python
arpa-rfp-evaluation/summary_reports.py
cityofasheville/abi-vendro-processing
72ed24216ee4772d72abd26b956d7f97ed23bdd3
[ "MIT" ]
null
null
null
arpa-rfp-evaluation/summary_reports.py
cityofasheville/abi-vendro-processing
72ed24216ee4772d72abd26b956d7f97ed23bdd3
[ "MIT" ]
1
2021-09-02T19:58:09.000Z
2021-09-02T19:58:09.000Z
arpa-rfp-evaluation/summary_reports.py
cityofasheville/data-processing-scripts
72ed24216ee4772d72abd26b956d7f97ed23bdd3
[ "MIT" ]
null
null
null
from os import link from googleapiclient.discovery import build import json from csv import reader from google.oauth2 import service_account import pandas as pd from os.path import exists import numpy as np from functools import reduce import time SERVICE_ACCOUNT_FILE = None SCOPES = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive'] INPUTS_EVAL_MAPPING_ID =None OUTPUTS_MASTER_ID = None INPUTS_SPREADSHEET_ID = None sheetService = None ######################################################### ########################################################################### inputs = None if exists('./inputs.json'): with open('inputs.json', 'r') as file: inputs = json.load(file) else: print('You must create an inputs.json file') sys.exit() INPUTS_EVAL_MAPPING_ID = inputs["INPUTS_EVAL_MAPPING_ID"] OUTPUTS_MASTER_ID = inputs["OUTPUTS_MASTER_ID"] INPUTS_SPREADSHEET_ID = inputs['INPUTS_SPREADSHEET_ID'] SERVICE_ACCOUNT_FILE = inputs['SERVICE_ACCOUNT_FILE'] print('Set up services') setUpServices() sheet = sheetService.spreadsheets() print('Load weights') links_df, weight_df = grab_weights_and_links(INPUTS_SPREADSHEET_ID) # Calls list building function print('Build project summary list') all_project_scores = build_project_summary_list(links_df, weight_df, INPUTS_EVAL_MAPPING_ID) print('Summarize all the projects') list_to_append, maxMinList = summarize_all_project(all_project_scores, links_df) updateSheet(list_to_append, OUTPUTS_MASTER_ID, "Summary!A2:AA1000") updateSheet(maxMinList, OUTPUTS_MASTER_ID, "Potential Issues!A3:AA1000") print('Finished, Party time')
44.328571
172
0.677409
from os import link from googleapiclient.discovery import build import json from csv import reader from google.oauth2 import service_account import pandas as pd from os.path import exists import numpy as np from functools import reduce import time SERVICE_ACCOUNT_FILE = None SCOPES = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive'] INPUTS_EVAL_MAPPING_ID =None OUTPUTS_MASTER_ID = None INPUTS_SPREADSHEET_ID = None sheetService = None ######################################################### def setUpServices(): global sheetService creds = service_account.Credentials.from_service_account_file( SERVICE_ACCOUNT_FILE, scopes=SCOPES ) sheetService = build('sheets', 'v4', credentials=creds) def grab_weights_and_links(inputSpreadsheetId): # Gets score weights from the evaluation sheet, and project links, and puts these things into 2 # dfs to merge with the main summary df later sheet = sheetService.spreadsheets() results = sheet.values().get(spreadsheetId=inputSpreadsheetId,range='Score Weighting!C8:D27').execute() values = results.get('values', []) del values[13] del values[6] weight_df = pd.DataFrame(values, columns=['weight_in_cat', 'global_weight']) weight_df['weight_in_cat'] = weight_df['weight_in_cat'].astype(float) weight_df['global_weight'] = weight_df['global_weight'].astype(float) # Gets project links from the evaluation assignment sheet sheet = sheetService.spreadsheets() results = sheet.values().get(spreadsheetId=inputSpreadsheetId,range='Eligible Proposals and Assignments!A2:C').execute() values = results.get('values', []) links_df = pd.DataFrame(values, columns=['project_number', 'project_name', 'project_link']) return(links_df, weight_df) def list_tab_links(evaluationMappingSheetId): sheet = sheetService.spreadsheets() results = sheet.values().get(spreadsheetId=evaluationMappingSheetId,range='Tab Mapping!A1:AB').execute() tabs = results.get('values', []) tab_links_df = pd.DataFrame(tabs) tab_links_df.iloc[0,0] = 'Project' tab_links_df.columns = tab_links_df.iloc[0] tab_links_df.drop(tab_links_df.index[0], inplace=True) tab_links_df.reset_index(inplace=True) return(tab_links_df) def build_project_summary_list(links_df, weight_df, evaluationMappingSheetId): tab_links_df = list_tab_links(evaluationMappingSheetId) # Get spreadsheet links/ids from the spreadsheet total_list = [] sheet = sheetService.spreadsheets() results = sheet.values().get(spreadsheetId=evaluationMappingSheetId,range='Sheet Mapping!A2:C').execute() link_ss_values = results.get('values', []) for thing in link_ss_values: id = thing[1] print(' Sheet ' + thing[0]) sheet = sheetService.spreadsheets() sheets = sheet.get(spreadsheetId=id, fields='sheets/properties/title').execute() ranges = [sheet['properties']['title'] for sheet in sheets['sheets']] format_list = [] # Goes through each tab and gets values for tab in ranges[1:]: print (' Tab ' + tab) results = sheet.values().get(spreadsheetId=id,range=tab +'!A1:E24').execute() values = results.get('values', []) data = values[6:] #Make a dataframe, then change the rating values to numbers df = pd.DataFrame(data, columns = ["question_num", 'question', 'rating', 'guidance', 'scoring_category']) df = df.replace(r'^\s*$', np.nan, regex=True) if df['rating'].isnull().values.any(): ECI_score = "Not Complete" PPE_score = "Not Complete" OQ_score = "Not Complete" total_score = "Not Complete" else: #df["rating"] = df[~df["rating"].isnull()].str.lower() df["rating"] = df["rating"].str.lower() df["rating"].replace({"none": 0, "low": 1/3, "medium": 
2/3, 'high':1}, inplace=True) #add more columns df['total_category_weight'] = df['scoring_category'] df["total_category_weight"].replace({"Equitable Community Impact": 40, "Project Plan and Evaluation": 40, "Organizational Qualification": 20}, inplace=True) # Adding df of scoring weights to the df I just created df = pd.concat([df, weight_df], axis=1) df['category_rating'] = df['rating'].astype(float) * df['weight_in_cat'].astype(float) #Calc category value by global weight cat_weights_global = df.groupby(['scoring_category']).category_rating.sum() #Formatting output #What are the 11, 8, and 9? They're the total of the "weight within category". #That makes more sense if you take a look at the scoring sheet- #"Evaluation Score" Score Weighting tab, column ECI_score = (cat_weights_global['Equitable Community Impact']/11) * 40 PPE_score = (cat_weights_global['Project Plan and Evaluation']/8) * 40 OQ_score = (cat_weights_global['Organizational Qualification']/9) * 20 total_score = round(ECI_score + PPE_score + OQ_score, 2) #Grabbing info from list to put into the right output format project_name = values[1][1].split(": ",1)[1] project_number = project_name.split(' ')[0] evaluator = values[0][1].split(": ",1)[1] evaluator=evaluator.strip() link = thing[2] # Using the df from the beginning of this function to look up the links # to individual tabs on evaluator sheets. Appending that to the end of the list. eval_link = tab_links_df[evaluator].iloc[int(project_number)-1] format_list = [project_number, project_name, evaluator, link, total_score, ECI_score, PPE_score, OQ_score, eval_link] total_list.append(format_list) time.sleep(1) time.sleep(3) return(total_list) def maxMinDifference(df): #Get the links dataframe, merge the two together to be able to output links for each project df.merge(links_df['project_number'], on='project_number', how='left') #Calculate the difference between the min and max score for each column maxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['total_score'].agg(['max','min'])) maxMinDF['totalScoreVaries'] = maxMinDF['max'] - maxMinDF['min'] ECIMaxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['ECI_score'].agg(['max','min'])) ECIMaxMinDF['ECIScoreVaries'] = maxMinDF['max'] - maxMinDF['min'] PPEMaxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['PPE_score'].agg(['max','min'])) PPEMaxMinDF['PPEScoreVaries'] = maxMinDF['max'] - maxMinDF['min'] OQMaxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['OQ_score'].agg(['max','min'])) OQMaxMinDF['OQScoreVaries'] = maxMinDF['max'] - maxMinDF['min'] #Merge all these calculations together into one dataframe maxMinDF = maxMinDF.merge(ECIMaxMinDF['ECIScoreVaries'], on=['project_number', 'project_name']) maxMinDF = maxMinDF.merge(PPEMaxMinDF['PPEScoreVaries'], on=['project_number', 'project_name']) maxMinDF = maxMinDF.merge(OQMaxMinDF['OQScoreVaries'], on=['project_number', 'project_name']) maxMinDF.drop(['max', 'min'], axis=1, inplace=True) columnList = ['totalScoreVaries', 'ECIScoreVaries', 'PPEScoreVaries', 'OQScoreVaries'] #If the different is greater than 50, "True" is assigned, otherwise np.nan. This is so we can use .dropna to #drop the rows which have all np.nans in them. 
for entry in columnList: maxMinDF[maxMinDF[entry] >= 50] = True maxMinDF[maxMinDF[entry] !=True] = np.nan maxMinDF = maxMinDF.dropna( how='all', subset=['totalScoreVaries', 'ECIScoreVaries', 'PPEScoreVaries', 'OQScoreVaries']) maxMinDF = maxMinDF.replace(np.nan, '') maxMinDF = maxMinDF.reset_index() print(maxMinDF) maxMinList = maxMinDF.values.tolist() return(maxMinList) def summarize_all_project(my_list, links_df): # Creating initial df my_df = pd.DataFrame(my_list, columns=['project_number', 'project_name', 'evaluator', 'link_to_proposal', 'total_score', 'ECI_score', 'PPE_score', 'OQ_score', 'eval_link']) my_df = my_df.round(2) #Calculating mean and median, renaming columsn and resetting index (so that project #s show up when converted to list) numericScoreDF = my_df[pd.to_numeric(my_df['total_score'], errors='coerce').notnull()] numericScoreDF['total_score'] = numericScoreDF['total_score'].astype(float) numericScoreDF['ECI_score'] = numericScoreDF['ECI_score'].astype(float) numericScoreDF['PPE_score'] = numericScoreDF['PPE_score'].astype(float) numericScoreDF['OQ_score'] = numericScoreDF['OQ_score'].astype(float) maxMinList = maxMinDifference(numericScoreDF) summary_df = pd.DataFrame(numericScoreDF.groupby(['project_number', 'project_name'])['total_score', 'ECI_score', 'PPE_score', 'OQ_score'].mean()) summary_df = summary_df.reset_index() median_df = pd.DataFrame(numericScoreDF.groupby(['project_name'])['total_score'].median()) median_df = median_df.rename({'total_score':'median_score'}, axis=1) # Creating string of all scores per project my_df['total_score'] = my_df['total_score'].astype(str) individual_score_list_df = pd.DataFrame(my_df.groupby(['project_name'])['total_score'].apply(', '.join).reset_index()) individual_score_list_df= individual_score_list_df.rename({'total_score':'score_list'}, axis=1) # Creating string of all links eval_links_df = pd.DataFrame(my_df.groupby(['project_name'])['eval_link'].apply(', '.join).reset_index()) # Merging the various dfs to create 1 summary summary_df=summary_df.merge(median_df, on='project_name') summary_df=summary_df.merge(individual_score_list_df, on='project_name') summary_df = summary_df.merge(links_df[['project_number', 'project_link']], on='project_number', how='left') summary_df=summary_df.merge(eval_links_df, on='project_name') # Reordering columns so the info is in the correct order in the list summary_df = summary_df[['project_number', 'project_name', 'project_link', 'median_score', 'total_score', 'ECI_score', 'PPE_score', 'OQ_score', 'score_list', 'eval_link']] summary_df = summary_df.round(2) final_list = summary_df.values.tolist() # evals is string of the links to evaluation tabs # I'm making it a list and appending it to the final_list, so that each link # will end up in a separate column for entry in final_list: evals = entry.pop() evals = list(evals.split(', ')) entry.extend(evals) return(final_list, maxMinList) def updateSheet(my_list, spreadSheetID, range): resource = { "majorDimension": "ROWS", "values": my_list } sheetService.spreadsheets().values().update( spreadsheetId=spreadSheetID, range=range, body=resource, valueInputOption="USER_ENTERED").execute() ########################################################################### inputs = None if exists('./inputs.json'): with open('inputs.json', 'r') as file: inputs = json.load(file) else: print('You must create an inputs.json file') sys.exit() INPUTS_EVAL_MAPPING_ID = inputs["INPUTS_EVAL_MAPPING_ID"] OUTPUTS_MASTER_ID = inputs["OUTPUTS_MASTER_ID"] INPUTS_SPREADSHEET_ID = 
inputs['INPUTS_SPREADSHEET_ID'] SERVICE_ACCOUNT_FILE = inputs['SERVICE_ACCOUNT_FILE'] print('Set up services') setUpServices() sheet = sheetService.spreadsheets() print('Load weights') links_df, weight_df = grab_weights_and_links(INPUTS_SPREADSHEET_ID) # Calls list building function print('Build project summary list') all_project_scores = build_project_summary_list(links_df, weight_df, INPUTS_EVAL_MAPPING_ID) print('Summarize all the projects') list_to_append, maxMinList = summarize_all_project(all_project_scores, links_df) updateSheet(list_to_append, OUTPUTS_MASTER_ID, "Summary!A2:AA1000") updateSheet(maxMinList, OUTPUTS_MASTER_ID, "Potential Issues!A3:AA1000") print('Finished, Party time')
10,595
0
161
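A hedged sketch of the scoring arithmetic inside build_project_summary_list() above, without the Google Sheets plumbing. The 11/8/9 category totals and 40/40/20 global weights are taken from that code; the example rows and weights below are made up.

RATING_VALUE = {"none": 0.0, "low": 1 / 3, "medium": 2 / 3, "high": 1.0}
CATEGORY_TOTALS = {  # category -> (sum of within-category weights, global category weight)
    "Equitable Community Impact": (11.0, 40.0),
    "Project Plan and Evaluation": (8.0, 40.0),
    "Organizational Qualification": (9.0, 20.0),
}


def category_score(rows, category):
    """rows: iterable of (category, rating_text, weight_in_category) tuples."""
    weight_sum, global_weight = CATEGORY_TOTALS[category]
    raw = sum(RATING_VALUE[r.lower()] * w for c, r, w in rows if c == category)
    return raw / weight_sum * global_weight


rows = [
    ("Equitable Community Impact", "high", 6.0),
    ("Equitable Community Impact", "medium", 5.0),
    ("Project Plan and Evaluation", "low", 8.0),
    ("Organizational Qualification", "high", 9.0),
]
total_score = round(sum(category_score(rows, c) for c in CATEGORY_TOTALS), 2)
print(total_score)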
f838c963eb88052d512eb77b99721f3c3bb5120a
2,041
py
Python
test/algorithms/initial_points/test_initial_point.py
kevinsung/qiskit-nature
407533e05ca33fa53eb4e9cd7b089a0a99f9540e
[ "Apache-2.0" ]
null
null
null
test/algorithms/initial_points/test_initial_point.py
kevinsung/qiskit-nature
407533e05ca33fa53eb4e9cd7b089a0a99f9540e
[ "Apache-2.0" ]
null
null
null
test/algorithms/initial_points/test_initial_point.py
kevinsung/qiskit-nature
407533e05ca33fa53eb4e9cd7b089a0a99f9540e
[ "Apache-2.0" ]
null
null
null
# This code is part of Qiskit. # # (C) Copyright IBM 2022. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Test InitialPoint""" import unittest from unittest.mock import patch from test import QiskitNatureTestCase from qiskit_nature.algorithms.initial_points import InitialPoint class TestInitialPoint(QiskitNatureTestCase): """Test Initial Point""" @patch.multiple(InitialPoint, __abstractmethods__=set()) def test_to_numpy_array(self): """Test to_numpy_array.""" with self.assertRaises(NotImplementedError): self.initial_point.to_numpy_array() def test_get_ansatz(self): """Test get ansatz.""" with self.assertRaises(NotImplementedError): _ = self.initial_point.ansatz def test_set_ansatz(self): """Test set ansatz.""" with self.assertRaises(NotImplementedError): self.initial_point.ansatz = None def test_get_grouped_property(self): """Test get grouped_property.""" with self.assertRaises(NotImplementedError): _ = self.initial_point.grouped_property def test_set_grouped_property(self): """Test set grouped_property.""" with self.assertRaises(NotImplementedError): self.initial_point.grouped_property = None def test_compute(self): """Test compute.""" with self.assertRaises(NotImplementedError): self.initial_point.compute(None, None) if __name__ == "__main__": unittest.main()
31.890625
77
0.696717
# This code is part of Qiskit. # # (C) Copyright IBM 2022. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Test InitialPoint""" import unittest from unittest.mock import patch from test import QiskitNatureTestCase from qiskit_nature.algorithms.initial_points import InitialPoint class TestInitialPoint(QiskitNatureTestCase): """Test Initial Point""" @patch.multiple(InitialPoint, __abstractmethods__=set()) def setUp(self) -> None: super().setUp() # pylint: disable=abstract-class-instantiated self.initial_point = InitialPoint() # type: ignore def test_to_numpy_array(self): """Test to_numpy_array.""" with self.assertRaises(NotImplementedError): self.initial_point.to_numpy_array() def test_get_ansatz(self): """Test get ansatz.""" with self.assertRaises(NotImplementedError): _ = self.initial_point.ansatz def test_set_ansatz(self): """Test set ansatz.""" with self.assertRaises(NotImplementedError): self.initial_point.ansatz = None def test_get_grouped_property(self): """Test get grouped_property.""" with self.assertRaises(NotImplementedError): _ = self.initial_point.grouped_property def test_set_grouped_property(self): """Test set grouped_property.""" with self.assertRaises(NotImplementedError): self.initial_point.grouped_property = None def test_compute(self): """Test compute.""" with self.assertRaises(NotImplementedError): self.initial_point.compute(None, None) if __name__ == "__main__": unittest.main()
141
0
26
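The setUp() in the record above instantiates an abstract class by patching __abstractmethods__ to an empty set. A hedged, Qiskit-free illustration of the same trick (the class names here are invented):

import abc
import unittest
from unittest.mock import patch


class Base(abc.ABC):
    @abc.abstractmethod
    def compute(self):
        raise NotImplementedError


class TestBase(unittest.TestCase):
    @patch.multiple(Base, __abstractmethods__=set())
    def setUp(self):
        self.obj = Base()  # would raise TypeError without the patch

    def test_compute_raises(self):
        with self.assertRaises(NotImplementedError):
            self.obj.compute()


if __name__ == "__main__":
    unittest.main()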
ca810b6f0d9eabaeb4ad71ee8608ce994583b4bc
22,392
py
Python
Compressive_Sampling.py
tnakaicode/Python-for-Signal-Processing
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
[ "CC-BY-3.0" ]
null
null
null
Compressive_Sampling.py
tnakaicode/Python-for-Signal-Processing
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
[ "CC-BY-3.0" ]
null
null
null
Compressive_Sampling.py
tnakaicode/Python-for-Signal-Processing
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
[ "CC-BY-3.0" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # ## Compressive sampling Overview # our previous discussion, we saw that imposing bandlimited-ness on our class of signals permits point-wise sampling of our signal and then later perfect reconstruction. It turns out that by imposing *sparsity* we can also obtain perfect reconstruction irrespective of whether or not we have satsified the sampling rate limits imposed by Shannon's sampling theorem. This has extremely important in practice because many signals are naturally sparse so that collecting samples at high rates only to dump most of them as the signal is compressed is expensive and wasteful. # ## What Are Sparse Signals? # Let's carefully discuss what we np.mean by *sparse* in this context. A signal $f$ is sparse if it can be expressed in very few nonzero components ($\mathbf{s}$) with respect to a given basis ($ \mathbf{\Psi} $ ). In other words, in np.matrix-vector language: # # $ \mathbf{f} = \mathbf{\Psi} \mathbf{s} $ # # where $ || \mathbf{s} ||_0 \leq N $ where $N$ is the length of the vector and $|| \cdot||_0$ counts the number of nonzero elements in $\mathbf{s}$. Furthermore, we don't actually collect $N$ samples point-wise as we did in the Shannon sampling case. Rather, we measure $\mathbf{f}$ indirectly as $\mathbf{y}$ with another np.matrix as in: # # $\mathbf{y} = \mathbf{\Phi f} = \mathbf{\Phi} \mathbf{\Psi} \mathbf{s} = \mathbf{\Theta s} $ # # where $\mathbf{\Theta}$ is an $M \times N$ np.matrix and $ M < N $ is the number of measurements. This setup np.means we have two problems to solve. First, how to design a *stable* measurement np.matrix $\mathbf{\Phi}$ and then, second, how to reconstruct $ \mathbf{f} $ from $ \mathbf{y} $. # # This may look like a standard linear algebra problem but since $ \mathbf{\Theta} $ has fewer rows than columns, the solution is necessarily ill-posed. This is where we inject the sparsity concept! Suppose that $f$ is $K$-sparse ( $||f||_0=K$ ), then if we somehow knew *which* $K$ columns of $ \mathbf{\Theta} $ matched the $K$ non-zero entries in $\mathbf{s}$, then $\mathbf{\Theta}$ would be $ M \times K $ where we could make $M > K$ and then have a stable inverse. # # This bit of reasoning is encapsulated in the following statement for any vector $\mathbf{v}$ sharing the same $K$ non-zero entries as $\mathbf{s}$, we have # # $$1-\epsilon \leq \frac{|| \mathbf{\Theta v} ||_2}{|| \mathbf{v} ||_2} \leq 1+\epsilon $$ # # which is another way of saying that $\mathbf{\Theta}$ preserves the lengths of $K$-sparse vectors. Of course we don't know ahead of time which $K$ components to use, but it turns out that this condition is sufficient for a stable inverse of $\mathbf{\Theta}$ if it holds for any $3K$-sparse vector $\mathbf{v}$. This is the *Restricted Isometry Property* (RIP). Unfortunately, in order to use this sufficient condition, we would have to propose a $\mathbf{\Theta}$ and then check all possible combinations of nonzero entries in the $N$-length vector $\mathbf{v}$. As you may guess, this is prohibitive. # # Alternatively, we can approach stability by defining *incoherence* between the measurement np.matrix $\mathbf{\Phi}$ and the sparse basis $\mathbf{\Psi}$ as when any of the columns of one cannot be expressed as a small subset of the columns of the other. 
For example, if we have delta-spikes for $\mathbf{\Phi}$ as the row-truncated identity np.matrix # # $$\mathbf{\Phi} = \mathbf{I}_{M \times N} $$ # # and the discrete Fourier transform np.matrix for $\mathbf{\Psi}$ as # # $\mathbf{\Psi} = \begin{bnp.matrix}\\\\ # e^{-j 2\pi k n/N}\\\\ # \end{bnp.matrix}_{N \times N}$ # # Then we could not write any of the columns of $\mathbf{\Phi}$ using just a few of the columns of $\mathbf{\Psi}$. # # It turns out that picking the measuring $M \times N$ np.matrix np.random.randomly according to a Gaussian zero-np.mean, $1/N$ variance distribution and using the identity np.matrix as $\mathbf{\Phi}$, that the resulting $\mathbf{\Theta}$ np.matrix can be shown to satisfy RIP with a high probability. This np.means that we can recover $N$-length $K$-sparse signals with a high probability from just $M \ge c K \log (N/K)$ samples where $c$ is a small constant. Furthermore, it also turns out that we can use any orthonormal basis for $\mathbf{\Phi}$, not just the identity np.matrix, and these relations will all still hold. # ## Reconstructing Sparse Signals # Now that we have a way, by using np.random.random matrices, to satisfy the RIP, we are ready to consider the reconstruction problem. The first impulse is to compute the least-squares solution to this problem as # # $$ \mathbf{s}^* = \mathbf{\Theta}^T (\mathbf{\Theta}\mathbf{\Theta}^T)^{-1}\mathbf{y} $$ # # But a moment's thought may convince you that since $\mathbf{\Theta}$ is a np.random.random np.matrix, most likely with lots of non-zero entries, it is highly unlikely that $\mathbf{s}^* $ will turn out to be sparse. There is actually a deeper geometric intuition as to why this happens, but let's first consider another way of solving this so that the $\mathbf{s}^*$ is $K$-sparse. Suppose instead we shuffle through combinations of $K$ nonzero entries in $\mathbf{s}$ until we satisfy the measurements $\mathbf{y}$. Stated mathematically, this np.means # # $$ \mathbf{s}^* = argmin || \mathbf{s}^* ||_0 $$ # # where # # $$ \mathbf{\Theta} \mathbf{s}^* = \mathbf{y} $$ # # It can be shown that with $M=K+1$ iid Gaussian measurements, this optimization will recover a $K$-sparse signal exactly with high probability. Unfortunately, this is numerically unstable in addition to being an NP-complete problem. # # Thus, we need another tractable way to approach this problem. It turns out that when a signal is sparse, it usually np.means that the nonzero terms are highly asymmetric np.meaning that if there are $K$ terms, then most likely there is one term that is dominant (i.e. of much larger magnitude) and that dwarfs the other nonzero terms. Geometrically, this np.means that in $N$-dimensional space, the sparse signal is very close to one (or, maybe just a few) of the axes. # # It turns out that one can bypass this combinatorial problem using $L_1$ minimization. To examine this, let's digress and look at the main difference between $L_2$ and $L_1$ minimization problems. # reference: # `http://users.ece.gatech.edu/justin/ssp2007` # ## $L_2$ vs. $L_1$ Optimization # The classic constrained least squares problem is the following: # # min $||\mathbf{x}||_2^2$ # # where $x_1 + 2 x_2 = 1$ # # with corresponding solution illustrated below. 
# # [1] import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle x1 = np.linspace(-1, 1, 10) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(x1, (1 - x1) / 2) ax.add_patch(Circle((0, 0), 1 / np.sqrt(5), alpha=0.3)) ax.plot(1 / 5, 2 / 5, 'rs') ax.axis('equal') ax.set_xlabel('$x_1$', fontsize=24) ax.set_ylabel('$x_2$', fontsize=24) ax.grid() # Note that the line is the constraint so that any solution to this problem must be on this line (i.e. satisfy the constraint). The $L_2$ solution is the one that just touches the perimeter of the circle. This is because, in $L_2$, the unit-ball has the shape of a circle and represents all solutions of a fixed $L_2$ length. Thus, the one of smallest length that intersects the line is the one that satisfies the stated minimization problem. Intuitively, this np.means that we *inflate* a ball at the origin and stop when it touches the contraint. The point of contact is our $L_2$ minimization solution. # # Now, let's do same problem in $L_1$ norm # # min $||\mathbf{x}||_1=|x_1|+|x_2|$ # # where $x_1 + 2 x_2 = 1$ # # this case the constant-norm unit-ball contour in the $L_1$ norm is a diamond-shape instead of a circle. Comparing the graph below to the last shows that the solutions found are different. Geometrically, this is because the line tilts over in such a way that the inflating circular $L_2$ ball hits a point of tangency that is different from the $L_1$ ball because the $L_1$ ball creeps out mainly along the principal axes and is less influenced by the tilt of the line. This effect is much more pronounced in higher $N$-dimensional spaces where $L_1$-balls get more *spikey*. # # The fact that the $L_1$ problem is less sensitive to the tilt of the line is crucial since that tilt (i.e. orientation) is np.random.random due the choice of np.random.random measurement matrices. So, for this problem to be well-posed, we need to *not* be influenced by the orientation of any particular choice of np.random.random np.matrix and this is what casting this as a $L_1$ minimization provides. # [2] from matplotlib.patches import Rectangle import matplotlib.patches import matplotlib.transforms r = matplotlib.patches.RegularPolygon((0, 0), 4, 1 / 2, np.pi / 2, alpha=0.5) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(x1, (1 - x1) / 2) ax.plot(0, 1 / 2, 'rs') ax.add_patch(r) ax.grid() ax.set_xlabel('$x_1$', fontsize=24) ax.set_ylabel('$x_2$', fontsize=24) ax.axis('equal') # To explore this a bit, let's consider using the `cvxopt` package (Python ver 2.6 used here). This can be cast as a linear programming problem as follows: # # min $||\mathbf{t}||_1 = |t_1| + |t_2|$ # # subject to: # # $-t_1 < x_1 < t_1$ # # $-t_2 < x_2 < t_2$ # # $x_1 + 2 x_2 = 1$ # # $t_1 > 0$ # # $t_2 > 0$ # # where the last two constraints are already implied by the first two and are written out just for clarity. 
This can be implemented and solved in `cvxopt` as the following: # [3] from cvxopt import matrix as matrx # don't overrite numpy matrix class from cvxopt import solvers # t1,x1,t2,x2 c = matrx([1, 0, 1, 0], (4, 1), 'd') G = matrx([[-1, -1, 0, 0], # column-0 [1, -1, 0, 0], # column-1 [0, 0, -1, -1], # column-2 [0, 0, 1, -1], # column-3 ], (4, 4), 'd') # (4,1) is 4-rows,1-column, 'd' is float type spec h = matrx([0, 0, 0, 0], (4, 1), 'd') A = matrx([0, 1, 0, 2], (1, 4), 'd') b = matrx([1], (1, 1), 'd') sol = solvers.lp(c, G, h, A, b) x1 = sol['x'][1] x2 = sol['x'][3] print('x=%3.2f' % x1) print('y=%3.2f' % x2) # ## Example Gaussian np.random.random matrices # Let's try out our earlier result about np.random.random Gaussian matrices and see if we can reconstruct an unknown $\mathbf{s}$ vector using $L_1$ minimization. # [56] import numpy as np import scipy.linalg def rearrange_G(x): 'setup to put inequalities np.matrix with last 1/2 of elements as main variables' n = x.shape[0] return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]]) K = 2 # components Nf = 128 # number of samples M = 12 # > K log2(Nf/K); num of measurements s = np.zeros((Nf, 1)) # sparse vector we want to find s[0] = 1 # set the K nonzero entries s[1] = 0.5 # np.np.random.random.seed(5489) # set np.random.random seed for reproducibility Phi = np.matrix(np.random.randn(M, Nf) * np.sqrt(1 / Nf)) # np.random.random Gaussian np.matrix y = Phi * s # measurements # -- setup L1 minimization problem -- # equalities np.matrix with G = matrx(rearrange_G(scipy.linalg.block_diag( *[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf))) # objective function row-np.matrix c = matrx(np.hstack([np.ones(Nf), np.zeros(Nf)])) # RHS for inequalities h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd') # equality constraint np.matrix A = matrx(np.hstack([Phi * 0, Phi])) # RHS for equality constraints b = matrx(y) sol = solvers.lp(c, G, h, A, b) # nonzero entries nze = np.array(sol['x']).flatten()[:Nf].round(2).nonzero() print(np.array(sol['x'])[nze]) # That worked out! However, if you play around with this example enough with different np.random.random matrices (unset the ``seed`` statement above), you will find that it does not *always* find the correct answer. This is because the guarantees about reconstruction are all stated probabalistically (i.e. "high-probability"). This is another major difference between this and Shannon sampling. # # Let's encapulate the above $L_1$ minimization code so we can use it later. # [5] #from cStringIO import StringIO import sys # ## Example: Sparse Fourier Transform # As an additional example, let us consider the Fourier transform and see if we can recover the sparse Fourier transform from a small set of measurements. For simplicity, we will assume that the time domain signal is real which automatically np.means that the Fourier transform is symmetric. 
# [141] def dftmatrix(N=8): 'compute inverse DFT matrices' n = np.arange(N) U = matrx(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N) return np.matrix(U) Nf = 128 K = 3 # components M = 8 # > K log2(Nf/K); num of measurements s = np.zeros((Nf, 1)) # sparse vector we want to find s[0] = 1 # set the K nonzero entries s[1] = 0.5 s[-1] = 0.5 # symmetric to keep inverse Fourier transform real Phi = dftmatrix(Nf)[:M, :] # take M-rows y = Phi * s # measurements # have to assert the type here on my hardware sol = L1_min(Phi.real, y.real.astype(np.float64), K) print(np.allclose(s.flatten(), sol)) # [140] plt.plot(sol) plt.plot(y.real) # ## Uniform Uncertainty Principle # $\Phi$ obeys a UUP for sets of size $K$ if # # <center> # $$ 0.8 \frac{M}{N} ||f||_2^2 \leq || \Phi f||_2^2 \leq 1.2 \frac{M}{N} ||f||_2^2 $$ # </center> # # Measurements that satisfy this are defined as *incoherent*. Given that $f$ is $K$-sparse and we measure # $y=\Phi f$, then we search for the sparsest vector that explains the $y$ measurements and thus find $f$ as follows: # # <center> # $min_f \\#\lbrace t: f(t) \ne 0 \rbrace $ where $\Phi f = y$ # </center> # Note that the hash mark is the size (i.e. cardinality) of the set. This np.means that we are looking for the fewest individual points for $f$ that satisfy the constraints. Unfortunately, this is not practically possible, so we must use the $\mathbb{L}_1$ norm as a proxy for sparsity. # # Suppose $f$ is $K$-sparse and that $\Phi$ obeys UUP for sets of size $4K$. Then we measure $y=\Phi f$ and then solve # # <center> # $min_f ||f||_1 $ where $\Phi f = y$ # </center> # to recover $f$ exactly and we can use $M > K \log N$ measurements, where the number of measurements is approximately equal to the number of active components. Let's consider a concrete example of how this works. # ### Example: Sampling Sinusoids # Here, we sample in the time-domain, given that we know the signal is sparse in the frequency domain. # # <center> # $$ \hat{f}(\omega) = \sum_{i=1}^K \alpha_i \delta(\omega_i-\omega) $$ # </center> # # which np.means that it consists of $K$-sparse nonzero elements. Therefore, the time domain signal is # # <center> # $$ f(t) = \sum_{i=1}^K \alpha_i e^{i \omega_i t} $$ # </center> # # where the $\alpha_i$ and $\omega_i$ are unknown. We want solve for these unknowns by taking $M \gt K \log N$ samples of $f$. # The problem we want to solve is # # $ min_g || \hat{g} ||_{L_1}$ # # subject to # # $ g(t_m)=f(t_m) $ # # The trick here is that are minimizing in the frequency-domain while the constraints are in the time-domain. To make things easier, we will restrict our attention to real time-domain signals $f$ and we will only reconstruct the even-indexed time-samples from the signal. This np.means we need a way of expressing the inverse Fourier Transform as a np.matrix of equality constraints. The assumption of real-valued time-domain signals implies the following symmetry in the frequency-domain: # # $ F(k) = F(N-k)^* $ # # where $F$ is the Fourier transform of $f$ and the asterisk denotes complex conjugation and $k\in \lbrace 0,1,..N-1\rbrace$ and $N$ is the Fourier Transform length. To make things even more tractable we will assume the time-domain signal is even, which np.means real-valued Fourier transform values. # # Suppose that $\mathbf{U}_N$ is the $N$-point DFT-np.matrix. Note that we always assume $N$ is even. Since we are dealing with only real-valued signals, the transform is symmetric, so we only need half of the spectrum computed. 
It turns out that the even-indexed time-domain samples can be constructed as follows: # # $ \mathbf{f_{even}} = \mathbf{U}_{N/2} \begin{bmatrix}\\\\ # F(0)+F(N/2)^* \\\\ # F(1)+F(N/2-1)^* \\\\ # F(2)+F(N/2-2)^* \\\\ # \dots \\\\ # F(N/2-1)+F(1)^* # \end{bmatrix}$ # # We can further simplify this by breaking this into real (superscript $R$) and imaginary (superscript $I$) parts and keeping only the real part # # $$\mathbf{f_{even}} = \mathbf{U}_{N/2}^R # \begin{bmatrix}\\\\ # F(0)^R+F(N/2)^R \\\\ # F(1)^R+F(N/2-1)^R \\\\ # F(2)^R+F(N/2-2)^R \\\\ # \dots \\\\ # F(N/2-1)^R+F(1)^R # \end{bmatrix} # + # \mathbf{U}^I_N # \begin{bmatrix} \\\\ # -F(0)^I+F(N/2)^I \\\\ # -F(1)^I+F(N/2-1)^I \\\\ # -F(2)^I+F(N/2-2)^I \\\\ # \dots \\\\ # -F(N/2-1)^I+F(1)^I # \end{bmatrix}$$ # # But we are going to force all the $F(k)^I$ to be zero in our example. Note that the second term should have a $\mathbf{U}_{N/2}$ in it instead of $\mathbf{U}_N$, but there is something wrong with the javascript parser for that bit of TeX. # # Now, let's see if we can walk through this step-by-step to make sure our optimization can actually work. Note that we don't need the second term on the right with the $F^I$ terms because by our construction, $F$ is real. # [358] def dftmatrix(N=8): 'compute inverse DFT matrices' n = np.arange(N) U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N) return np.matrix(U) def Q_rmatrix(Nf=8): 'implements the reordering, adding, and stacking of the matrices above' Q_r = np.matrix(np.hstack([np.eye(int(Nf / 2)), np.eye(int(Nf / 2)) * 0]) + np.hstack([np.zeros((int(Nf / 2), 1)), np.fliplr(np.eye(int(Nf / 2))), np.zeros((int(Nf / 2), int(Nf / 2) - 1))])) return Q_r Nf = 8 F = np.zeros((Nf, 1)) # 8-point DFT F[0] = 1 # DC-term, constant signal n = np.arange(Nf / 2) ft = dftmatrix(Nf).H * F # this gives the constant signal Q_r = Q_rmatrix(Nf) U = dftmatrix(Nf / 2) # half inverse DFT matrix feven = U.real * Q_r * F # half the size print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples # [359] # let's try this with another sparse frequency-domain signal F = np.zeros((Nf, 1)) F[1] = 1 F[Nf - 1] = 1 # symmetric part ft = dftmatrix(Nf).H * F # this gives the time-domain signal feven = U.real * Q_r * F # half the size print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o') plt.xlabel('$t$', fontsize=22) plt.ylabel('$f(t)$', fontsize=22) plt.title('even-numbered samples') # We can use the above cell to create more complicated real signals. You can experiment with the cell below (just after the quick numerical check that follows). Just remember to impose the symmetry condition!
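# Before experimenting, here is a quick, self-contained check of the even-sample idea stated directly in terms of `numpy.fft` (an illustrative aside that does not rely on the `dftmatrix`/`Q_rmatrix` helpers or their normalization): the even-indexed samples of a length-$N$ signal are one half of the $N/2$-point inverse DFT of the aliased spectrum $F(k) + F(k+N/2)$.

f_chk = np.random.randn(16)                      # arbitrary real signal, N = 16
F_chk = np.fft.fft(f_chk)
# decimation-in-time identity: f[2n] = (1/2) * IDFT_{N/2}{ F(k) + F(k + N/2) }
f_even_chk = 0.5 * np.fft.ifft(F_chk[:8] + F_chk[8:])
print(np.allclose(f_even_chk, f_chk[::2]))       # expected: True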
# [360] Nf = 32 # must be even F = np.zeros((Nf, 1)) # set values and corresponding symmetry conditions F[7] = 1 F[12] = 0.5 F[9] = -0.25 F[Nf - 9] = -0.25 F[Nf - 12] = 0.5 F[Nf - 7] = 1 # symmetric part Q_r = Q_rmatrix(Nf) U = dftmatrix(Nf / 2) # half inverse DFT matrix ft = dftmatrix(Nf).H * F # this gives the time-domain signal feven = U.real * Q_r * F # half the size print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o') plt.xlabel('$t$', fontsize=22) plt.ylabel('$f(t)$', fontsize=22) plt.title('even-numbered samples') # Now that we have gone through all that trouble to create the even-samples matrix, we can finally put it into the framework of the $L_1$ minimization problem: # # $ min_F || \mathbf{F} ||_{L_1}$ # # subject to # # $ \mathbf{U}_{N/2}^R \mathbf{Q}_r \mathbf{F} = \mathbf{f} $ # [361] def rearrange_G(x): 'setup to put inequalities matrix with first 1/2 of elements as main variables' n = x.shape[0] return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]]) K = 2 # components Nf = 128 # number of samples M = 18 # > K log(N); num of measurements # setup signal DFT as F F = np.zeros((Nf, 1)) F[1] = 1 F[2] = 0.5 F[Nf - 1] = 1 # symmetric parts F[Nf - 2] = 0.5 ftime = dftmatrix(Nf).H * F # this gives the time-domain signal ftime = ftime.real # it's real anyway time_samples = [0, 2, 4, 12, 14, 16, 18, 24, 34, 36, 38, 40, 44, 46, 52, 56, 54, 62] half_indexed_time_samples = (np.array(time_samples) / 2).astype(int) Phi = dftmatrix(Nf / 2).real * Q_rmatrix(Nf) Phi_i = Phi[half_indexed_time_samples, :] # inequalities matrix G = matrx(rearrange_G(scipy.linalg.block_diag( *[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf))) # objective function row-matrix c = matrx(np.hstack([np.zeros(Nf), np.ones(Nf)])) # RHS for inequalities h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd') # equality constraint matrix A = matrx(np.hstack([Phi_i, Phi_i * 0])) # RHS for equality constraints b = matrx(ftime[time_samples]) sol = solvers.lp(c, G, h, A, b) # [12] import itertools as it def dftmatrix(N=8): 'compute inverse DFT matrices' n = np.arange(N) U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N) return np.matrix(U) M = 3 # np.random.seed(5489) # set random seed for reproducibility Psi = dftmatrix(128) Phi = np.random.randn(M, 128) s = np.zeros((128, 1)) s[0] = 1 s[10] = 1 Theta = Phi * Psi y = Theta * s for i in it.combinations(range(128), 2): sstar = np.zeros((128, 1)) sstar[np.array(i)] = 1 if np.allclose(Theta * sstar, y): break else: print('no solution') # [9] # [ ]
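# The exhaustive search over candidate supports in the last cell is only workable because $K = 2$ here; a rough count of the supports that would have to be tested (a small aside using only the standard library) shows why this combinatorial approach does not scale and why the $L_1$ relaxation is used instead.

from math import comb
print(comb(128, 2))   # 8128 candidate supports for K = 2
print(comb(128, 5))   # roughly 2.6e8 candidate supports already for K = 5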
41.543599
626
0.675196
#!/usr/bin/env python # coding: utf-8 # ## Compressive sampling Overview # our previous discussion, we saw that imposing bandlimited-ness on our class of signals permits point-wise sampling of our signal and then later perfect reconstruction. It turns out that by imposing *sparsity* we can also obtain perfect reconstruction irrespective of whether or not we have satsified the sampling rate limits imposed by Shannon's sampling theorem. This has extremely important in practice because many signals are naturally sparse so that collecting samples at high rates only to dump most of them as the signal is compressed is expensive and wasteful. # ## What Are Sparse Signals? # Let's carefully discuss what we np.mean by *sparse* in this context. A signal $f$ is sparse if it can be expressed in very few nonzero components ($\mathbf{s}$) with respect to a given basis ($ \mathbf{\Psi} $ ). In other words, in np.matrix-vector language: # # $ \mathbf{f} = \mathbf{\Psi} \mathbf{s} $ # # where $ || \mathbf{s} ||_0 \leq N $ where $N$ is the length of the vector and $|| \cdot||_0$ counts the number of nonzero elements in $\mathbf{s}$. Furthermore, we don't actually collect $N$ samples point-wise as we did in the Shannon sampling case. Rather, we measure $\mathbf{f}$ indirectly as $\mathbf{y}$ with another np.matrix as in: # # $\mathbf{y} = \mathbf{\Phi f} = \mathbf{\Phi} \mathbf{\Psi} \mathbf{s} = \mathbf{\Theta s} $ # # where $\mathbf{\Theta}$ is an $M \times N$ np.matrix and $ M < N $ is the number of measurements. This setup np.means we have two problems to solve. First, how to design a *stable* measurement np.matrix $\mathbf{\Phi}$ and then, second, how to reconstruct $ \mathbf{f} $ from $ \mathbf{y} $. # # This may look like a standard linear algebra problem but since $ \mathbf{\Theta} $ has fewer rows than columns, the solution is necessarily ill-posed. This is where we inject the sparsity concept! Suppose that $f$ is $K$-sparse ( $||f||_0=K$ ), then if we somehow knew *which* $K$ columns of $ \mathbf{\Theta} $ matched the $K$ non-zero entries in $\mathbf{s}$, then $\mathbf{\Theta}$ would be $ M \times K $ where we could make $M > K$ and then have a stable inverse. # # This bit of reasoning is encapsulated in the following statement for any vector $\mathbf{v}$ sharing the same $K$ non-zero entries as $\mathbf{s}$, we have # # $$1-\epsilon \leq \frac{|| \mathbf{\Theta v} ||_2}{|| \mathbf{v} ||_2} \leq 1+\epsilon $$ # # which is another way of saying that $\mathbf{\Theta}$ preserves the lengths of $K$-sparse vectors. Of course we don't know ahead of time which $K$ components to use, but it turns out that this condition is sufficient for a stable inverse of $\mathbf{\Theta}$ if it holds for any $3K$-sparse vector $\mathbf{v}$. This is the *Restricted Isometry Property* (RIP). Unfortunately, in order to use this sufficient condition, we would have to propose a $\mathbf{\Theta}$ and then check all possible combinations of nonzero entries in the $N$-length vector $\mathbf{v}$. As you may guess, this is prohibitive. # # Alternatively, we can approach stability by defining *incoherence* between the measurement np.matrix $\mathbf{\Phi}$ and the sparse basis $\mathbf{\Psi}$ as when any of the columns of one cannot be expressed as a small subset of the columns of the other. 
For example, if we have delta-spikes for $\mathbf{\Phi}$ as the row-truncated identity np.matrix # # $$\mathbf{\Phi} = \mathbf{I}_{M \times N} $$ # # and the discrete Fourier transform np.matrix for $\mathbf{\Psi}$ as # # $\mathbf{\Psi} = \begin{bnp.matrix}\\\\ # e^{-j 2\pi k n/N}\\\\ # \end{bnp.matrix}_{N \times N}$ # # Then we could not write any of the columns of $\mathbf{\Phi}$ using just a few of the columns of $\mathbf{\Psi}$. # # It turns out that picking the measuring $M \times N$ np.matrix np.random.randomly according to a Gaussian zero-np.mean, $1/N$ variance distribution and using the identity np.matrix as $\mathbf{\Phi}$, that the resulting $\mathbf{\Theta}$ np.matrix can be shown to satisfy RIP with a high probability. This np.means that we can recover $N$-length $K$-sparse signals with a high probability from just $M \ge c K \log (N/K)$ samples where $c$ is a small constant. Furthermore, it also turns out that we can use any orthonormal basis for $\mathbf{\Phi}$, not just the identity np.matrix, and these relations will all still hold. # ## Reconstructing Sparse Signals # Now that we have a way, by using np.random.random matrices, to satisfy the RIP, we are ready to consider the reconstruction problem. The first impulse is to compute the least-squares solution to this problem as # # $$ \mathbf{s}^* = \mathbf{\Theta}^T (\mathbf{\Theta}\mathbf{\Theta}^T)^{-1}\mathbf{y} $$ # # But a moment's thought may convince you that since $\mathbf{\Theta}$ is a np.random.random np.matrix, most likely with lots of non-zero entries, it is highly unlikely that $\mathbf{s}^* $ will turn out to be sparse. There is actually a deeper geometric intuition as to why this happens, but let's first consider another way of solving this so that the $\mathbf{s}^*$ is $K$-sparse. Suppose instead we shuffle through combinations of $K$ nonzero entries in $\mathbf{s}$ until we satisfy the measurements $\mathbf{y}$. Stated mathematically, this np.means # # $$ \mathbf{s}^* = argmin || \mathbf{s}^* ||_0 $$ # # where # # $$ \mathbf{\Theta} \mathbf{s}^* = \mathbf{y} $$ # # It can be shown that with $M=K+1$ iid Gaussian measurements, this optimization will recover a $K$-sparse signal exactly with high probability. Unfortunately, this is numerically unstable in addition to being an NP-complete problem. # # Thus, we need another tractable way to approach this problem. It turns out that when a signal is sparse, it usually np.means that the nonzero terms are highly asymmetric np.meaning that if there are $K$ terms, then most likely there is one term that is dominant (i.e. of much larger magnitude) and that dwarfs the other nonzero terms. Geometrically, this np.means that in $N$-dimensional space, the sparse signal is very close to one (or, maybe just a few) of the axes. # # It turns out that one can bypass this combinatorial problem using $L_1$ minimization. To examine this, let's digress and look at the main difference between $L_2$ and $L_1$ minimization problems. # reference: # `http://users.ece.gatech.edu/justin/ssp2007` # ## $L_2$ vs. $L_1$ Optimization # The classic constrained least squares problem is the following: # # min $||\mathbf{x}||_2^2$ # # where $x_1 + 2 x_2 = 1$ # # with corresponding solution illustrated below. 
# # [1] import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle x1 = np.linspace(-1, 1, 10) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(x1, (1 - x1) / 2) ax.add_patch(Circle((0, 0), 1 / np.sqrt(5), alpha=0.3)) ax.plot(1 / 5, 2 / 5, 'rs') ax.axis('equal') ax.set_xlabel('$x_1$', fontsize=24) ax.set_ylabel('$x_2$', fontsize=24) ax.grid() # Note that the line is the constraint so that any solution to this problem must be on this line (i.e. satisfy the constraint). The $L_2$ solution is the one that just touches the perimeter of the circle. This is because, in $L_2$, the unit-ball has the shape of a circle and represents all solutions of a fixed $L_2$ length. Thus, the one of smallest length that intersects the line is the one that satisfies the stated minimization problem. Intuitively, this np.means that we *inflate* a ball at the origin and stop when it touches the contraint. The point of contact is our $L_2$ minimization solution. # # Now, let's do same problem in $L_1$ norm # # min $||\mathbf{x}||_1=|x_1|+|x_2|$ # # where $x_1 + 2 x_2 = 1$ # # this case the constant-norm unit-ball contour in the $L_1$ norm is a diamond-shape instead of a circle. Comparing the graph below to the last shows that the solutions found are different. Geometrically, this is because the line tilts over in such a way that the inflating circular $L_2$ ball hits a point of tangency that is different from the $L_1$ ball because the $L_1$ ball creeps out mainly along the principal axes and is less influenced by the tilt of the line. This effect is much more pronounced in higher $N$-dimensional spaces where $L_1$-balls get more *spikey*. # # The fact that the $L_1$ problem is less sensitive to the tilt of the line is crucial since that tilt (i.e. orientation) is np.random.random due the choice of np.random.random measurement matrices. So, for this problem to be well-posed, we need to *not* be influenced by the orientation of any particular choice of np.random.random np.matrix and this is what casting this as a $L_1$ minimization provides. # [2] from matplotlib.patches import Rectangle import matplotlib.patches import matplotlib.transforms r = matplotlib.patches.RegularPolygon((0, 0), 4, 1 / 2, np.pi / 2, alpha=0.5) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(x1, (1 - x1) / 2) ax.plot(0, 1 / 2, 'rs') ax.add_patch(r) ax.grid() ax.set_xlabel('$x_1$', fontsize=24) ax.set_ylabel('$x_2$', fontsize=24) ax.axis('equal') # To explore this a bit, let's consider using the `cvxopt` package (Python ver 2.6 used here). This can be cast as a linear programming problem as follows: # # min $||\mathbf{t}||_1 = |t_1| + |t_2|$ # # subject to: # # $-t_1 < x_1 < t_1$ # # $-t_2 < x_2 < t_2$ # # $x_1 + 2 x_2 = 1$ # # $t_1 > 0$ # # $t_2 > 0$ # # where the last two constraints are already implied by the first two and are written out just for clarity. 
This can be implemented and solved in `cvxopt` as the following: # [3] from cvxopt import matrix as matrx # don't overrite numpy matrix class from cvxopt import solvers # t1,x1,t2,x2 c = matrx([1, 0, 1, 0], (4, 1), 'd') G = matrx([[-1, -1, 0, 0], # column-0 [1, -1, 0, 0], # column-1 [0, 0, -1, -1], # column-2 [0, 0, 1, -1], # column-3 ], (4, 4), 'd') # (4,1) is 4-rows,1-column, 'd' is float type spec h = matrx([0, 0, 0, 0], (4, 1), 'd') A = matrx([0, 1, 0, 2], (1, 4), 'd') b = matrx([1], (1, 1), 'd') sol = solvers.lp(c, G, h, A, b) x1 = sol['x'][1] x2 = sol['x'][3] print('x=%3.2f' % x1) print('y=%3.2f' % x2) # ## Example Gaussian np.random.random matrices # Let's try out our earlier result about np.random.random Gaussian matrices and see if we can reconstruct an unknown $\mathbf{s}$ vector using $L_1$ minimization. # [56] import numpy as np import scipy.linalg def rearrange_G(x): 'setup to put inequalities np.matrix with last 1/2 of elements as main variables' n = x.shape[0] return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]]) K = 2 # components Nf = 128 # number of samples M = 12 # > K log2(Nf/K); num of measurements s = np.zeros((Nf, 1)) # sparse vector we want to find s[0] = 1 # set the K nonzero entries s[1] = 0.5 # np.np.random.random.seed(5489) # set np.random.random seed for reproducibility Phi = np.matrix(np.random.randn(M, Nf) * np.sqrt(1 / Nf)) # np.random.random Gaussian np.matrix y = Phi * s # measurements # -- setup L1 minimization problem -- # equalities np.matrix with G = matrx(rearrange_G(scipy.linalg.block_diag( *[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf))) # objective function row-np.matrix c = matrx(np.hstack([np.ones(Nf), np.zeros(Nf)])) # RHS for inequalities h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd') # equality constraint np.matrix A = matrx(np.hstack([Phi * 0, Phi])) # RHS for equality constraints b = matrx(y) sol = solvers.lp(c, G, h, A, b) # nonzero entries nze = np.array(sol['x']).flatten()[:Nf].round(2).nonzero() print(np.array(sol['x'])[nze]) # That worked out! However, if you play around with this example enough with different np.random.random matrices (unset the ``seed`` statement above), you will find that it does not *always* find the correct answer. This is because the guarantees about reconstruction are all stated probabalistically (i.e. "high-probability"). This is another major difference between this and Shannon sampling. # # Let's encapulate the above $L_1$ minimization code so we can use it later. # [5] #from cStringIO import StringIO import sys def L1_min(Phi, y, K): # equalities np.matrix with M, Nf = Phi.shape G = matrx(rearrange_G(scipy.linalg.block_diag( *[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf))) # objective function row-np.matrix c = matrx(np.hstack([np.ones(Nf), np.zeros(Nf)])) # RHS for inequalities h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd') # equality constraint np.matrix A = matrx(np.hstack([Phi * 0, Phi])) # RHS for equality constraints b = matrx(y) # suppress standard output old_stdout = sys.stdout #s.stdout = mystdout = StringIO() #s.stdout = mystdout sol = solvers.lp(c, G, h, A, b) # restore standard output sys.stdout = old_stdout sln = np.array(sol['x']).flatten()[:Nf].round(4) return sln # ## Example: Sparse Fourier Transform # As an additional example, let us consider the Fourier transform and see if we can recover the sparse Fourier transform from a small set of measurements. 
For simplicity, we will assume that the time domain signal is real which automatically np.means that the Fourier transform is symmetric. # [141] def dftmatrix(N=8): 'compute inverse DFT matrices' n = np.arange(N) U = matrx(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N) return np.matrix(U) Nf = 128 K = 3 # components M = 8 # > K log2(Nf/K); num of measurements s = np.zeros((Nf, 1)) # sparse vector we want to find s[0] = 1 # set the K nonzero entries s[1] = 0.5 s[-1] = 0.5 # symmetric to keep inverse Fourier transform real Phi = dftmatrix(Nf)[:M, :] # take M-rows y = Phi * s # measurements # have to assert the type here on my hardware sol = L1_min(Phi.real, y.real.astype(np.float64), K) print(np.allclose(s.flatten(), sol)) # [140] plt.plot(sol) plt.plot(y.real) # ## Uniform Uncertainty Principle # $\Phi$ obeys a UUP for sets of size $K$ if # # <center> # $$ 0.8 \frac{M}{N} ||f||_2^2 \leq || \Phi f||_2^2 \leq 1.2 \frac{M}{N} ||f||_2^2 $$ # </center> # # Measurements that satisfy this are defined as *incoherent*. Given that $f$ is $K$-sparse and we measure # $y=\Phi f$, then we search for the sparsest vector that explains the $y$ measurements and thus find $f$ as follows: # # <center> # $min_f \\#\lbrace t: f(t) \ne 0 \rbrace $ where $\Phi f = y$ # </center> # Note that the hash mark is the size (i.e. cardinality) of the set. This np.means that we are looking for the fewest individual points for $f$ that satisfy the constraints. Unfortunately, this is not practically possible, so we must use the $\mathbb{L}_1$ norm as a proxy for sparsity. # # Suppose $f$ is $K$-sparse and that $\Phi$ obeys UUP for sets of size $4K$. Then we measure $y=\Phi f$ and then solve # # <center> # $min_f ||f||_1 $ where $\Phi f = y$ # </center> # to recover $f$ exactly and we can use $M > K \log N$ measurements, where the number of measurements is approximately equal to the number of active components. Let's consider a concrete example of how this works. # ### Example: Sampling Sinusoids # Here, we sample in the time-domain, given that we know the signal is sparse in the frequency domain. # # <center> # $$ \hat{f}(\omega) = \sum_{i=1}^K \alpha_i \delta(\omega_i-\omega) $$ # </center> # # which np.means that it consists of $K$-sparse nonzero elements. Therefore, the time domain signal is # # <center> # $$ f(t) = \sum_{i=1}^K \alpha_i e^{i \omega_i t} $$ # </center> # # where the $\alpha_i$ and $\omega_i$ are unknown. We want solve for these unknowns by taking $M \gt K \log N$ samples of $f$. # The problem we want to solve is # # $ min_g || \hat{g} ||_{L_1}$ # # subject to # # $ g(t_m)=f(t_m) $ # # The trick here is that are minimizing in the frequency-domain while the constraints are in the time-domain. To make things easier, we will restrict our attention to real time-domain signals $f$ and we will only reconstruct the even-indexed time-samples from the signal. This np.means we need a way of expressing the inverse Fourier Transform as a np.matrix of equality constraints. The assumption of real-valued time-domain signals implies the following symmetry in the frequency-domain: # # $ F(k) = F(N-k)^* $ # # where $F$ is the Fourier transform of $f$ and the asterisk denotes complex conjugation and $k\in \lbrace 0,1,..N-1\rbrace$ and $N$ is the Fourier Transform length. To make things even more tractable we will assume the time-domain signal is even, which np.means real-valued Fourier transform values. # # Suppose that $\mathbf{U}_N$ is the $N$-point DFT-np.matrix. Note that we always assume $N$ is even. 
Since we are dealing with only real-valued signals, the transform is symmetric, so we only need half of the spectrum computed. It turns out that the even-indexed time-domain samples can be constructed as follows: # # $ \mathbf{f_{even}} = \mathbf{U}_{N/2} \begin{bnp.matrix}\\\\ # F(0)+F(N/2)^* \\\\ # F(1)+F(N/2-1)^* \\\\ # F(2)+F(N/2-2)^* \\\\ # \dots \\\\ # F(N/2-1)+F(1)^* # \end{bnp.matrix}$ # # We can further simplify this by breaking this into real (superscript $R$) and imaginary (superscript $I$) parts and keeping only the real part # # $$\mathbf{f_{even}} = \mathbf{U}_{N/2}^R # \begin{bnp.matrix}\\\\ # F(0)^R+F(N/2)^R \\\\ # F(1)^R+F(N/2-1)^R \\\\ # F(2)^R+F(N/2-2)^R \\\\ # \dots \\\\ # F(N/2-1)^R+F(1)^R # \end{bnp.matrix} # + # \mathbf{U}^I_N # \begin{bnp.matrix} \\\\ # -F(0)^I+F(N/2)^I \\\\ # -F(1)^I+F(N/2-1)^I \\\\ # -F(2)^I+F(N/2-2)^I \\\\ # \dots \\\\ # -F(N/2-1)^I+F(1)^I # \end{bnp.matrix}$$ # # But we are going to force all the $F(k)^I$ to be zero in our example. Note that the second term should have a $\mathbf{U}_{N/2}$ in it instead $\mathbf{U}_N$ but there is something wrong with the javascript parser for that bit of TeX. # # Now, let's see if we can walk through to step-by-step to make sure our optimization can actually work. Note that we don't need the second term on the right with the $F^I$ terms because by our construction, $F$ is real. # [358] def dftmatrix(N=8): 'compute inverse DFT matrices' n = np.arange(N) U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N) return np.matrix(U) def Q_rmatrix(Nf=8): 'implements the reordering, adding, and stacking of the matrices above' Q_r = np.matrix(np.hstack([np.eye(int(Nf / 2)), np.eye(int(Nf / 2)) * 0]) + np.hstack([np.zeros((int(Nf / 2), 1)), np.fliplr(np.eye(int(Nf / 2))), np.zeros((int(Nf / 2), int(Nf / 2) - 1))])) return Q_r Nf = 8 F = np.zeros((Nf, 1)) # 8-point DFT F[0] = 1 # DC-term, constant signal n = np.arange(Nf / 2) ft = dftmatrix(Nf).H * F # this gives the constant signal Q_r = Q_rmatrix(Nf) U = dftmatrix(Nf / 2) # half inverse DFT np.matrix feven = U.real * Q_r * F # half the size print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples # [359] # let's try this with another sparse frequency-domain signal F = np.zeros((Nf, 1)) F[1] = 1 F[Nf - 1] = 1 # symmetric part ft = dftmatrix(Nf).H * F # this gives the constant signal feven = U.real * Q_r * F # half the size print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o') plt.xlabel('$t$', fontsize=22) plt.ylabel('$f(t)$', fontsize=22) plt.title('even-numbered samples') # We can use the above cell to create more complicated real signals. You can experiment with the cell below. Just remember to impose the symmetry condition! 
# [360] Nf = 32 # must be even F = np.zeros((Nf, 1)) # set values and corresponding symmetry conditions F[7] = 1 F[12] = 0.5 F[9] = -0.25 F[Nf - 9] = -0.25 F[Nf - 12] = 0.5 F[Nf - 7] = 1 # symmetric part Q_r = Q_rmatrix(Nf) U = dftmatrix(Nf / 2) # half inverse DFT np.matrix ft = dftmatrix(Nf).H * F # this gives the constant signal feven = U.real * Q_r * F # half the size print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o') plt.xlabel('$t$', fontsize=22) plt.ylabel('$f(t)$', fontsize=22) plt.title('even-numbered samples') # Now that we have gone through all that trouble to create the even-samples np.matrix, we can finally put it into the framework of the $L_1$ minimization problem: # # $ min_F || \mathbf{F} ||_{L_1}$ # # subject to # # $ \mathbf{U}_{N/2}^R \mathbf{Q}_r \mathbf{F}= \mathbf{f} $ # [361] def rearrange_G(x): 'setup to put inequalities np.matrix with first 1/2 of elements as main variables' n = x.shape[0] return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]]) K = 2 # components Nf = 128 # number of samples M = 18 # > K log(N); num of measurements # setup signal DFT as F F = np.zeros((Nf, 1)) F[1] = 1 F[2] = 0.5 F[Nf - 1] = 1 # symmetric parts F[Nf - 2] = 0.5 ftime = dftmatrix(Nf).H * F # this gives the time-domain signal ftime = ftime.real # it's real anyway time_samples = [0, 2, 4, 12, 14, 16, 18, 24, 34, 36, 38, 40, 44, 46, 52, 56, 54, 62] half_indexed_time_samples = (np.array(time_samples) / 2).astype(int) Phi = dftmatrix(Nf / 2).real * Q_rmatrix(Nf) Phi_i = Phi[half_indexed_time_samples, :] # equalities np.matrix with G = matrx(rearrange_G(scipy.linalg.block_diag( *[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf))) # objective function row-np.matrix c = matrx(np.hstack([np.zeros(Nf), np.ones(Nf)])) # RHS for inequalities h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd') # equality constraint np.matrix A = matrx(np.hstack([Phi_i, Phi_i * 0])) # RHS for equality constraints b = matrx(ftime[time_samples]) sol = solvers.lp(c, G, h, A, b) # [12] import itertools as it def dftmatrix(N=8): 'compute inverse DFT matrices' n = np.arange(N) U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N) return np.matrix(U) M = 3 # np.np.random.random.seed(5489) # set np.random.random seed for reproducibility Psi = dftmatrix(128) Phi = np.random.randn(M, 128) s = np.zeros((128, 1)) s[0] = 1 s[10] = 1 Theta = Phi * Psi y = Theta * s for i in it.combinations(range(128), 2): sstar = np.zeros((128, 1)) sstar[np.array(i)] = 1 if np.allclose(Theta * sstar, y): break else: print('no solution') # [9] # [ ]
745
0
23
8afc13e213c403a3dacc0931aa5029c3a13cf2e0
2,656
py
Python
lineartransporteqn.py
killacamron/CFDcourse21
5ae59303d042819e0246e793271f420de8e1bbdb
[ "MIT" ]
null
null
null
lineartransporteqn.py
killacamron/CFDcourse21
5ae59303d042819e0246e793271f420de8e1bbdb
[ "MIT" ]
null
null
null
lineartransporteqn.py
killacamron/CFDcourse21
5ae59303d042819e0246e793271f420de8e1bbdb
[ "MIT" ]
null
null
null
# ============================================================================= # # Explicit Finite Difference Method Code to Solve the 1D Linear Transport Equation # Adapted by: Cameron Armstrong (2019) # Source: Lorena Barba, 12 Steps to NS in Python # Institution: Virginia Commonwealth University # # ============================================================================= # Required Modules import numpy as np from matplotlib import pyplot as plt import time xl = 2 # x length nx = 600 # number of grid points x = np.linspace(0,xl,nx) # x grid dx = xl/(nx-1) # x stepsize nt = 350 # number of timesteps dt = 0.0025 # time stepsize c = 1 # wave speed g = .01 # gaussian variance parameter (peak width) theta = x/(0.5*xl) # gaussian mean parameter (peak position) cfl = round(c*dt/dx,2) # cfl condition 2 decimal places # Fun little CFL condition check and print report if cfl >= 1: print('Hold your horses! The CFL is %s, which is over 1' %(cfl)) else: print('CFL = %s' %(cfl)) # Array Initialization u = np.ones(nx) # initializing solution array un = np.ones(nx) # initializing temporary solution array u = (1/(2*np.sqrt(np.pi*(g))))*np.exp(-(1-theta)**2/(4*g)) # initial condition (IC) as a gaussian ui = u.copy() plt.plot(x,u); # plots IC # BDS/Upwind with inner for-loop with example on process timing start = time.process_time() for n in range(nt): un = u.copy() for i in range(1,nx-1): u[i] = un[i] - c*dt/(dx)*(un[i]-un[i-1]) # periodic BC's u[0] = u[nx-2] u[nx-1] = u[1] end = time.process_time() print(end-start) # # BDS/Upwind with vectorization # for n in range(nt): # un = u.copy() # u[1:-1] = un[1:-1] - c*dt/(dx)*(un[1:-1]-un[:-2]) # # periodic BC's # u[0] = u[nx-2] # u[nx-1] = u[1] # # CDS with inner for-loop #for n in range(nt): # un = u.copy() # for i in range(1,nx-1): # u[i] = un[i] - c*dt/(2*dx)*(un[i+1]-un[i-1]) # # periodic BC's # u[0] = u[nx-2] # u[nx-1] = u[1] # # CDS with vectorization #for n in range(nt): # un = u.copy() # u[1:-1] = un[1:-1] - c*dt/(2*dx)*(un[2:]-un[:-2]) # # periodic BC's # u[0] = u[nx-2] # u[nx-1] = u[1] plt.plot(x,u);
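# A small add-on check (a sketch that reuses the arrays defined above; the names t_final,
# theta_exact and u_exact are new): for the linear transport equation the exact solution is
# just the initial profile advected by c*t with periodic wrap-around, so the numerical
# diffusion of the upwind scheme can be quantified directly.
t_final = nt*dt # final simulation time
theta_exact = ((x - c*t_final) % xl)/(0.5*xl) # advected (and wrapped) gaussian parameter
u_exact = (1/(2*np.sqrt(np.pi*g)))*np.exp(-(1-theta_exact)**2/(4*g)) # exact advected profile
print('max abs error vs exact solution = %s' %(np.abs(u - u_exact).max()))
plt.plot(x,u_exact,'--'); # exact solution for comparison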
33.2
98
0.460843
# ============================================================================= # # Explicit Finite Difference Method Code to Solve the 1D Linear Transport Equation # Adapted by: Cameron Armstrong (2019) # Source: Lorena Barba, 12 Steps to NS in Python # Institution: Virginia Commonwealth University # # ============================================================================= # Required Modules import numpy as np from matplotlib import pyplot as plt import time xl = 2 # x length nx = 600 # number of grid points x = np.linspace(0,xl,nx) # x grid dx = xl/(nx-1) # x stepsize nt = 350 # number of timesteps dt = 0.0025 # time stepsize c = 1 # wave speed g = .01 # gaussian variance parameter (peak width) theta = x/(0.5*xl) # gaussian mean parameter (peak position) cfl = round(c*dt/dx,2) # cfl condition 2 decimal places # Fun little CFL condition check and print report if cfl >= 1: print('Hold your horses! The CFL is %s, which is over 1' %(cfl)) else: print('CFL = %s' %(cfl)) # Array Initialization u = np.ones(nx) # initializing solution array un = np.ones(nx) # initializing temporary solution array u = (1/(2*np.sqrt(np.pi*(g))))*np.exp(-(1-theta)**2/(4*g)) # initial condition (IC) as a gaussian ui = u.copy() plt.plot(x,u); # plots IC # BDS/Upwind with inner for-loop with example on process timing start = time.process_time() for n in range(nt): un = u.copy() for i in range(1,nx-1): u[i] = un[i] - c*dt/(dx)*(un[i]-un[i-1]) # periodic BC's u[0] = u[nx-2] u[nx-1] = u[1] end = time.process_time() print(end-start) # # BDS/Upwind with vectorization # for n in range(nt): # un = u.copy() # u[1:-1] = un[1:-1] - c*dt/(dx)*(un[1:-1]-un[:-2]) # # periodic BC's # u[0] = u[nx-2] # u[nx-1] = u[1] # # CDS with inner for-loop #for n in range(nt): # un = u.copy() # for i in range(1,nx-1): # u[i] = un[i] - c*dt/(2*dx)*(un[i+1]-un[i-1]) # # periodic BC's # u[0] = u[nx-2] # u[nx-1] = u[1] # # CDS with vectorization #for n in range(nt): # un = u.copy() # u[1:-1] = un[1:-1] - c*dt/(2*dx)*(un[2:]-un[:-2]) # # periodic BC's # u[0] = u[nx-2] # u[nx-1] = u[1] plt.plot(x,u);
0
0
0
452c277e6f370c2445473182b1051da62995480e
5,454
py
Python
monty/exts/info/docs/_html.py
onerandomusername/monty-python
fcd8b2827eb9bbb2a05d28f80ac9e215589f03f7
[ "MIT" ]
20
2021-12-31T10:17:20.000Z
2022-03-31T04:16:17.000Z
monty/exts/info/docs/_html.py
onerandomusername/monty-bot
b1c769e44b56bc45f37fc809064571d59c80db27
[ "MIT" ]
1
2022-03-13T22:34:33.000Z
2022-03-13T22:34:52.000Z
monty/exts/info/docs/_html.py
onerandomusername/monty-bot
b1c769e44b56bc45f37fc809064571d59c80db27
[ "MIT" ]
3
2022-01-02T15:21:46.000Z
2022-03-05T09:37:54.000Z
import re from functools import partial from typing import Callable, Container, Iterable, List, Union from bs4 import BeautifulSoup from bs4.element import NavigableString, PageElement, SoupStrainer, Tag from monty.log import get_logger from . import MAX_SIGNATURE_AMOUNT log = get_logger(__name__) _UNWANTED_SIGNATURE_SYMBOLS_RE = re.compile(r"\[source]|\\\\|¶") _SEARCH_END_TAG_ATTRS = ( "data", "function", "class", "exception", "seealso", "section", "rubric", "sphinxsidebar", ) class Strainer(SoupStrainer): """Subclass of SoupStrainer to allow matching of both `Tag`s and `NavigableString`s.""" Markup = Union[PageElement, List["Markup"]] def search(self, markup: Markup) -> Union[PageElement, str]: """Extend default SoupStrainer behaviour to allow matching both `Tag`s` and `NavigableString`s.""" if isinstance(markup, str): # Let everything through the text filter if we're including strings and tags. if not self.name and not self.attrs and self.include_strings: return markup else: return super().search(markup) def _find_elements_until_tag( start_element: PageElement, end_tag_filter: Union[Container[str], Callable[[Tag], bool]], *, func: Callable, include_strings: bool = False, limit: int = None, ) -> List[Union[Tag, NavigableString]]: """ Get all elements up to `limit` or until a tag matching `end_tag_filter` is found. `end_tag_filter` can be either a container of string names to check against, or a filtering callable that's applied to tags. When `include_strings` is True, `NavigableString`s from the document will be included in the result along `Tag`s. `func` takes in a BeautifulSoup unbound method for finding multiple elements, such as `BeautifulSoup.find_all`. The method is then iterated over and all elements until the matching tag or the limit are added to the return list. """ use_container_filter = not callable(end_tag_filter) elements = [] for element in func(start_element, name=Strainer(include_strings=include_strings), limit=limit): if isinstance(element, Tag): if use_container_filter: if element.name in end_tag_filter: break elif end_tag_filter(element): break elements.append(element) return elements _find_next_children_until_tag = partial(_find_elements_until_tag, func=partial(BeautifulSoup.find_all, recursive=False)) _find_recursive_children_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_all) _find_next_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_next_siblings) _find_previous_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_previous_siblings) def _class_filter_factory(class_names: Iterable[str]) -> Callable[[Tag], bool]: """Create callable that returns True when the passed in tag's class is in `class_names` or when it's a table.""" return match_tag def get_general_description(start_element: PageElement) -> List[Union[Tag, NavigableString]]: """ Get page content to a table or a tag with its class in `SEARCH_END_TAG_ATTRS`. A headerlink tag is attempted to be found to skip repeating the symbol information in the description. If it's found it's used as the tag to start the search from instead of the `start_element`. 
""" child_tags = _find_recursive_children_until_tag(start_element, _class_filter_factory(["section"]), limit=100) header = next(filter(_class_filter_factory(["headerlink"]), child_tags), None) start_tag = header.parent if header is not None else start_element return _find_next_siblings_until_tag(start_tag, _class_filter_factory(_SEARCH_END_TAG_ATTRS), include_strings=True) def get_dd_description(symbol: PageElement) -> List[Union[Tag, NavigableString]]: """Get the contents of the next dd tag, up to a dt or a dl tag.""" description_tag = symbol.find_next("dd") return _find_next_children_until_tag(description_tag, ("dt", "dl"), include_strings=True) def get_signatures(start_signature: PageElement) -> List[str]: """ Collect up to `_MAX_SIGNATURE_AMOUNT` signatures from dt tags around the `start_signature` dt tag. First the signatures under the `start_signature` are included; if less than 2 are found, tags above the start signature are added to the result if any are present. """ signatures = [] for element in ( *reversed(_find_previous_siblings_until_tag(start_signature, ("dd",), limit=2)), start_signature, *_find_next_siblings_until_tag(start_signature, ("dd",), limit=2), )[-MAX_SIGNATURE_AMOUNT:]: signature = _UNWANTED_SIGNATURE_SYMBOLS_RE.sub("", element.text) if signature: signatures.append(signature) return signatures
38.957143
120
0.710671
import re from functools import partial from typing import Callable, Container, Iterable, List, Union from bs4 import BeautifulSoup from bs4.element import NavigableString, PageElement, SoupStrainer, Tag from monty.log import get_logger from . import MAX_SIGNATURE_AMOUNT log = get_logger(__name__) _UNWANTED_SIGNATURE_SYMBOLS_RE = re.compile(r"\[source]|\\\\|¶") _SEARCH_END_TAG_ATTRS = ( "data", "function", "class", "exception", "seealso", "section", "rubric", "sphinxsidebar", ) class Strainer(SoupStrainer): """Subclass of SoupStrainer to allow matching of both `Tag`s and `NavigableString`s.""" def __init__(self, *, include_strings: bool, **kwargs): self.include_strings = include_strings passed_text = kwargs.pop("text", None) if passed_text is not None: log.warning("`text` is not a supported kwarg in the custom strainer.") super().__init__(**kwargs) Markup = Union[PageElement, List["Markup"]] def search(self, markup: Markup) -> Union[PageElement, str]: """Extend default SoupStrainer behaviour to allow matching both `Tag`s` and `NavigableString`s.""" if isinstance(markup, str): # Let everything through the text filter if we're including strings and tags. if not self.name and not self.attrs and self.include_strings: return markup else: return super().search(markup) def _find_elements_until_tag( start_element: PageElement, end_tag_filter: Union[Container[str], Callable[[Tag], bool]], *, func: Callable, include_strings: bool = False, limit: int = None, ) -> List[Union[Tag, NavigableString]]: """ Get all elements up to `limit` or until a tag matching `end_tag_filter` is found. `end_tag_filter` can be either a container of string names to check against, or a filtering callable that's applied to tags. When `include_strings` is True, `NavigableString`s from the document will be included in the result along `Tag`s. `func` takes in a BeautifulSoup unbound method for finding multiple elements, such as `BeautifulSoup.find_all`. The method is then iterated over and all elements until the matching tag or the limit are added to the return list. """ use_container_filter = not callable(end_tag_filter) elements = [] for element in func(start_element, name=Strainer(include_strings=include_strings), limit=limit): if isinstance(element, Tag): if use_container_filter: if element.name in end_tag_filter: break elif end_tag_filter(element): break elements.append(element) return elements _find_next_children_until_tag = partial(_find_elements_until_tag, func=partial(BeautifulSoup.find_all, recursive=False)) _find_recursive_children_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_all) _find_next_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_next_siblings) _find_previous_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_previous_siblings) def _class_filter_factory(class_names: Iterable[str]) -> Callable[[Tag], bool]: """Create callable that returns True when the passed in tag's class is in `class_names` or when it's a table.""" def match_tag(tag: Tag) -> bool: for attr in class_names: if attr in tag.get("class", ()): return True return tag.name == "table" return match_tag def get_general_description(start_element: PageElement) -> List[Union[Tag, NavigableString]]: """ Get page content to a table or a tag with its class in `SEARCH_END_TAG_ATTRS`. A headerlink tag is attempted to be found to skip repeating the symbol information in the description. If it's found it's used as the tag to start the search from instead of the `start_element`. 
""" child_tags = _find_recursive_children_until_tag(start_element, _class_filter_factory(["section"]), limit=100) header = next(filter(_class_filter_factory(["headerlink"]), child_tags), None) start_tag = header.parent if header is not None else start_element return _find_next_siblings_until_tag(start_tag, _class_filter_factory(_SEARCH_END_TAG_ATTRS), include_strings=True) def get_dd_description(symbol: PageElement) -> List[Union[Tag, NavigableString]]: """Get the contents of the next dd tag, up to a dt or a dl tag.""" description_tag = symbol.find_next("dd") return _find_next_children_until_tag(description_tag, ("dt", "dl"), include_strings=True) def get_signatures(start_signature: PageElement) -> List[str]: """ Collect up to `_MAX_SIGNATURE_AMOUNT` signatures from dt tags around the `start_signature` dt tag. First the signatures under the `start_signature` are included; if less than 2 are found, tags above the start signature are added to the result if any are present. """ signatures = [] for element in ( *reversed(_find_previous_siblings_until_tag(start_signature, ("dd",), limit=2)), start_signature, *_find_next_siblings_until_tag(start_signature, ("dd",), limit=2), )[-MAX_SIGNATURE_AMOUNT:]: signature = _UNWANTED_SIGNATURE_SYMBOLS_RE.sub("", element.text) if signature: signatures.append(signature) return signatures
434
0
54
23d3fbbdeeaccf228df906ea84306f0500785058
6,513
py
Python
preflibtools/instances/sampling.py
nmattei/PrefLib-Tools
d1a1137efdc6a5722bbb0e15a0c1174a0236aefb
[ "BSD-3-Clause-Clear" ]
17
2015-06-01T15:00:09.000Z
2019-09-18T18:05:38.000Z
preflibtools/instances/sampling.py
nmattei/PrefLib-Tools
d1a1137efdc6a5722bbb0e15a0c1174a0236aefb
[ "BSD-3-Clause-Clear" ]
6
2016-06-06T07:40:41.000Z
2018-01-04T22:09:21.000Z
preflibtools/instances/sampling.py
nmattei/PrefLib-Tools
d1a1137efdc6a5722bbb0e15a0c1174a0236aefb
[ "BSD-3-Clause-Clear" ]
7
2015-06-02T04:58:13.000Z
2019-12-13T13:26:58.000Z
""" This module describes procedures to sample preferences for different probability distributions. """ import numpy as np def generateICStrictProfile(nbVoters, alternatives): """ Generates a profile following the impartial culture. :param nbVoters: Number of orders to sample. :type nbVoters: int :param alternatives: List of alternatives. :type alternatives: list of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ return urnModel(nbVoters, 0, alternatives) def generateICAnonymousStrictProfile(nbVoters, alternatives): """ Generates a profile following the anonymous impartial culture. :param nbVoters: Number of orders to sample. :type nbVoters: int :param alternatives: List of alternatives. :type alternatives: list of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ return urnModel(nbVoters, 1, alternatives) def mallowsModel(nbVoters, nbAlternatives, mixture, dispersions, references): """ Generates a profile following a mixture of Mallow's models. :param nbVoters: Number of orders to sample. :type nbVoters: int :param nbAlternatives: Number of alternatives for the sampled orders. :type nbAlternatives: int :param mixture: A list of the weights of each element of the mixture. :type replace: list of int :param dispersions: A list of the dispersion coefficient of each element of the mixture. :type dispersions: list of float :param references: A list of the reference orders for each element of the mixture. :type references: list of tuples of tuples of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ if len(mixture) != len(dispersions) or len(mixture) != len(references): raise ValueError("Parameters of Mallows' mixture do not have the same length.") # We normalize the mixture so that it sums up to 1 if sum(mixture) != 1: mixture = [m / sum(mixture) for m in mixture] #Precompute the distros for each Phi. insertDistributions = [] for i in range(len(mixture)): insertDistributions.append(mallowsInsertDistributions(nbAlternatives, dispersions[i])) #Now, generate votes... votemap = {} for cvoter in range(nbVoters): cmodel = np.random.choice(range(len(mixture)), 1, p = mixture)[0] #Generate a vote for the selected model insertVector = [0] * nbAlternatives for i in range(1, len(insertVector) + 1): #options are 1...max insertVector[i - 1] = np.random.choice(range(1, i + 1), 1, p = insertDistributions[cmodel][i])[0] vote = [] for i in range(len(references[cmodel])): vote.insert(insertVector[i] - 1, references[cmodel][i]) tvote = tuple((alt,) for alt in vote) votemap[tvote] = votemap.get(tvote, 0) + 1 return votemap def mallowsMixture(nbVoters, nbReferences, alternatives): """ Generates a profile following a mixture of Mallow's models for which reference points and dispersion coefficients are independently and identically distributed. :param nbVoters: Number of orders to sample. :type nbVoters: int :param nbAlternatives: Number of alternatives for the sampled orders. :type nbAlternatives: int :param nbReferences: Number of element :type nbReferences: int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. 
:rtype: dict """ mixture = [] dispersions = [] references = [] for i in range(nbReferences): references.append(tuple(generateICStrictProfile(1, alternatives))[0]) dispersions.append(round(np.random.rand(), 5)) mixture.append(np.random.randint(1, 101)) sumMixture = sum(mixture) mixture = [float(i) / float(sumMixture) for i in mixture] return mallowsModel(nbVoters, len(alternatives), mixture, dispersions, references) def urnModel(nbVoters, replace, alternatives): """ Generates a profile following the urn model. :param nbVoters: Number of orders to sample. :type nbVoters: int :param replace: The number of replacements for the urn model. :type replace: int :param alternatives: List of alternatives. :type alternatives: list of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ voteMap = {} ReplaceVotes = {} ICsize = np.math.factorial(len(alternatives)) ReplaceSize = 0 for x in range(nbVoters): flip = np.random.randint(1, ICsize + ReplaceSize + 1) if flip <= ICsize: #generate an IC vote and make a suitable number of replacements... tvote = generateICVote(alternatives) voteMap[tvote] = (voteMap.get(tvote, 0) + 1) ReplaceVotes[tvote] = (ReplaceVotes.get(tvote, 0) + replace) ReplaceSize += replace #print("made " + str(tvote)) else: #iterate over replacement hash and select proper vote. flip = flip - ICsize for vote in ReplaceVotes.keys(): flip = flip - ReplaceVotes[vote] if flip <= 0: vote = tuple((alt,) for alt in vote) voteMap[vote] = (voteMap.get(vote, 0) + 1) ReplaceVotes[vote] = (ReplaceVotes.get(vote, 0) + replace) ReplaceSize += replace break else: print("We Have a problem... replace fell through....") exit() return voteMap def generateICVote(alternatives): """ Generates a strict order over the set of alternatives following the impartial culture. :param alternatives: List of alternatives. :type alternatives: list of int :return: A strict order over the alternatives, i.e., a tuple of tuples of size 1. :rtype: tuple """ options = list(alternatives) vote = [] while(len(options) > 0): #randomly select an option vote.append(options.pop(np.random.randint(0, len(options)))) return tuple((alt,) for alt in vote)
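# A minimal usage sketch (illustrative only; the sampled profile is random, so the printed
# dictionary will differ from run to run):
if __name__ == "__main__":
    profile = generateICStrictProfile(5, [1, 2, 3])
    # keys are strict orders given as tuples of singleton tuples, values are multiplicities,
    # e.g. {((2,), (1,), (3,)): 2, ((3,), (1,), (2,)): 1, ...}
    print(profile)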
35.396739
106
0.716874
""" This module describes procedures to sample preferences for different probability distributions. """ import numpy as np def generateICStrictProfile(nbVoters, alternatives): """ Generates a profile following the impartial culture. :param nbVoters: Number of orders to sample. :type nbVoters: int :param alternatives: List of alternatives. :type alternatives: list of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ return urnModel(nbVoters, 0, alternatives) def generateICAnonymousStrictProfile(nbVoters, alternatives): """ Generates a profile following the anonymous impartial culture. :param nbVoters: Number of orders to sample. :type nbVoters: int :param alternatives: List of alternatives. :type alternatives: list of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ return urnModel(nbVoters, 1, alternatives) def mallowsModel(nbVoters, nbAlternatives, mixture, dispersions, references): """ Generates a profile following a mixture of Mallow's models. :param nbVoters: Number of orders to sample. :type nbVoters: int :param nbAlternatives: Number of alternatives for the sampled orders. :type nbAlternatives: int :param mixture: A list of the weights of each element of the mixture. :type replace: list of int :param dispersions: A list of the dispersion coefficient of each element of the mixture. :type dispersions: list of float :param references: A list of the reference orders for each element of the mixture. :type references: list of tuples of tuples of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ def mallowsInsertDistributions(nbAlternatives, phi): distributions = {} for i in range(1, nbAlternatives + 1): #Start with an empty distro of length i distribution = [0] * i #compute the denom = phi^0 + phi^1 + ... phi^(i-1) denominator = sum([pow(phi, k) for k in range(i)]) #Fill each element of the distro with phi^(i-j) / denominator for j in range(1, i+1): distribution[j-1] = pow(phi, i - j) / denominator distributions[i] = distribution return distributions if len(mixture) != len(dispersions) or len(mixture) != len(references): raise ValueError("Parameters of Mallows' mixture do not have the same length.") # We normalize the mixture so that it sums up to 1 if sum(mixture) != 1: mixture = [m / sum(mixture) for m in mixture] #Precompute the distros for each Phi. insertDistributions = [] for i in range(len(mixture)): insertDistributions.append(mallowsInsertDistributions(nbAlternatives, dispersions[i])) #Now, generate votes... 
votemap = {} for cvoter in range(nbVoters): cmodel = np.random.choice(range(len(mixture)), 1, p = mixture)[0] #Generate a vote for the selected model insertVector = [0] * nbAlternatives for i in range(1, len(insertVector) + 1): #options are 1...max insertVector[i - 1] = np.random.choice(range(1, i + 1), 1, p = insertDistributions[cmodel][i])[0] vote = [] for i in range(len(references[cmodel])): vote.insert(insertVector[i] - 1, references[cmodel][i]) tvote = tuple((alt,) for alt in vote) votemap[tvote] = votemap.get(tvote, 0) + 1 return votemap def mallowsMixture(nbVoters, nbReferences, alternatives): """ Generates a profile following a mixture of Mallow's models for which reference points and dispersion coefficients are independently and identically distributed. :param nbVoters: Number of orders to sample. :type nbVoters: int :param nbAlternatives: Number of alternatives for the sampled orders. :type nbAlternatives: int :param nbReferences: Number of element :type nbReferences: int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ mixture = [] dispersions = [] references = [] for i in range(nbReferences): references.append(tuple(generateICStrictProfile(1, alternatives))[0]) dispersions.append(round(np.random.rand(), 5)) mixture.append(np.random.randint(1, 101)) sumMixture = sum(mixture) mixture = [float(i) / float(sumMixture) for i in mixture] return mallowsModel(nbVoters, len(alternatives), mixture, dispersions, references) def urnModel(nbVoters, replace, alternatives): """ Generates a profile following the urn model. :param nbVoters: Number of orders to sample. :type nbVoters: int :param replace: The number of replacements for the urn model. :type replace: int :param alternatives: List of alternatives. :type alternatives: list of int :return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with the given order as their preferences. :rtype: dict """ voteMap = {} ReplaceVotes = {} ICsize = np.math.factorial(len(alternatives)) ReplaceSize = 0 for x in range(nbVoters): flip = np.random.randint(1, ICsize + ReplaceSize + 1) if flip <= ICsize: #generate an IC vote and make a suitable number of replacements... tvote = generateICVote(alternatives) voteMap[tvote] = (voteMap.get(tvote, 0) + 1) ReplaceVotes[tvote] = (ReplaceVotes.get(tvote, 0) + replace) ReplaceSize += replace #print("made " + str(tvote)) else: #iterate over replacement hash and select proper vote. flip = flip - ICsize for vote in ReplaceVotes.keys(): flip = flip - ReplaceVotes[vote] if flip <= 0: vote = tuple((alt,) for alt in vote) voteMap[vote] = (voteMap.get(vote, 0) + 1) ReplaceVotes[vote] = (ReplaceVotes.get(vote, 0) + replace) ReplaceSize += replace break else: print("We Have a problem... replace fell through....") exit() return voteMap def generateICVote(alternatives): """ Generates a strict order over the set of alternatives following the impartial culture. :param alternatives: List of alternatives. :type alternatives: list of int :return: A strict order over the alternatives, i.e., a tuple of tuples of size 1. :rtype: tuple """ options = list(alternatives) vote = [] while(len(options) > 0): #randomly select an option vote.append(options.pop(np.random.randint(0, len(options)))) return tuple((alt,) for alt in vote)
474
0
24
1a8e4c575c1b0238e1aaf8f4d51c772141dd949b
1,100
py
Python
module1-introduction-to-sql/assignment/buddymove_holidayiq.py
jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases
7976332b75b8c81b2dd06682c3be1acd390dbd8c
[ "MIT" ]
null
null
null
module1-introduction-to-sql/assignment/buddymove_holidayiq.py
jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases
7976332b75b8c81b2dd06682c3be1acd390dbd8c
[ "MIT" ]
null
null
null
module1-introduction-to-sql/assignment/buddymove_holidayiq.py
jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases
7976332b75b8c81b2dd06682c3be1acd390dbd8c
[ "MIT" ]
null
null
null
import sqlite3 import pandas as pd !wget https://raw.githubusercontent.com/jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv conn = sqlite3.connect('buddymove_holidayiq.sqlite3') cur = conn.cursor() df = pd.read_csv('/content/buddymove_holidayiq.csv', index_col= 'User Id') df.to_sql(name = 'review', con = conn) query_rows = """ SELECT COUNT(*) FROM review """ cur.execute(query_rows) total_people = cur.fetchall() print(f'There are a total of {total_people[0][0]} rows') query_nature_shopping = """ SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100 """ cur.execute(query_nature_shopping) nature_shop = cur.fetchall() print(f'There are {nature_shop[0][0]} people who reviewed nature and shopping at least 100 times') columns = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic'] for ii in range(len(columns)): query = """ SELECT AVG(%s) FROM review """ cur.execute(query %columns[ii]) avg = cur.fetchall() print(f'Average number of reviews for {columns[ii]} is {avg[0][0]}')
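# As a quick alternative, the six averages can also be fetched in a single round trip instead
# of one query per column (a small sketch reusing the cursor and the column names from above):
one_query = """
SELECT AVG(Sports), AVG(Religious), AVG(Nature),
       AVG(Theatre), AVG(Shopping), AVG(Picnic)
FROM review
"""
print(cur.execute(one_query).fetchone())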
25.581395
154
0.719091
import sqlite3 import pandas as pd !wget https://raw.githubusercontent.com/jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv conn = sqlite3.connect('buddymove_holidayiq.sqlite3') cur = conn.cursor() df = pd.read_csv('/content/buddymove_holidayiq.csv', index_col= 'User Id') df.to_sql(name = 'review', con = conn) query_rows = """ SELECT COUNT(*) FROM review """ cur.execute(query_rows) total_people = cur.fetchall() print(f'There are a total of {total_people[0][0]} rows') query_nature_shopping = """ SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100 """ cur.execute(query_nature_shopping) nature_shop = cur.fetchall() print(f'There are {nature_shop[0][0]} people who reviewed nature and shopping at least 100 times') columns = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic'] for ii in range(len(columns)): query = """ SELECT AVG(%s) FROM review """ cur.execute(query %columns[ii]) avg = cur.fetchall() print(f'Average number of reviews for {columns[ii]} is {avg[0][0]}')
0
0
0
95e7082386e5418aa14fda8e81fb4e55fd79a141
54
py
Python
quart_rapidoc/version.py
marirs/quart-rapidoc
fd86604ee5ffea7bd33b08af537472f0df21e8c8
[ "MIT" ]
1
2020-07-06T17:11:02.000Z
2020-07-06T17:11:02.000Z
quart_rapidoc/version.py
marirs/quart-rapidoc
fd86604ee5ffea7bd33b08af537472f0df21e8c8
[ "MIT" ]
null
null
null
quart_rapidoc/version.py
marirs/quart-rapidoc
fd86604ee5ffea7bd33b08af537472f0df21e8c8
[ "MIT" ]
null
null
null
"""quart_redoc version file.""" __version__ = "0.5.1"
18
31
0.666667
"""quart_redoc version file.""" __version__ = "0.5.1"
0
0
0
e2e6c98c7ef19d0a3e295150199aeb3a5229e053
48,085
py
Python
cli/cyberPanel.py
uzairAK/serverom-panel
3dcde05ad618e6bef280db7d3180f926fe2ab1db
[ "MIT" ]
null
null
null
cli/cyberPanel.py
uzairAK/serverom-panel
3dcde05ad618e6bef280db7d3180f926fe2ab1db
[ "MIT" ]
null
null
null
cli/cyberPanel.py
uzairAK/serverom-panel
3dcde05ad618e6bef280db7d3180f926fe2ab1db
[ "MIT" ]
null
null
null
#!/usr/local/CyberCP/bin/python import os,sys sys.path.append('/usr/local/CyberCP') import django os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings") django.setup() from inspect import stack from cli.cliLogger import cliLogger as logger import json from plogical.virtualHostUtilities import virtualHostUtilities import re from websiteFunctions.models import Websites, ChildDomains from plogical.dnsUtilities import DNS import time import plogical.backupUtilities as backupUtilities import requests from loginSystem.models import Administrator from packages.models import Package from plogical.mysqlUtilities import mysqlUtilities from cli.cliParser import cliParser from plogical.vhost import vhost from plogical.mailUtilities import mailUtilities from plogical.ftpUtilities import FTPUtilities from plogical.sslUtilities import sslUtilities from plogical.processUtilities import ProcessUtilities from plogical.backupSchedule import backupSchedule # All that we see or seem is but a dream within a dream. ## Website Functions ## DNS Functions ## Backup Functions ## Packages ## Database functions ## Email functions ## FTP Functions ## FTP Functions # FTP Functions ## FTP Functions ## SSL Functions if __name__ == "__main__": main()
36.181339
214
0.58434
#!/usr/local/CyberCP/bin/python import os,sys sys.path.append('/usr/local/CyberCP') import django os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings") django.setup() from inspect import stack from cli.cliLogger import cliLogger as logger import json from plogical.virtualHostUtilities import virtualHostUtilities import re from websiteFunctions.models import Websites, ChildDomains from plogical.dnsUtilities import DNS import time import plogical.backupUtilities as backupUtilities import requests from loginSystem.models import Administrator from packages.models import Package from plogical.mysqlUtilities import mysqlUtilities from cli.cliParser import cliParser from plogical.vhost import vhost from plogical.mailUtilities import mailUtilities from plogical.ftpUtilities import FTPUtilities from plogical.sslUtilities import sslUtilities from plogical.processUtilities import ProcessUtilities from plogical.backupSchedule import backupSchedule # All that we see or seem is but a dream within a dream. class cyberPanel: def printStatus(self, operationStatus, errorMessage): data = json.dumps({'success': operationStatus, 'errorMessage': errorMessage }) print(data) ## Website Functions def createWebsite(self, package, owner, domainName, email, php, ssl, dkim, openBasedir): try: externalApp = "".join(re.findall("[a-zA-Z]+", domainName))[:7] phpSelection = 'PHP ' + php result = virtualHostUtilities.createVirtualHost(domainName, email, phpSelection, externalApp, ssl, dkim, openBasedir, owner, package, 0) if result[0] == 1: self.printStatus(1,'None') else: self.printStatus(0, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def createDomain(self, masterDomain, domainName, owner, php, ssl, dkim, openBasedir): try: path = '/home/' + masterDomain + '/public_html/' + domainName phpSelection = 'PHP ' + php result = virtualHostUtilities.createDomain(masterDomain, domainName, phpSelection, path, ssl, dkim, openBasedir, owner, 0) if result[0] == 1: self.printStatus(1,'None') else: self.printStatus(0, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deleteWebsite(self, domainName): try: vhost.deleteVirtualHostConfigurations(domainName) self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def deleteChild(self, childDomain): try: result = virtualHostUtilities.deleteDomain(childDomain) if result[0] == 1: self.printStatus(1,'None') else: self.printStatus(0, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listWebsitesJson(self): try: websites = Websites.objects.all() ipFile = "/etc/cyberpanel/machineIP" f = open(ipFile) ipData = f.read() ipAddress = ipData.split('\n', 1)[0] json_data = "[" checker = 0 for items in websites: if items.state == 0: state = "Suspended" else: state = "Active" dic = {'domain': items.domain, 'adminEmail': items.adminEmail,'ipAddress':ipAddress,'admin': items.admin.userName,'package': items.package.packageName,'state':state} if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data +',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listWebsitesPretty(self): try: from prettytable import PrettyTable websites = 
Websites.objects.all() ipFile = "/etc/cyberpanel/machineIP" f = open(ipFile) ipData = f.read() ipAddress = ipData.split('\n', 1)[0] table = PrettyTable(['ID','Domain', 'IP Address', 'Package', 'Owner', 'State', 'Email']) for items in websites: if items.state == 0: state = "Suspended" else: state = "Active" table.add_row([items.id, items.domain, ipAddress, items.package.packageName, items.admin.userName, state, items.adminEmail]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def changePHP(self, virtualHostName, phpVersion): try: phpVersion = 'PHP ' + phpVersion confPath = virtualHostUtilities.Server_root + "/conf/vhosts/" + virtualHostName completePathToConfigFile = confPath + "/vhost.conf" result = vhost.changePHP(completePathToConfigFile, phpVersion) if result[0] == 1: self.printStatus(1,'None') else: self.printStatus(0, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def changePackage(self, virtualHostName, packageName): try: if Websites.objects.filter(domain=virtualHostName).count() == 0: self.printStatus(0, 'This website does not exists.') if Package.objects.filter(packageName=packageName).count() == 0: self.printStatus(0, 'This package does not exists.') website = Websites.objects.get(domain=virtualHostName) package = Package.objects.get(packageName=packageName) website.package = package website.save() self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) ## DNS Functions def listDNSJson(self, virtualHostName): try: records = DNS.getDNSRecords(virtualHostName) json_data = "[" checker = 0 for items in records: dic = {'id': items.id, 'type': items.type, 'name': items.name, 'content': items.content, 'priority': items.prio, 'ttl': items.ttl } if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data + ',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listDNSPretty(self, virtualHostName): try: from prettytable import PrettyTable records = DNS.getDNSRecords(virtualHostName) table = PrettyTable(['ID', 'TYPE', 'Name', 'Value', 'Priority', 'TTL']) for items in records: if len(items.content) >= 30: content = items.content[0:30] + " ..." 
else: content = items.content table.add_row([items.id, items.type, items.name, content, items.prio, items.ttl]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listDNSZonesJson(self): try: records = DNS.getDNSZones() json_data = "[" checker = 0 for items in records: dic = {'id': items.id, 'name': items.name, } if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data + ',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listDNSZonesPretty(self): try: from prettytable import PrettyTable records = records = DNS.getDNSZones() table = PrettyTable(['ID', 'Name']) for items in records: table.add_row([items.id, items.name]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def createDNSZone(self, virtualHostName, owner): try: admin = Administrator.objects.get(userName=owner) DNS.dnsTemplate(virtualHostName, admin) self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def createDNSRecord(self, virtualHostName, name, recordType, value, priority, ttl): try: zone = DNS.getZoneObject(virtualHostName) DNS.createDNSRecord(zone, name, recordType, value, int(priority), int(ttl)) self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deleteDNSZone(self, virtualHostName): try: DNS.deleteDNSZone(virtualHostName) self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deleteDNSRecord(self, recordID): try: DNS.deleteDNSRecord(recordID) self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) ## Backup Functions def createBackup(self, virtualHostName): try: backupLogPath = "/usr/local/lscp/logs/backup_log."+time.strftime("%I-%M-%S-%a-%b-%Y") print('Backup logs to be generated in %s' % (backupLogPath)) backupSchedule.createLocalBackup(virtualHostName, backupLogPath) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def restoreBackup(self, fileName): try: if os.path.exists('/home/backup/' + fileName): dir = "CyberPanelRestore" else: dir = 'CLI' backupUtilities.submitRestore(fileName, dir) while (1): time.sleep(1) finalData = json.dumps({'backupFile': fileName, "dir": dir}) r = requests.post("http://localhost:5003/backup/restoreStatus", data=finalData, verify=False) data = json.loads(r.text) if data['abort'] == 1 and data['running'] == "Error": print('Failed to restore backup, Error message : ' + data['status'] + '\n') break elif data['abort'] == 1 and data['running'] == "Completed": print('\n\n') print('Backup restore completed.\n') break else: print('Waiting for restore to complete. 
Current status: ' + data['status']) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) ## Packages def createPackage(self, owner, packageName, diskSpace, bandwidth, emailAccounts, dataBases, ftpAccounts, allowedDomains): try: admin = Administrator.objects.get(userName=owner) newPack = Package(admin=admin, packageName=packageName, diskSpace=diskSpace, bandwidth=bandwidth, emailAccounts=emailAccounts, dataBases=dataBases, ftpAccounts=ftpAccounts, allowedDomains=allowedDomains) newPack.save() self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deletePackage(self, packageName): try: delPack = Package.objects.get(packageName=packageName) delPack.delete() self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def listPackagesJson(self): try: records = Package.objects.all() json_data = "[" checker = 0 for items in records: dic = {'id': items.id, 'packageName': items.packageName, 'domains': items.allowedDomains, 'diskSpace': items.diskSpace, 'bandwidth': items.bandwidth, 'ftpAccounts ': items.ftpAccounts, 'dataBases': items.dataBases, 'emailAccounts':items.emailAccounts } if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data + ',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listPackagesPretty(self): try: from prettytable import PrettyTable records = Package.objects.all() table = PrettyTable(['Name', 'Domains', 'Disk Space', 'Bandwidth', 'FTP Accounts', 'Databases', 'Email Accounts']) for items in records: table.add_row([items.packageName, items.allowedDomains, items.diskSpace, items.bandwidth, items.ftpAccounts, items.dataBases, items.emailAccounts]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) ## Database functions def createDatabase(self, dbName, dbUsername, dbPassword, databaseWebsite): try: result = mysqlUtilities.submitDBCreation(dbName, dbUsername, dbPassword, databaseWebsite) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deleteDatabase(self, dbName): try: result = mysqlUtilities.submitDBDeletion(dbName) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def listDatabasesJson(self, virtualHostName): try: records = mysqlUtilities.getDatabases(virtualHostName) json_data = "[" checker = 0 for items in records: dic = {'id': items.id, 'dbName': items.dbName, 'dbUser': items.dbUser, } if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data + ',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listDatabasesPretty(self, virtualHostName): try: from prettytable import PrettyTable records = mysqlUtilities.getDatabases(virtualHostName) table = PrettyTable(['ID', 'Database Name', 'Database User']) for items in records: table.add_row([items.id, items.dbName, 
items.dbUser]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) ## Email functions def createEmail(self, domain, userName, password): try: result = mailUtilities.createEmailAccount(domain, userName, password) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deleteEmail(self, email): try: result = mailUtilities.deleteEmailAccount(email) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def changeEmailPassword(self, email, password): try: result = mailUtilities.changeEmailPassword(email, password) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def listEmailsJson(self, virtualHostName): try: records = mailUtilities.getEmailAccounts(virtualHostName) json_data = "[" checker = 0 for items in records: dic = { 'email': items.email, } if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data + ',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listEmailsPretty(self, virtualHostName): try: from prettytable import PrettyTable records = mailUtilities.getEmailAccounts(virtualHostName) table = PrettyTable(['Email']) for items in records: table.add_row([items.email]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) ## FTP Functions ## FTP Functions # FTP Functions def createFTPAccount(self, domain, userName, password, owner): try: result = FTPUtilities.submitFTPCreation(domain, userName, password, 'None', owner) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def deleteFTPAccount(self, userName): try: result = FTPUtilities.submitFTPDeletion(userName) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def changeFTPPassword(self, userName, password): try: result = FTPUtilities.changeFTPPassword(userName, password) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def listFTPJson(self, virtualHostName): try: records = FTPUtilities.getFTPRecords(virtualHostName) json_data = "[" checker = 0 for items in records: dic = {'id': items.id, 'username': items.user, 'path': items.dir } if checker == 0: json_data = json_data + json.dumps(dic) checker = 1 else: json_data = json_data + ',' + json.dumps(dic) json_data = json_data + ']' final_json = json.dumps(json_data) print(final_json) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) def listFTPPretty(self, virtualHostName): try: from prettytable import PrettyTable records = FTPUtilities.getFTPRecords(virtualHostName) table = PrettyTable(['ID', 'User', 'Path']) for 
items in records: table.add_row([items.id, items.user, items.dir]) print(table) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) print(0) ## FTP Functions ## SSL Functions def issueSSL(self, virtualHost): try: path = '' adminEmail = '' try: website = ChildDomains.objects.get(domain=virtualHost) adminEmail = website.master.adminEmail path = website.path except: website = Websites.objects.get(domain=virtualHost) adminEmail = website.adminEmail path = "/home/" + virtualHost + "/public_html" result = virtualHostUtilities.issueSSL(virtualHost, path, adminEmail) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def issueSSLForHostName(self, virtualHost): try: path = '' adminEmail = '' try: website = ChildDomains.objects.get(domain=virtualHost) adminEmail = website.master.adminEmail path = website.path except: website = Websites.objects.get(domain=virtualHost) adminEmail = website.adminEmail path = "/home/" + virtualHost + "/public_html" result = virtualHostUtilities.issueSSLForHostName(virtualHost, path) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def issueSSLForMailServer(self, virtualHost): try: path = '' adminEmail = '' try: website = ChildDomains.objects.get(domain=virtualHost) adminEmail = website.master.adminEmail path = website.path except: website = Websites.objects.get(domain=virtualHost) adminEmail = website.adminEmail path = "/home/" + virtualHost + "/public_html" result = virtualHostUtilities.issueSSLForMailServer(virtualHost, path) if result[0] == 1: self.printStatus(1, 'None') else: self.printStatus(1, result[1]) except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def issueSelfSignedSSL(self, virtualHost): try: try: website = ChildDomains.objects.get(domain=virtualHost) adminEmail = website.master.adminEmail except: website = Websites.objects.get(domain=virtualHost) adminEmail = website.adminEmail pathToStoreSSL = "/etc/letsencrypt/live/" + virtualHost command = 'mkdir -p ' + pathToStoreSSL ProcessUtilities.executioner(command) pathToStoreSSLPrivKey = "/etc/letsencrypt/live/" + virtualHost + "/privkey.pem" pathToStoreSSLFullChain = "/etc/letsencrypt/live/" + virtualHost + "/fullchain.pem" command = 'openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" -keyout ' + pathToStoreSSLPrivKey + ' -out ' + pathToStoreSSLFullChain ProcessUtilities.executioner(command) sslUtilities.installSSLForDomain(virtualHost, adminEmail) ProcessUtilities.restartLitespeed() self.printStatus(1, 'None') except BaseException as msg: logger.writeforCLI(str(msg), "Error", stack()[0][3]) self.printStatus(0, str(msg)) def main(): parser = cliParser() args = parser.prepareArguments() cyberpanel = cyberPanel() ## Website functions if args.function == "createWebsite": completeCommandExample = 'cyberpanel createWebsite --package Detault --owner admin --domainName cyberpanel.net --email support@cyberpanel.net --php 5.6' if not args.package: print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.owner: print("\n\nPlease enter the owner name. 
For example:\n\n" + completeCommandExample + "\n\n") return if not args.domainName: print("\n\nPlease enter the domain name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.email: print("\n\nPlease enter the email. For example:\n\n" + completeCommandExample + "\n\n") return if not args.php: print("\n\nPlease enter the PHP version such as 5.6 for PHP version 5.6. For example:\n\n" + completeCommandExample + "\n\n") return if args.ssl: ssl = int(args.ssl) else: ssl = 0 if args.dkim: dkim = int(args.dkim) else: dkim = 0 if args.openBasedir: openBasedir = int(args.openBasedir) else: openBasedir = 0 cyberpanel.createWebsite(args.package, args.owner, args.domainName, args.email, args.php, ssl, dkim, openBasedir) elif args.function == "deleteWebsite": completeCommandExample = 'cyberpanel deleteWebsite --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter the domain to delete. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteWebsite(args.domainName) elif args.function == "createChild": completeCommandExample = 'cyberpanel createChild --masterDomain cyberpanel.net --childDomain child.cyberpanel.net' \ ' --owner admin --php 5.6' if not args.masterDomain: print("\n\nPlease enter Master domain. For example:\n\n" + completeCommandExample + "\n\n") return if not args.childDomain: print("\n\nPlease enter the Child Domain. For example:\n\n" + completeCommandExample + "\n\n") return if not args.owner: print("\n\nPlease enter owner for this domain DNS records. For example:\n\n" + completeCommandExample + "\n\n") return if not args.php: print("\n\nPlease enter required PHP version. For example:\n\n" + completeCommandExample + "\n\n") return if args.ssl: ssl = int(args.ssl) else: ssl = 0 if args.dkim: dkim = int(args.dkim) else: dkim = 0 if args.openBasedir: openBasedir = int(args.openBasedir) else: openBasedir = 0 cyberpanel.createDomain(args.masterDomain, args.childDomain, args.owner, args.php, ssl, dkim, openBasedir) elif args.function == "deleteChild": completeCommandExample = 'cyberpanel deleteChild --childDomain cyberpanel.net' if not args.childDomain: print("\n\nPlease enter the child domain to delete. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteChild(args.childDomain) elif args.function == "listWebsitesJson": cyberpanel.listWebsitesJson() elif args.function == "listWebsitesPretty": cyberpanel.listWebsitesPretty() elif args.function == "changePHP": completeCommandExample = 'cyberpanel changePHP --domainName cyberpanel.net --php 5.6' if not args.domainName: print("\n\nPlease enter Domain. For example:\n\n" + completeCommandExample + "\n\n") return if not args.php: print("\n\nPlease enter required PHP version. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.changePHP(args.domainName, args.php) elif args.function == "changePackage": completeCommandExample = 'cyberpanel changePackage --domainName cyberpanel.net --packageName CLI' if not args.domainName: print("\n\nPlease enter the Domain. For example:\n\n" + completeCommandExample + "\n\n") return if not args.packageName: print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.changePackage(args.domainName, args.packageName) ## DNS Functions elif args.function == "listDNSJson": completeCommandExample = 'cyberpanel listDNSJson --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter the domain. 
For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listDNSJson(args.domainName) elif args.function == "listDNSPretty": completeCommandExample = 'cyberpanel listDNSPretty --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listDNSPretty(args.domainName) elif args.function == "listDNSZonesJson": cyberpanel.listDNSZonesJson() elif args.function == "listDNSZonesPretty": cyberpanel.listDNSZonesPretty() elif args.function == "createDNSZone": completeCommandExample = 'cyberpanel createDNSZone --owner admin --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n") return if not args.owner: print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createDNSZone(args.domainName, args.owner) elif args.function == "deleteDNSZone": completeCommandExample = 'cyberpanel deleteDNSZone --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteDNSZone(args.domainName) elif args.function == "createDNSRecord": completeCommandExample = 'cyberpanel createDNSRecord --domainName cyberpanel.net --name cyberpanel.net' \ ' --recordType A --value 192.168.100.1 --priority 0 --ttl 3600' if not args.domainName: print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n") return if not args.name: print("\n\nPlease enter the record name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.recordType: print("\n\nPlease enter the record type. For example:\n\n" + completeCommandExample + "\n\n") return if not args.value: print("\n\nPlease enter the record value. For example:\n\n" + completeCommandExample + "\n\n") return if not args.priority: print("\n\nPlease enter the priority. For example:\n\n" + completeCommandExample + "\n\n") return if not args.ttl: print("\n\nPlease enter the ttl. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createDNSRecord(args.domainName, args.name, args.recordType, args.value, args.priority, args.ttl) elif args.function == "deleteDNSRecord": completeCommandExample = 'cyberpanel deleteDNSRecord --recordID 200' if not args.recordID: print("\n\nPlease enter the record ID to be deleted, you can find record ID by listing the current DNS records. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteDNSRecord(args.recordID) ## Backup Functions. elif args.function == "createBackup": completeCommandExample = 'cyberpanel createBackup --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createBackup(args.domainName) elif args.function == "restoreBackup": completeCommandExample = 'cyberpanel restoreBackup --fileName /home/talkshosting.com/backup/backup-talksho-01-30-53-Fri-Jun-2018.tar.gz' if not args.fileName: print("\n\nPlease enter the file name or complete path to file. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.restoreBackup(args.fileName) ## Package functions. 
elif args.function == "createPackage": completeCommandExample = 'cyberpanel createPackage --owner admin --packageName CLI --diskSpace 1000 --bandwidth 10000 --emailAccounts 100' \ ' --dataBases 100 --ftpAccounts 100 --allowedDomains 100' if not args.owner: print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.packageName: print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.diskSpace: print("\n\nPlease enter value for Disk Space. For example:\n\n" + completeCommandExample + "\n\n") return if not args.bandwidth: print("\n\nPlease enter value for Bandwidth. For example:\n\n" + completeCommandExample + "\n\n") return if not args.emailAccounts: print("\n\nPlease enter value for Email accounts. For example:\n\n" + completeCommandExample + "\n\n") return if not args.dataBases: print("\n\nPlease enter value for Databases. For example:\n\n" + completeCommandExample + "\n\n") return if not args.ftpAccounts: print("\n\nPlease enter value for Ftp accounts. For example:\n\n" + completeCommandExample + "\n\n") return if not args.allowedDomains: print("\n\nPlease enter value for Allowed Child Domains. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createPackage(args.owner, args.packageName, args.diskSpace, args.bandwidth, args.emailAccounts, args.dataBases, args.ftpAccounts, args.allowedDomains) elif args.function == "deletePackage": completeCommandExample = 'cyberpanel deletePackage --packageName CLI' if not args.packageName: print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deletePackage(args.packageName) elif args.function == "listPackagesJson": cyberpanel.listPackagesJson() elif args.function == "listPackagesPretty": cyberpanel.listPackagesPretty() ## Database functions. elif args.function == "createDatabase": completeCommandExample = 'cyberpanel createDatabase --databaseWebsite cyberpanel.net --dbName cyberpanel ' \ '--dbUsername cyberpanel --dbPassword cyberpanel' if not args.databaseWebsite: print("\n\nPlease enter database website. For example:\n\n" + completeCommandExample + "\n\n") return if not args.dbName: print("\n\nPlease enter the database name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.dbUsername: print("\n\nPlease enter the database username. For example:\n\n" + completeCommandExample + "\n\n") return if not args.dbPassword: print("\n\nPlease enter the password for database. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createDatabase(args.dbName, args.dbUsername, args.dbPassword, args.databaseWebsite) elif args.function == "deleteDatabase": completeCommandExample = 'cyberpanel deleteDatabase --dbName cyberpanel' if not args.dbName: print("\n\nPlease enter the database name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteDatabase(args.dbName) elif args.function == "listDatabasesJson": completeCommandExample = 'cyberpanel listDatabasesJson --databaseWebsite cyberpanel.net' if not args.databaseWebsite: print("\n\nPlease enter database website. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listDatabasesJson(args.databaseWebsite) elif args.function == "listDatabasesPretty": completeCommandExample = 'cyberpanel listDatabasesPretty --databaseWebsite cyberpanel.net' if not args.databaseWebsite: print("\n\nPlease enter database website. 
For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listDatabasesPretty(args.databaseWebsite) ## Email Functions elif args.function == "createEmail": completeCommandExample = 'cyberpanel createEmail --domainName cyberpanel.net --userName cyberpanel ' \ '--password cyberpanel' if not args.domainName: print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.userName: print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.password: print("\n\nPlease enter the password for database. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createEmail(args.domainName, args.userName, args.password) elif args.function == "deleteEmail": completeCommandExample = 'cyberpanel deleteEmail --email cyberpanel@cyberpanel.net' if not args.email: print("\n\nPlease enter the email. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteEmail(args.email) elif args.function == "changeEmailPassword": completeCommandExample = 'cyberpanel changeEmailPassword --email cyberpanel@cyberpanel.net --password cyberpanel' if not args.email: print("\n\nPlease enter email. For example:\n\n" + completeCommandExample + "\n\n") return if not args.password: print("\n\nPlease enter the password. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.changeEmailPassword(args.email, args.password) elif args.function == "listEmailsJson": completeCommandExample = 'cyberpanel listEmailsJson --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listEmailsJson(args.domainName) elif args.function == "listEmailsPretty": completeCommandExample = 'cyberpanel listEmailsPretty --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listEmailsPretty(args.domainName) ## FTP Functions elif args.function == "createFTPAccount": completeCommandExample = 'cyberpanel createFTPAccount --domainName cyberpanel.net --userName cyberpanel ' \ '--password cyberpanel --owner admin' if not args.domainName: print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.userName: print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.password: print("\n\nPlease enter the password for database. For example:\n\n" + completeCommandExample + "\n\n") return if not args.owner: print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.createFTPAccount(args.domainName, args.userName, args.password, args.owner) elif args.function == "deleteFTPAccount": completeCommandExample = 'cyberpanel deleteFTPAccount --userName cyberpanel' if not args.userName: print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.deleteFTPAccount(args.userName) elif args.function == "changeFTPPassword": completeCommandExample = 'cyberpanel changeFTPPassword --userName cyberpanel --password cyberpanel' if not args.userName: print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n") return if not args.password: print("\n\nPlease enter the password for database. 
For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.changeFTPPassword(args.userName, args.password) elif args.function == "listFTPJson": completeCommandExample = 'cyberpanel listFTPJson --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listFTPJson(args.domainName) elif args.function == "listFTPPretty": completeCommandExample = 'cyberpanel listFTPPretty --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.listFTPPretty(args.domainName) ## SSL Functions elif args.function == "issueSSL": completeCommandExample = 'cyberpanel issueSSL --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.issueSSL(args.domainName) elif args.function == "hostNameSSL": completeCommandExample = 'cyberpanel hostNameSSL --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.issueSSLForHostName(args.domainName) elif args.function == "mailServerSSL": completeCommandExample = 'cyberpanel mailServerSSL --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.issueSSLForMailServer(args.domainName) elif args.function == "issueSelfSignedSSL": completeCommandExample = 'cyberpanel issueSelfSignedSSL --domainName cyberpanel.net' if not args.domainName: print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n") return cyberpanel.issueSelfSignedSSL(args.domainName) elif args.function == 'utility': if not os.path.exists('/usr/bin/cyberpanel_utility'): command = 'wget -q -O /usr/bin/cyberpanel_utility https://cyberpanel.sh/misc/cyberpanel_utility.sh' ProcessUtilities.executioner(command) command = 'chmod 700 /usr/bin/cyberpanel_utility' ProcessUtilities.executioner(command) command = '/usr/bin/cyberpanel_utility' ProcessUtilities.executioner(command) elif args.function == 'upgrade' or args.function == 'update': if not os.path.exists('/usr/bin/cyberpanel_utility'): command = 'wget -q -O /usr/bin/cyberpanel_utility https://cyberpanel.sh/misc/cyberpanel_utility.sh' ProcessUtilities.executioner(command) command = 'chmod 700 /usr/bin/cyberpanel_utility' ProcessUtilities.executioner(command) command = '/usr/bin/cyberpanel_utility --upgrade' ProcessUtilities.executioner(command) elif args.function == 'help': if not os.path.exists('/usr/bin/cyberpanel_utility'): command = 'wget -q -O /usr/bin/cyberpanel_utility https://cyberpanel.sh/misc/cyberpanel_utility.sh' ProcessUtilities.executioner(command) command = 'chmod 700 /usr/bin/cyberpanel_utility' ProcessUtilities.executioner(command) command = '/usr/bin/cyberpanel_utility --help' ProcessUtilities.executioner(command) elif args.function == 'version' or args.function == 'v' or args.function == 'V': ## Get CurrentVersion with open('/usr/local/CyberCP/version.txt') as file: file_contents = file.read() version = re.search('\d.\d', file_contents) version = version.group() build = file_contents[-2:] build = build[0:1] currentversion = version + '.' + build print (currentversion) if __name__ == "__main__": main()
45,619
-4
1,153
ca330354912944efbfce5a46ab08e1b882f586b6
904
py
Python
SkipgramSampler/fixedarray.py
InzamamRahaman/SkipgramSampling
16a4bbab0e85ceb46191f074b61e639d9b408dc8
[ "MIT" ]
1
2017-10-13T00:37:25.000Z
2017-10-13T00:37:25.000Z
SkipgramSampler/fixedarray.py
InzamamRahaman/SkipgramSampling
16a4bbab0e85ceb46191f074b61e639d9b408dc8
[ "MIT" ]
6
2017-10-13T04:43:02.000Z
2017-10-14T20:33:39.000Z
SkipgramSampler/fixedarray.py
InzamamRahaman/SkipgramSampling
16a4bbab0e85ceb46191f074b61e639d9b408dc8
[ "MIT" ]
null
null
null
from collections import deque class FixedArray(object): """ Object acts a queue of fixed length """
22.6
42
0.551991
from collections import deque class FixedArray(object): """ Object acts a queue of fixed length """ def __init__(self, limit): self.count = 0 self.limit = limit self.contents = deque() def append(self, x, callback=None): if self.count < self.limit: self.contents.append(x) self.count += 1 else: elem = self.contents.popleft() if callback: callback(elem, x, self) self.contents.append(x) self.count += 1 def __contains__(self, item): item in self.contents def __len__(self): return self.count def get_distinct_elements(self): return set(self.contents) def empty_contents(self): for i in range(self.count): print('Pop') self.contents.popleft() print(self.contents)
627
0
162
3fb46c294a5360bb8f1d5b3c59d0fde197c8961d
4,380
py
Python
setup.py
Tobi-Alonso/ResNet50-PYNQ
7c203c2b249479c5384afe152dde2bb06576339b
[ "BSD-3-Clause" ]
null
null
null
setup.py
Tobi-Alonso/ResNet50-PYNQ
7c203c2b249479c5384afe152dde2bb06576339b
[ "BSD-3-Clause" ]
null
null
null
setup.py
Tobi-Alonso/ResNet50-PYNQ
7c203c2b249479c5384afe152dde2bb06576339b
[ "BSD-3-Clause" ]
1
2020-03-27T18:20:47.000Z
2020-03-27T18:20:47.000Z
# Copyright (c) 2019, Xilinx # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from setuptools import setup, find_packages from distutils.dir_util import copy_tree import os from pynq.utils import build_py as _build_py __author__ = "Lucian Petrica" __copyright__ = "Copyright 2019, Xilinx" # global variables module_name = "resnet50_pynq" data_files = [] class build_py(_build_py): """Overload the pynq.utils 'build_py' command (that performs overlay download) to also call the function 'copy_notebooks'. """ with open("README.md", encoding="utf-8") as fh: readme_lines = fh.readlines() readme_lines = readme_lines[ readme_lines.index("## PYNQ quick start\n") + 2: readme_lines.index("## Author\n"): ] long_description = ("".join(readme_lines)) extend_package(os.path.join(module_name, "notebooks")) setup(name=module_name, version="1.0", description="Quantized dataflow implementation of ResNet50 on Alveo", long_description=long_description, long_description_content_type="text/markdown", author="Lucian Petrica", url="https://github.com/Xilinx/ResNet50-PYNQ", packages=find_packages(), download_url="https://github.com/Xilinx/ResNet50-PYNQ", package_data={ "": data_files, }, python_requires=">=3.5.2", # keeping 'setup_requires' only for readability - relying on # pyproject.toml and PEP 517/518 setup_requires=[ "pynq>=2.5.1" ], install_requires=[ "pynq>=2.5.1", "jupyter", "jupyterlab", "plotly", "opencv-python", "wget" ], extras_require={ ':python_version<"3.6"': [ 'matplotlib<3.1', 'ipython==7.9' ], ':python_version>="3.6"': [ 'matplotlib' ] }, entry_points={ "pynq.notebooks": [ "ResNet50 = {}.notebooks".format(module_name) ] }, cmdclass={"build_py": build_py}, license="BSD 3-Clause" )
35.04
81
0.656164
# Copyright (c) 2019, Xilinx # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from setuptools import setup, find_packages from distutils.dir_util import copy_tree import os from pynq.utils import build_py as _build_py __author__ = "Lucian Petrica" __copyright__ = "Copyright 2019, Xilinx" # global variables module_name = "resnet50_pynq" data_files = [] def extend_package(path): if os.path.isdir(path): data_files.extend( [os.path.join("..", root, f) for root, _, files in os.walk(path) for f in files] ) elif os.path.isfile(path): data_files.append(os.path.join("..", path)) class build_py(_build_py): """Overload the pynq.utils 'build_py' command (that performs overlay download) to also call the function 'copy_notebooks'. """ def copy_notebooks(self): cmd = self.get_finalized_command("build_py") for package, src_dir, build_dir, _ in cmd.data_files: if "." 
not in package: # sub-packages are skipped src_folder = os.path.join(os.path.dirname(src_dir), "host") dst_folder = os.path.join(build_dir, "notebooks") if os.path.isdir(src_folder): copy_tree(src_folder, dst_folder) def run(self): super().run() self.copy_notebooks() with open("README.md", encoding="utf-8") as fh: readme_lines = fh.readlines() readme_lines = readme_lines[ readme_lines.index("## PYNQ quick start\n") + 2: readme_lines.index("## Author\n"): ] long_description = ("".join(readme_lines)) extend_package(os.path.join(module_name, "notebooks")) setup(name=module_name, version="1.0", description="Quantized dataflow implementation of ResNet50 on Alveo", long_description=long_description, long_description_content_type="text/markdown", author="Lucian Petrica", url="https://github.com/Xilinx/ResNet50-PYNQ", packages=find_packages(), download_url="https://github.com/Xilinx/ResNet50-PYNQ", package_data={ "": data_files, }, python_requires=">=3.5.2", # keeping 'setup_requires' only for readability - relying on # pyproject.toml and PEP 517/518 setup_requires=[ "pynq>=2.5.1" ], install_requires=[ "pynq>=2.5.1", "jupyter", "jupyterlab", "plotly", "opencv-python", "wget" ], extras_require={ ':python_version<"3.6"': [ 'matplotlib<3.1', 'ipython==7.9' ], ':python_version>="3.6"': [ 'matplotlib' ] }, entry_points={ "pynq.notebooks": [ "ResNet50 = {}.notebooks".format(module_name) ] }, cmdclass={"build_py": build_py}, license="BSD 3-Clause" )
727
0
76
c04249abf3a5ebc265209326af85d9f62c50c23b
3,052
py
Python
testing/python_lib/test_faucet_state_collector.py
pbatta/forch
df033bc5b7cbac06e1c406257193cb0cb62f2742
[ "Apache-2.0" ]
1
2019-12-12T23:13:24.000Z
2019-12-12T23:13:24.000Z
testing/python_lib/test_faucet_state_collector.py
pbatta/forch
df033bc5b7cbac06e1c406257193cb0cb62f2742
[ "Apache-2.0" ]
92
2019-12-13T03:30:35.000Z
2021-11-11T16:16:13.000Z
testing/python_lib/test_faucet_state_collector.py
pbatta/forch
df033bc5b7cbac06e1c406257193cb0cb62f2742
[ "Apache-2.0" ]
7
2020-01-11T14:12:46.000Z
2021-01-25T17:30:55.000Z
"""Unit tests for Faucet State Collector""" import unittest from unit_base import FaucetStateCollectorTestBase from forch.proto.faucet_event_pb2 import StackTopoChange from forch.utils import dict_proto class DataplaneStateTestCase(FaucetStateCollectorTestBase): """Test cases for dataplane state""" def test_topology_loop(self): """test faucet_state_collector behavior when faucet sends loop in path to egress topology""" self._faucet_state_collector.topo_state = self._build_loop_topo_obj() egress_path = self._faucet_state_collector.get_switch_egress_path('sw1') self.assertEqual(egress_path['path_state'], 1) self.assertEqual(egress_path['path_state_detail'], 'No path to root found. Loop in topology.') def test_egress_path(self): """test faucet_state_collector behavior when faucet sends loop in path to egress topology""" self._faucet_state_collector.topo_state = self._build_topo_obj() # pylint: disable=protected-access self._faucet_state_collector._get_egress_port = lambda port: 28 egress_path = self._faucet_state_collector.get_switch_egress_path('sw3') self.assertEqual(egress_path['path_state'], 5) self.assertEqual(egress_path['path'], [{'switch': 'sw3', 'out': 1}, {'switch': 'sw1', 'in': 2, 'out': 28}]) if __name__ == '__main__': unittest.main()
37.219512
100
0.582241
"""Unit tests for Faucet State Collector""" import unittest from unit_base import FaucetStateCollectorTestBase from forch.proto.faucet_event_pb2 import StackTopoChange from forch.utils import dict_proto class DataplaneStateTestCase(FaucetStateCollectorTestBase): """Test cases for dataplane state""" def _build_link(self, dp1, port1, dp2, port2): return { 'key': dp1 + ':' + port1 + '-' + dp2 + ':' + port2, 'source': dp1, 'target': dp2, 'port_map': { 'dp_a': dp1, 'port_a': 'Port ' + port1, 'dp_z': dp2, 'port_z': 'Port ' + port2 } } def _build_loop_topo_obj(self): dps = { 'sw1': StackTopoChange.StackDp(root_hop_port=1), 'sw2': StackTopoChange.StackDp(root_hop_port=1), 'sw3': StackTopoChange.StackDp(root_hop_port=1), } links = [ self._build_link('sw1', '1', 'sw2', '2'), self._build_link('sw2', '1', 'sw3', '2'), self._build_link('sw3', '1', 'sw1', '2'), ] links_graph = [dict_proto(link, StackTopoChange.StackLink) for link in links] return { 'dps': dps, 'links_graph': links_graph } def _build_topo_obj(self): dps = { 'sw1': StackTopoChange.StackDp(), 'sw2': StackTopoChange.StackDp(root_hop_port=1), 'sw3': StackTopoChange.StackDp(root_hop_port=1), } links = [ self._build_link('sw1', '1', 'sw2', '1'), self._build_link('sw2', '2', 'sw3', '2'), self._build_link('sw3', '1', 'sw1', '2'), ] links_graph = [dict_proto(link, StackTopoChange.StackLink) for link in links] return { 'active_root': 'sw1', 'dps': dps, 'links_graph': links_graph } def test_topology_loop(self): """test faucet_state_collector behavior when faucet sends loop in path to egress topology""" self._faucet_state_collector.topo_state = self._build_loop_topo_obj() egress_path = self._faucet_state_collector.get_switch_egress_path('sw1') self.assertEqual(egress_path['path_state'], 1) self.assertEqual(egress_path['path_state_detail'], 'No path to root found. Loop in topology.') def test_egress_path(self): """test faucet_state_collector behavior when faucet sends loop in path to egress topology""" self._faucet_state_collector.topo_state = self._build_topo_obj() # pylint: disable=protected-access self._faucet_state_collector._get_egress_port = lambda port: 28 egress_path = self._faucet_state_collector.get_switch_egress_path('sw3') self.assertEqual(egress_path['path_state'], 5) self.assertEqual(egress_path['path'], [{'switch': 'sw3', 'out': 1}, {'switch': 'sw1', 'in': 2, 'out': 28}]) if __name__ == '__main__': unittest.main()
1,537
0
81
3c98d96e351e9f0cf0c5d2fb68fa0eae5f624451
2,344
py
Python
tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py
pdehaan/gaia
0ea959d81cefa0128157ec3ff0e2b7bdd29afacf
[ "Apache-2.0" ]
1
2015-03-02T04:03:00.000Z
2015-03-02T04:03:00.000Z
tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py
caseyyee/gaia
fa82433dda06e9ae7d35a1f74cc16f4dd72cc514
[ "Apache-2.0" ]
null
null
null
tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py
caseyyee/gaia
fa82433dda06e9ae7d35a1f74cc16f4dd72cc514
[ "Apache-2.0" ]
null
null
null
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from marionette.by import By from gaiatest.apps.base import Base
37.206349
103
0.729522
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from marionette.by import By from gaiatest.apps.base import Base class Persona(Base): # iframes _persona_frame_locator = (By.CSS_SELECTOR, "iframe.screen[data-url*='persona.org/sign_in#NATIVE']") # persona login _body_loading_locator = (By.CSS_SELECTOR, 'body.loading') _email_input_locator = (By.ID, 'authentication_email') _password_input_locator = (By.ID, 'authentication_password') _continue_button_locator = (By.CSS_SELECTOR, '.continue.right') _returning_button_locator = (By.CSS_SELECTOR, 'button.isReturning') def __init__(self, marionette): Base.__init__(self, marionette) def login(self, email, password): # This only supports logging in with a known user and no existing session self.type_email(email) self.tap_continue() self.type_password(password) self.tap_returning() self.marionette.switch_to_frame() self.wait_for_element_not_present(*self._persona_frame_locator) self.apps.switch_to_displayed_app() def wait_for_persona_to_load(self): self.wait_for_element_not_displayed(*self._body_loading_locator) def switch_to_persona_frame(self): self.marionette.switch_to_frame() self.frame = self.wait_for_element_present(*self._persona_frame_locator) self.marionette.switch_to_frame(self.frame) self.wait_for_persona_to_load() def type_email(self, value): self.marionette.find_element(*self._email_input_locator).send_keys(value) self.keyboard.dismiss() self.switch_to_persona_frame() def type_password(self, value): self.marionette.find_element(*self._password_input_locator).send_keys(value) self.keyboard.dismiss() self.switch_to_persona_frame() def tap_continue(self): self.marionette.find_element(*self._continue_button_locator).tap() self.wait_for_element_not_displayed(*self._continue_button_locator) self.wait_for_element_displayed(*self._password_input_locator) def tap_returning(self): self.marionette.find_element(*self._returning_button_locator).tap()
1,374
681
23
46007c370fa322eb1ca7e4346385565ed9dfbbd8
4,826
py
Python
ethevents/client/connection.py
ezdac/ethevents
9f4b0ff1ba0d303180abe3b5336805335bc0765b
[ "MIT" ]
2
2018-08-21T01:06:30.000Z
2019-03-05T08:15:55.000Z
ethevents/client/connection.py
ezdac/ethevents
9f4b0ff1ba0d303180abe3b5336805335bc0765b
[ "MIT" ]
1
2018-04-23T14:01:51.000Z
2018-04-23T14:09:51.000Z
ethevents/client/connection.py
ezdac/ethevents
9f4b0ff1ba0d303180abe3b5336805335bc0765b
[ "MIT" ]
1
2022-03-22T04:57:16.000Z
2022-03-22T04:57:16.000Z
import time import click import requests from elasticsearch.connection import Connection from elasticsearch.connection_pool import DummyConnectionPool from elasticsearch.transport import Transport from elasticsearch.exceptions import ( ConnectionError, ConnectionTimeout, SSLError ) from elasticsearch.compat import urlencode from requests import Session from ethevents.client.app import App import logging log = logging.getLogger(__name__) @click.option( '--limits/--no-limits', default=True ) @click.command() if __name__ == '__main__': main()
28.05814
90
0.564857
import time import click import requests from elasticsearch.connection import Connection from elasticsearch.connection_pool import DummyConnectionPool from elasticsearch.transport import Transport from elasticsearch.exceptions import ( ConnectionError, ConnectionTimeout, SSLError ) from elasticsearch.compat import urlencode from requests import Session from ethevents.client.app import App import logging log = logging.getLogger(__name__) class MicroRaidenConnection(Connection): def __init__( self, host, port, session: Session, use_ssl=False, headers=None, **kwargs ): super(MicroRaidenConnection, self).__init__( host=host, port=port, use_ssl=use_ssl, **kwargs ) self.base_url = 'http%s://%s:%d%s' % ( 's' if self.use_ssl else '', host, port, self.url_prefix ) self.session = session self.session.headers = headers or {} self.session.headers.setdefault('content-type', 'application/json') def perform_request( self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None ): url = self.base_url + url if params: url = '%s?%s' % (url, urlencode(params or {})) start = time.time() request = requests.Request(method=method, headers=headers, url=url, data=body) prepared_request = self.session.prepare_request(request) settings = self.session.merge_environment_settings( prepared_request.url, {}, None, None, None ) send_kwargs = {'timeout': timeout or self.timeout} send_kwargs.update(settings) try: response = self.session.request( prepared_request.method, prepared_request.url, data=prepared_request.body, headers=prepared_request.headers, **send_kwargs ) duration = time.time() - start raw_data = response.text except Exception as e: self.log_request_fail( method, url, prepared_request.path_url, body, time.time() - start, exception=e ) if isinstance(e, requests.exceptions.SSLError): raise SSLError('N/A', str(e), e) if isinstance(e, requests.Timeout): raise ConnectionTimeout('TIMEOUT', str(e), e) raise ConnectionError('N/A', str(e), e) # raise errors based on http status codes, let the client handle those if needed if not (200 <= response.status_code < 300) and response.status_code not in ignore: self.log_request_fail( method, url, response.request.path_url, body, duration, response.status_code, raw_data ) self._raise_error(response.status_code, raw_data) self.log_request_success( method, url, response.request.path_url, body, response.status_code, raw_data, duration ) return response.status_code, response.headers, raw_data class MicroRaidenTransport(Transport): def __init__( self, hosts, *args, session: Session, connection_class=MicroRaidenConnection, connection_pool_class=DummyConnectionPool, **kwargs ): self.hosts = hosts log.debug('initializing transport') super(MicroRaidenTransport, self).__init__( hosts, *args, connection_class=connection_class, connection_pool_class=connection_pool_class, session=session, **kwargs ) @click.option( '--limits/--no-limits', default=True ) @click.command() def main(limits: bool): logging.basicConfig(level=logging.DEBUG) log.debug('in main') app = App() app.start(ignore_security_limits=not limits, endpoint_url='https://api.eth.events') log.debug('session started') if app.account.unlocked: import elasticsearch es = elasticsearch.Elasticsearch( transport_class=MicroRaidenTransport, hosts=['api.eth.events:443'], use_ssl=True, session=app.session ) response = es.search('ethereum', 'block', body=dict(query=dict(match_all=dict()))) print(response) if __name__ == '__main__': main()
4,063
36
148
df6d818cc916d501a3e8707af88d5a3730dc6550
615
py
Python
source/client/pages/SpeciesElement.py
RobinSatterthwaite/BirdTable
ef35c0135ee54910e535281e7f690643c3b4f6c4
[ "MIT" ]
null
null
null
source/client/pages/SpeciesElement.py
RobinSatterthwaite/BirdTable
ef35c0135ee54910e535281e7f690643c3b4f6c4
[ "MIT" ]
null
null
null
source/client/pages/SpeciesElement.py
RobinSatterthwaite/BirdTable
ef35c0135ee54910e535281e7f690643c3b4f6c4
[ "MIT" ]
null
null
null
from pystache import TemplateSpec
18.088235
57
0.580488
from pystache import TemplateSpec


class SpeciesElement(TemplateSpec):
    template_name = "SpeciesElement"

    def __init__(self, name, binomial_name, count, times_seen, include_times_seen, seen, heard):
        self.name = name
        self.binomialName = binomial_name
        self.count = count
        if include_times_seen:
            self.timesSeen = "&nbsp;/&nbsp;{0}".format(times_seen)
        else:
            self.timesSeen = ""
        if seen:
            self.seen = "\u26ab"
        else:
            self.seen = ""
        if heard:
            self.heard = "\u26ab"
        else:
            self.heard = ""
483
73
23
2abdd1a8347e55f710b0bd9bf098d6715d1155a9
561
py
Python
Number Guessing.py
GamePlayer-7/Gaming
4466f2e693f0c10d3bc041b388526484713dc2e1
[ "MIT" ]
null
null
null
Number Guessing.py
GamePlayer-7/Gaming
4466f2e693f0c10d3bc041b388526484713dc2e1
[ "MIT" ]
null
null
null
Number Guessing.py
GamePlayer-7/Gaming
4466f2e693f0c10d3bc041b388526484713dc2e1
[ "MIT" ]
null
null
null
import random # imports the random module, which contains a variety of things to do with random number generation.
number = random.randint(1,10) #If we wanted a random integer, we can use the randint function Randint accepts two parameters: a lowest and a highest number.

for i in range(0,3):
    user = int(input("guess the number"))
    if user == number:
        print("Hurray!!")
        print(f"you guessed the number right it's {number}")
        break
    if user != number:
        print(f"Your guess is incorrect the number is {number}")
51
161
0.672014
import random # imports the random module, which contains a variety of things to do with random number generation.
number = random.randint(1,10) #If we wanted a random integer, we can use the randint function Randint accepts two parameters: a lowest and a highest number.

for i in range(0,3):
    user = int(input("guess the number"))
    if user == number:
        print("Hurray!!")
        print(f"you guessed the number right it's {number}")
        break
    if user != number:
        print(f"Your guess is incorrect the number is {number}")
0
0
0
1afcc354de4e4e1ba67d59086c2b25d41157da44
2,681
py
Python
src/waldur_auth_saml2/utils.py
geant-multicloud/MCMS-mastermind
81333180f5e56a0bc88d7dad448505448e01f24e
[ "MIT" ]
26
2017-10-18T13:49:58.000Z
2021-09-19T04:44:09.000Z
src/waldur_auth_saml2/utils.py
geant-multicloud/MCMS-mastermind
81333180f5e56a0bc88d7dad448505448e01f24e
[ "MIT" ]
14
2018-12-10T14:14:51.000Z
2021-06-07T10:33:39.000Z
src/waldur_auth_saml2/utils.py
geant-multicloud/MCMS-mastermind
81333180f5e56a0bc88d7dad448505448e01f24e
[ "MIT" ]
32
2017-09-24T03:10:45.000Z
2021-10-16T16:41:09.000Z
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from djangosaml2.conf import get_config
from djangosaml2.utils import available_idps
from saml2.attribute_converter import ac_factory
from saml2.mdstore import InMemoryMetaData, MetaDataFile
from saml2.mdstore import name as get_idp_name
from saml2.s_utils import UnknownSystemEntity

from . import models
33.5125
91
0.697128
from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from djangosaml2.conf import get_config from djangosaml2.utils import available_idps from saml2.attribute_converter import ac_factory from saml2.mdstore import InMemoryMetaData, MetaDataFile from saml2.mdstore import name as get_idp_name from saml2.s_utils import UnknownSystemEntity from . import models def load_providers(): metadata = {} for filename in settings.WALDUR_AUTH_SAML2['IDP_METADATA_LOCAL']: mdf = MetaDataFile(ac_factory(), filename) mdf.load() metadata.update(mdf.items()) return metadata def sync_providers(): providers = load_providers() current_idps = list(models.IdentityProvider.objects.all().only('url', 'pk')) backend_urls = set(providers.keys()) stale_idps = set(idp.pk for idp in current_idps if idp.url not in backend_urls) models.IdentityProvider.objects.filter(pk__in=stale_idps).delete() existing_urls = set(idp.url for idp in current_idps) for url, metadata in providers.items(): name = get_idp_name(metadata) if not name: # It is expected that every provider has name. For corner cases check entity_id name = metadata.get('entity_id') if not name: # Skip invalid identity provider continue if url in existing_urls: # Skip identity provider if its url is already in the database continue models.IdentityProvider.objects.create(url=url, name=name, metadata=metadata) for provider in models.IdentityProvider.objects.all().iterator(): backend_metadata = providers.get(provider.url) if backend_metadata and provider.metadata != backend_metadata: provider.metadata = backend_metadata provider.save() def is_valid_idp(value): remote_providers = available_idps(get_config()).keys() return ( value in remote_providers or models.IdentityProvider.objects.filter(url=value).exists() ) def get_idp_sso_supported_bindings(idp_entity_id, config): try: return config.metadata.service( idp_entity_id, 'idpsso_descriptor', 'single_sign_on_service' ).keys() except (UnknownSystemEntity, AttributeError): return [] class DatabaseMetadataLoader(InMemoryMetaData): def load(self, *args, **kwargs): # Skip default parsing because data is not stored in file pass def __getitem__(self, item): try: return models.IdentityProvider.objects.get(url=item).metadata except ObjectDoesNotExist: raise KeyError
2,089
26
168
a8f717c691e08e576daf5d6b539ccd45bbb8b08f
2,114
py
Python
src/blockdiag/imagedraw/utils/__init__.py
Dridi/blockdiag
bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b
[ "Apache-2.0" ]
null
null
null
src/blockdiag/imagedraw/utils/__init__.py
Dridi/blockdiag
bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b
[ "Apache-2.0" ]
null
null
null
src/blockdiag/imagedraw/utils/__init__.py
Dridi/blockdiag
bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2011 Takeshi KOMIYA # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unicodedata from functools import wraps from blockdiag.utils import Size from blockdiag.utils.compat import u def is_zenkaku(char): """Detect given character is Japanese ZENKAKU character""" char_width = unicodedata.east_asian_width(char) return char_width in u("WFA") def zenkaku_len(string): """Count Japanese ZENKAKU characters from string""" return len([x for x in string if is_zenkaku(x)]) def hankaku_len(string): """Count non Japanese ZENKAKU characters from string""" return len([x for x in string if not is_zenkaku(x)]) def string_width(string): """Measure rendering width of string. Count ZENKAKU-character as 2-point and non ZENKAKU-character as 1-point """ widthmap = {'Na': 1, 'N': 1, 'H': 1, 'W': 2, 'F': 2, 'A': 2} return sum(widthmap[unicodedata.east_asian_width(c)] for c in string) def textsize(string, font): """Measure rendering size (width and height) of line. Returned size will not be exactly as rendered text size, Because this method does not use fonts to measure size. """ width = (zenkaku_len(string) * font.size + hankaku_len(string) * font.size * 0.55) return Size(int(math.ceil(width)), font.size)
30.2
78
0.682119
# -*- coding: utf-8 -*- # Copyright 2011 Takeshi KOMIYA # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unicodedata from functools import wraps from blockdiag.utils import Size from blockdiag.utils.compat import u def is_zenkaku(char): """Detect given character is Japanese ZENKAKU character""" char_width = unicodedata.east_asian_width(char) return char_width in u("WFA") def zenkaku_len(string): """Count Japanese ZENKAKU characters from string""" return len([x for x in string if is_zenkaku(x)]) def hankaku_len(string): """Count non Japanese ZENKAKU characters from string""" return len([x for x in string if not is_zenkaku(x)]) def string_width(string): """Measure rendering width of string. Count ZENKAKU-character as 2-point and non ZENKAKU-character as 1-point """ widthmap = {'Na': 1, 'N': 1, 'H': 1, 'W': 2, 'F': 2, 'A': 2} return sum(widthmap[unicodedata.east_asian_width(c)] for c in string) def textsize(string, font): """Measure rendering size (width and height) of line. Returned size will not be exactly as rendered text size, Because this method does not use fonts to measure size. """ width = (zenkaku_len(string) * font.size + hankaku_len(string) * font.size * 0.55) return Size(int(math.ceil(width)), font.size) def memoize(fn): fn.cache = {} @wraps(fn) def func(*args, **kwargs): key = str(args) + str(kwargs) if key not in fn.cache: fn.cache[key] = fn(*args, **kwargs) return fn.cache[key] return func
225
0
23
4b72d9fce27cf73da17b49970b55c087c4464e31
362
py
Python
telefones/models.py
projeto-agro-tcc/osvaldo-backend
7e8b6b2ed849cd54f0bb5b855c4016fa062d3c33
[ "MIT" ]
null
null
null
telefones/models.py
projeto-agro-tcc/osvaldo-backend
7e8b6b2ed849cd54f0bb5b855c4016fa062d3c33
[ "MIT" ]
null
null
null
telefones/models.py
projeto-agro-tcc/osvaldo-backend
7e8b6b2ed849cd54f0bb5b855c4016fa062d3c33
[ "MIT" ]
null
null
null
from django.db import models
25.857143
72
0.70442
from django.db import models


class Telefone(models.Model):
    residencial = models.CharField(max_length=14, null=True, blank=True)
    celular = models.CharField(max_length=12, null=False)
    outro = models.CharField(max_length=14, null=True, blank=True)

    class Meta:
        db_table = "en_telefones"

    def __str__(self):
        return self.celular
25
284
23
5a4d29d31fc8b9261b5b5f65d7bb0b5cb3b90e4d
4,639
py
Python
xt/framework/comm/comm_conf.py
ZZHsunsky/xingtian
0484e2c968d9e6b2e5f43a3b86c0213a095ba309
[ "MIT" ]
null
null
null
xt/framework/comm/comm_conf.py
ZZHsunsky/xingtian
0484e2c968d9e6b2e5f43a3b86c0213a095ba309
[ "MIT" ]
null
null
null
xt/framework/comm/comm_conf.py
ZZHsunsky/xingtian
0484e2c968d9e6b2e5f43a3b86c0213a095ba309
[ "MIT" ]
null
null
null
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import socket import time from subprocess import Popen import redis MAX_ACTOR_NUM = 40 MAX_LEARNER_NUM = 10 START_PORT = 20000 PORTNUM_PERLEARNER = MAX_ACTOR_NUM + 1 # 初始化,查看redis,连接redis, 生成端口池,即检测端口号哪些可用 def get_port(start_port): ''' get port used by module ''' predict_port = start_port + 1 if (predict_port + MAX_ACTOR_NUM - start_port) > PORTNUM_PERLEARNER: raise Exception("port num is not enough") return start_port, predict_port def test(): ''' test interface''' test_comm_conf = CommConf() redis_key = 'port_pool' print("{} len: {}".format(redis_key, test_comm_conf.redis.llen(redis_key))) for _ in range(test_comm_conf.redis.llen(redis_key)): pop_val = test_comm_conf.redis.lpop(redis_key) print("pop val: {} from '{}'".format(pop_val, redis_key)) start = time.time() test_comm_conf.init_portpool() print("use time", time.time() - start) train_port = get_port(20000) print(train_port) if __name__ == "__main__": test()
34.362963
79
0.64432
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import socket import time from subprocess import Popen import redis MAX_ACTOR_NUM = 40 MAX_LEARNER_NUM = 10 START_PORT = 20000 PORTNUM_PERLEARNER = MAX_ACTOR_NUM + 1 # 初始化,查看redis,连接redis, 生成端口池,即检测端口号哪些可用 class CommConf(object): def __init__(self): try: redis.Redis(host="127.0.0.1", port=6379, db=0).ping() except redis.ConnectionError: Popen("echo save '' | setsid redis-server -", shell=True) time.sleep(0.3) self.redis = redis.Redis(host="127.0.0.1", port=6379, db=0) self.pool_name = "port_pool" if not self.redis.exists(self.pool_name): self.init_portpool() def init_portpool(self): ''' init port pool ''' start_port = START_PORT try_num = 10 for _ in range(MAX_LEARNER_NUM): for _ in range(try_num): check_flag, next_port = self.check_learner_port(start_port) if not check_flag: break else: start_port = next_port self.redis.lpush(self.pool_name, start_port) self.redis.incr('port_num', amount=1) self.redis.incr('max_port_num', amount=1) start_port = next_port def get_start_port(self): ''' get start port ''' if int(self.redis.get('port_num')) == 0: raise Exception("Dont have available port") start_port = self.redis.lpop(self.pool_name) self.redis.decr('port_num', amount=1) return int(start_port) def release_start_port(self, start_port): ''' release start port ''' self.redis.lpush(self.pool_name, start_port) self.redis.incr('port_num', amount=1) if self.redis.get('port_num') == self.redis.get('max_port_num'): self.redis.delete('port_num') self.redis.delete('max_port_num') self.redis.delete('port_pool') print("shutdown redis") self.redis.shutdown(nosave=True) return def check_learner_port(self, start_port): ''' check if multi-port is in use ''' ip = "localhost" for i in range(PORTNUM_PERLEARNER): if self.check_port(ip, start_port + i): return True, start_port + i + 1 return False, start_port + PORTNUM_PERLEARNER def check_port(self, ip, port): ''' check if port is in use ''' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((ip, int(port))) s.shutdown(2) print("port is used", int(port)) return True except BaseException: return False def get_port(start_port): ''' get port used by module ''' predict_port = start_port + 1 if (predict_port + MAX_ACTOR_NUM - start_port) > PORTNUM_PERLEARNER: raise Exception("port num is not enough") return start_port, predict_port def test(): ''' test interface''' test_comm_conf = CommConf() redis_key = 'port_pool' print("{} len: 
{}".format(redis_key, test_comm_conf.redis.llen(redis_key))) for _ in range(test_comm_conf.redis.llen(redis_key)): pop_val = test_comm_conf.redis.lpop(redis_key) print("pop val: {} from '{}'".format(pop_val, redis_key)) start = time.time() test_comm_conf.init_portpool() print("use time", time.time() - start) train_port = get_port(20000) print(train_port) if __name__ == "__main__": test()
402
2,060
22
3e25d2bb70c9499de1f4cb505fe2880342dc5c50
2,572
py
Python
python/nbdb/anomaly/static.py
rubrikinc/nbdb2
359db63a39e016e3eb197b8ea511d6e8cffa1853
[ "Apache-2.0" ]
2
2022-03-21T15:48:33.000Z
2022-03-27T00:43:12.000Z
python/nbdb/anomaly/static.py
rubrikinc/nbdb2
359db63a39e016e3eb197b8ea511d6e8cffa1853
[ "Apache-2.0" ]
null
null
null
python/nbdb/anomaly/static.py
rubrikinc/nbdb2
359db63a39e016e3eb197b8ea511d6e8cffa1853
[ "Apache-2.0" ]
1
2022-03-27T00:43:31.000Z
2022-03-27T00:43:31.000Z
""" Static threshold based anomaly detection """ from typing import List, Tuple import logging import numpy as np import pandas as pd from nbdb.anomaly.anomaly_interface import AnomalyInterface from nbdb.readapi.graphite_response import Anomaly from nbdb.readapi.time_series_response import TimeRange logger = logging.getLogger(__name__) class Static(AnomalyInterface): # pylint: disable=too-few-public-methods """ Simple algorithm to do threshold based anomaly detection. Currently supports two functions (lt, gt). """ def find_anomalies(self, baseline: np.ndarray, raw_data: pd.Series) -> List[Tuple]: """ Use static threshold to determine anomalies in the raw data. Supports the lt, gt functions to compare against the threshold :param baseline: :param raw_data: :return: """ comparator_fn = self.config.get('comparator_fn', 'gt') threshold = self.config.get('threshold') raw_data.dropna(inplace=True) if comparator_fn == 'gt': anomalous_points = raw_data[raw_data > threshold] elif comparator_fn == 'lt': anomalous_points = raw_data[raw_data < threshold] else: raise NotImplementedError('Unknown comparator fn: {}'.format( comparator_fn)) anomalies = [] # No anomalous points found. Return early if len(anomalous_points) == 0: return anomalies previous_epoch = anomalous_points.index[0] anomaly_start = anomalous_points.index[0] sampling_interval = np.diff(raw_data.index).min() anomaly_score = 1.0 epoch = None for epoch, _ in anomalous_points.iteritems(): if (epoch - previous_epoch) / sampling_interval > 1: # Mark the current anomaly as ended and start a new one anomaly_window = TimeRange(anomaly_start, previous_epoch, sampling_interval) anomalies.append(Anomaly(anomaly_window, anomaly_score)) anomaly_score = 1.0 anomaly_start = epoch else: previous_epoch = epoch anomaly_score += 1 # append the final anomaly if epoch is not None: anomaly_window = TimeRange(anomaly_start, epoch, sampling_interval) anomalies.append(Anomaly(anomaly_window, anomaly_score)) return anomalies
35.232877
73
0.613919
""" Static threshold based anomaly detection """ from typing import List, Tuple import logging import numpy as np import pandas as pd from nbdb.anomaly.anomaly_interface import AnomalyInterface from nbdb.readapi.graphite_response import Anomaly from nbdb.readapi.time_series_response import TimeRange logger = logging.getLogger(__name__) class Static(AnomalyInterface): # pylint: disable=too-few-public-methods """ Simple algorithm to do threshold based anomaly detection. Currently supports two functions (lt, gt). """ def find_anomalies(self, baseline: np.ndarray, raw_data: pd.Series) -> List[Tuple]: """ Use static threshold to determine anomalies in the raw data. Supports the lt, gt functions to compare against the threshold :param baseline: :param raw_data: :return: """ comparator_fn = self.config.get('comparator_fn', 'gt') threshold = self.config.get('threshold') raw_data.dropna(inplace=True) if comparator_fn == 'gt': anomalous_points = raw_data[raw_data > threshold] elif comparator_fn == 'lt': anomalous_points = raw_data[raw_data < threshold] else: raise NotImplementedError('Unknown comparator fn: {}'.format( comparator_fn)) anomalies = [] # No anomalous points found. Return early if len(anomalous_points) == 0: return anomalies previous_epoch = anomalous_points.index[0] anomaly_start = anomalous_points.index[0] sampling_interval = np.diff(raw_data.index).min() anomaly_score = 1.0 epoch = None for epoch, _ in anomalous_points.iteritems(): if (epoch - previous_epoch) / sampling_interval > 1: # Mark the current anomaly as ended and start a new one anomaly_window = TimeRange(anomaly_start, previous_epoch, sampling_interval) anomalies.append(Anomaly(anomaly_window, anomaly_score)) anomaly_score = 1.0 anomaly_start = epoch else: previous_epoch = epoch anomaly_score += 1 # append the final anomaly if epoch is not None: anomaly_window = TimeRange(anomaly_start, epoch, sampling_interval) anomalies.append(Anomaly(anomaly_window, anomaly_score)) return anomalies
0
0
0
9288bc7c0b122d032f93019718b7a23eb2c872b0
1,598
py
Python
tests/test_help.py
thomasvolk/R_ev3dev
53b8c83af49e88eb4766deea0a690c55d1304d6a
[ "Apache-2.0" ]
null
null
null
tests/test_help.py
thomasvolk/R_ev3dev
53b8c83af49e88eb4766deea0a690c55d1304d6a
[ "Apache-2.0" ]
null
null
null
tests/test_help.py
thomasvolk/R_ev3dev
53b8c83af49e88eb4766deea0a690c55d1304d6a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
import unittest

from R_ev3dev.interpreter import Interpreter, Command
from R_ev3dev.help import Help, Version


class TestCommand01(Command):
    """ this is the test command 01

    usage: c01
    """


class TestCommand02(Command):
    """ this is the test command 02
    """
21.594595
86
0.628911
#!/usr/bin/env python3 import unittest from R_ev3dev.interpreter import Interpreter, Command from R_ev3dev.help import Help, Version class TestCommand01(Command): """ this is the test command 01 usage: c01 """ def invoke(self, interpreter_context, args): return 1 class TestCommand02(Command): """ this is the test command 02 """ def invoke(self, interpreter_context, args): return 2 class TestCommand03(Command): def invoke(self, interpreter_context, args): return 3 class TestHelp(unittest.TestCase): def setUp(self): self.interpreter = Interpreter([ TestCommand01('c01'), TestCommand02('c02'), TestCommand03('c03'), Help('help'), Version('version') ]) def test_overview(self): self.assertEqual("""--- R_ev3 protocol language version 0.0.1 author: Thomas Volk license: Apache License Version 2.0 source: https://github.com/thomasvolk/R_ev3dev possible commands: c01 - this is the test command 01 c02 - this is the test command 02 c03 - help - show help version - show version use help <command> for details ---""", self.interpreter.evaluate_internal("help").value) def test_help(self): self.assertEqual("""--- c01 this is the test command 01 usage: c01 ---""", self.interpreter.evaluate_internal("help c01").value) def test_version(self): self.assertEqual('0.0.1', self.interpreter.evaluate_internal("version").value)
1,029
21
231
0cb3d0dd6f38e1ffc07fd4e85e3458786f9cf6d8
420
py
Python
news/urls.py
vigen-b/FakeNews
fc19f623529d1661c9f3d475adc9db98ee95a38a
[ "Apache-2.0" ]
null
null
null
news/urls.py
vigen-b/FakeNews
fc19f623529d1661c9f3d475adc9db98ee95a38a
[ "Apache-2.0" ]
null
null
null
news/urls.py
vigen-b/FakeNews
fc19f623529d1661c9f3d475adc9db98ee95a38a
[ "Apache-2.0" ]
null
null
null
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns

from news import views

app_name = "news"

urlpatterns = [
    path("news/", views.NewsList.as_view()),
    path("news/<int:pk>/", views.NewsDetail.as_view()),
    path("category/", views.CategoryList.as_view()),
    path("category/<str:pk>/", views.CategoryDetail.as_view()),
]

urlpatterns = format_suffix_patterns(urlpatterns)
30
63
0.735714
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns

from news import views

app_name = "news"

urlpatterns = [
    path("news/", views.NewsList.as_view()),
    path("news/<int:pk>/", views.NewsDetail.as_view()),
    path("category/", views.CategoryList.as_view()),
    path("category/<str:pk>/", views.CategoryDetail.as_view()),
]

urlpatterns = format_suffix_patterns(urlpatterns)
0
0
0
8a0003c5108f33e3f1329656eaa782586e2568a7
9,993
py
Python
realtime_hand_3d/segmentation/models/csm.py
NeelayS/realtime_hand
219c772b9b7df60c390edac7da23f9cdddebca4d
[ "MIT" ]
null
null
null
realtime_hand_3d/segmentation/models/csm.py
NeelayS/realtime_hand
219c772b9b7df60c390edac7da23f9cdddebca4d
[ "MIT" ]
null
null
null
realtime_hand_3d/segmentation/models/csm.py
NeelayS/realtime_hand
219c772b9b7df60c390edac7da23f9cdddebca4d
[ "MIT" ]
null
null
null
import torch
from torch import nn

from .retrieve import SEG_MODELS_REGISTRY


@SEG_MODELS_REGISTRY.register()
32.339806
88
0.536976
import torch from torch import nn from .retrieve import SEG_MODELS_REGISTRY class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_channels, out_channels, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(out_channels) self.conv2 = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=stride, bias=False, padding=1, ) self.bn2 = nn.BatchNorm2d(out_channels) self.conv3 = nn.Conv2d( out_channels, out_channels * self.expansion, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(out_channels * self.expansion) self.relu = nn.ReLU() self.downsample = downsample def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out = self.relu(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.relu(out) return out class DeconvBottleneck(nn.Module): def __init__(self, in_channels, out_channels, expansion=2, stride=1, upsample=None): super(DeconvBottleneck, self).__init__() self.expansion = expansion self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(out_channels) if stride == 1: self.conv2 = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=stride, bias=False, padding=1, ) else: self.conv2 = nn.ConvTranspose2d( out_channels, out_channels, kernel_size=3, stride=stride, bias=False, padding=1, output_padding=1, ) self.bn2 = nn.BatchNorm2d(out_channels) self.conv3 = nn.Conv2d( out_channels, out_channels * self.expansion, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(out_channels * self.expansion) self.relu = nn.ReLU() self.upsample = upsample def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out = self.relu(out) if self.upsample is not None: shortcut = self.upsample(x) out += shortcut out = self.relu(out) return out class CSM_model(nn.Module): def __init__( self, downblock, upblock, in_channels, n_classes, with_energy=True, n_stages=2 ): super(CSM_model, self).__init__() self.start_channels = 32 self.n_classes = n_classes self.n_stages = n_stages self.with_energy = with_energy down_layer_size = 3 up_layer_size = 3 self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d( in_channels, 32, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = nn.BatchNorm2d(32) self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) self.dlayer1 = self._make_downlayer(downblock, 32, down_layer_size) self.dlayer2 = self._make_downlayer(downblock, 64, down_layer_size, stride=2) self.dlayer3 = self._make_downlayer(downblock, 128, down_layer_size, stride=2) self.dlayer4 = self._make_downlayer(downblock, 256, down_layer_size, stride=2) # stage1 if self.n_stages >= 1 or self.n_stages == -1: self.uplayer1_1 = self._make_up_block(upblock, 256, up_layer_size, stride=2) self.uplayer2_1 = self._make_up_block(upblock, 128, up_layer_size, stride=2) upsample_1 = nn.Sequential( nn.ConvTranspose2d( self.start_channels, 32, kernel_size=1, stride=2, bias=False, output_padding=1, ), nn.BatchNorm2d(32), ) self.uplayer_stage_1 = DeconvBottleneck( self.start_channels, 32, 1, 2, upsample_1 ) self.conv_seg_out_1 = nn.Conv2d( 32, n_classes, kernel_size=1, stride=1, bias=False ) if 
self.with_energy: self.conv_e_out_1 = nn.Conv2d( 32, n_classes, kernel_size=1, stride=1, bias=False ) # stage2 if self.n_stages >= 2 or self.n_stages == -1: self.uplayer1_2 = self._make_up_block(upblock, 64, up_layer_size, stride=2) if self.with_energy: self.post_cat_2 = nn.Conv2d( 134, 128, kernel_size=1, stride=1, bias=False ) else: self.post_cat_2 = nn.Conv2d( 131, 128, kernel_size=1, stride=1, bias=False ) self.bn_2 = nn.BatchNorm2d(128) self.uplayer2_2 = self._make_up_block(upblock, 32, up_layer_size) upsample_2 = nn.Sequential( nn.ConvTranspose2d( self.start_channels, 32, kernel_size=1, stride=2, bias=False, output_padding=1, ), nn.BatchNorm2d(32), ) self.uplayer_stage_2 = DeconvBottleneck(64, 32, 1, 2, upsample_2) self.conv_seg_out_2 = nn.Conv2d( 32, n_classes, kernel_size=1, stride=1, bias=False ) if self.with_energy: self.conv_e_out_2 = nn.Conv2d( 32, n_classes, kernel_size=1, stride=1, bias=False ) def _make_downlayer(self, block, init_channels, num_layer, stride=1): downsample = None if stride != 1 or self.start_channels != init_channels * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.start_channels, init_channels * block.expansion, kernel_size=1, stride=stride, bias=False, ), nn.BatchNorm2d(init_channels * block.expansion), ) layers = [] layers.append(block(self.start_channels, init_channels, stride, downsample)) self.start_channels = init_channels * block.expansion for i in range(1, num_layer): layers.append(block(self.start_channels, init_channels)) return nn.Sequential(*layers) def _make_up_block(self, block, init_channels, num_layer, stride=1): upsample = None if stride != 1 or self.start_channels != init_channels * 2: if stride == 1: output_padding = 0 else: output_padding = 1 upsample = nn.Sequential( nn.ConvTranspose2d( self.start_channels, init_channels * 2, kernel_size=1, stride=stride, bias=False, output_padding=output_padding, ), # 1), nn.BatchNorm2d(init_channels * 2), ) layers = [] for i in range(1, num_layer): layers.append(block(self.start_channels, init_channels, 4)) layers.append(block(self.start_channels, init_channels, 2, stride, upsample)) self.start_channels = init_channels * 2 return nn.Sequential(*layers) def forward(self, x): img = x x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.avgpool(x) x = self.dlayer1(x) x = self.dlayer2(x) x = self.dlayer3(x) x = self.dlayer4(x) # Mid x = self.uplayer1_1(x) x_mid = self.uplayer2_1(x) # Stage 1 x_stage1 = self.uplayer_stage_1(x_mid) x_seg_out1 = self.conv_seg_out_1(x_stage1) x_hands1 = x_seg_out1 if self.with_energy: x_e_out1 = self.sigmoid(self.conv_e_out_1(x_stage1)) if self.n_stages == 1: if self.with_energy: return x_hands1, x_e_out1 else: return x_hands1 # stage2 x_mid2 = self.uplayer1_2(x_mid) if self.with_energy: x = torch.cat([x_mid2, x_seg_out1, x_e_out1], dim=1) else: x = torch.cat([x_mid2, x_seg_out1], dim=1) x = self.post_cat_2(x) x = self.bn_2(x) x = self.relu(x) x = self.uplayer2_2(x) x = self.uplayer_stage_2(x) x_seg_out2 = self.conv_seg_out_2(x) x_hands2 = x_seg_out2 if self.with_energy: x_e_out2 = self.sigmoid(self.conv_e_out_2(x)) if self.n_stages == 2: if self.with_energy: return x_e_out2, x_hands2 else: return x_hands2 else: if self.with_energy: return x_hands1, x_e_out1, x_hands2, x_e_out2 else: return x_hands1, x_hands2 @SEG_MODELS_REGISTRY.register() def CSM(in_channels=3, n_classes=3, **kwargs): return CSM_model( Bottleneck, DeconvBottleneck, in_channels=in_channels, n_classes=n_classes, **kwargs )
9,530
98
251
9967baa443818e97fb20549f70a4bd20685b2cd4
5,239
py
Python
bobstack/sipmessaging/sipMessage.py
bobjects/BobStack
c177b286075044832f44baf9ace201780c8b4320
[ "Apache-2.0" ]
null
null
null
bobstack/sipmessaging/sipMessage.py
bobjects/BobStack
c177b286075044832f44baf9ace201780c8b4320
[ "Apache-2.0" ]
null
null
null
bobstack/sipmessaging/sipMessage.py
bobjects/BobStack
c177b286075044832f44baf9ace201780c8b4320
[ "Apache-2.0" ]
null
null
null
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

from sipHeader import SIPHeader
from sipStartLineFactory import SIPStartLineFactory
23.181416
76
0.640389
try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from sipHeader import SIPHeader from sipStartLineFactory import SIPStartLineFactory class SIPMessage(object): @classmethod def new_parsed_from(cls, a_string): answer = cls() answer.raw_string = a_string return answer @classmethod def _new_for_attributes(cls, start_line=None, header=None, content=""): answer = cls() answer.start_line = start_line if header: answer.header = header else: answer.header = SIPHeader.new_for_attributes(header_fields=None) answer.content = content return answer def __init__(self): self._content = None self._startLine = None self._header = None self._rawString = None @property def deep_copy(self): return self.__class__.new_parsed_from(self.raw_string) @property def raw_string(self): if self._rawString is None: self.render_raw_string_from_attributes() return self._rawString @raw_string.setter def raw_string(self, a_string): self._rawString = a_string self.clear_attributes() @property def body(self): return self.content def clear_raw_string(self): self._rawString = None def clear_attributes(self): self._content = None self._startLine = None self._header = None def parse_attributes_from_raw_string(self): self._content = "" string_io = StringIO(self._rawString) self._startLine = SIPStartLineFactory().next_for_stringio(string_io) self._header = SIPHeader.new_parsed_from(string_io) self._content = string_io.read() string_io.close() def render_raw_string_from_attributes(self): stringio = StringIO() stringio.write(self._startLine.raw_string) stringio.write("\r\n") self._header.render_raw_string_from_attributes(stringio) stringio.write(self._content) self._rawString = stringio.getvalue() stringio.close() @property def start_line(self): if self._startLine is None: self.parse_attributes_from_raw_string() return self._startLine @start_line.setter def start_line(self, a_sip_start_line): self._startLine = a_sip_start_line self.clear_raw_string() @property def header(self): if self._header is None: self.parse_attributes_from_raw_string() return self._header @header.setter def header(self, a_sip_header): self._header = a_sip_header self.clear_raw_string() @property def content(self): if self._content is None: self.parse_attributes_from_raw_string() return self._content @content.setter def content(self, a_string): self._content = a_string self.clear_raw_string() @property def vias(self): return self.header.vias @property def via_header_fields(self): return self.header.via_header_fields @property def route_uris(self): return self.header.route_uris @property def record_route_uris(self): return self.header.record_route_uris @property def transaction_hash(self): return self.header.transaction_hash @property def dialog_hash(self): return self.header.dialog_hash # TODO: This is a hot method. Should we cache? 
@property def is_valid(self): if self.is_malformed: return False if not self.header.is_valid: return False if self.header.content_length is not None: if self.header.content_length != self.content.__len__(): return False return True @property def is_invalid(self): return not self.is_valid @property def is_unknown(self): return not self.is_known @property def is_known(self): return False @property def is_malformed(self): return False @property def is_request(self): return False @property def is_response(self): return False @property def is_ack_request(self): return False @property def is_bye_request(self): return False @property def is_cancel_request(self): return False @property def is_info_request(self): return False @property def is_invite_request(self): return False @property def is_message_request(self): return False @property def is_notify_request(self): return False @property def is_options_request(self): return False @property def is_publish_request(self): return False @property def is_prack_request(self): return False @property def is_refer_request(self): return False @property def is_register_request(self): return False @property def is_subscribe_request(self): return False @property def is_update_request(self): return False
3,211
1,825
23
943c8ed1cd17178b2e7dd6ef67854da8a007f148
98
py
Python
codes_auto/1635.number-of-good-pairs.py
smartmark-pro/leetcode_record
6504b733d892a705571eb4eac836fb10e94e56db
[ "MIT" ]
null
null
null
codes_auto/1635.number-of-good-pairs.py
smartmark-pro/leetcode_record
6504b733d892a705571eb4eac836fb10e94e56db
[ "MIT" ]
null
null
null
codes_auto/1635.number-of-good-pairs.py
smartmark-pro/leetcode_record
6504b733d892a705571eb4eac836fb10e94e56db
[ "MIT" ]
null
null
null
#
# @lc app=leetcode.cn id=1635 lang=python3
#
# [1635] number-of-good-pairs
#
None
# @lc code=end
14
42
0.673469
#
# @lc app=leetcode.cn id=1635 lang=python3
#
# [1635] number-of-good-pairs
#
None
# @lc code=end
0
0
0
3d707e5f1c279e06637838e9d88dd40ec499c8ba
1,110
py
Python
python-the-hard-way/12-prompting-people.py
Valka7a/python-playground
f08d4374f2cec2e8b1afec3753854b1ec10ff480
[ "MIT" ]
null
null
null
python-the-hard-way/12-prompting-people.py
Valka7a/python-playground
f08d4374f2cec2e8b1afec3753854b1ec10ff480
[ "MIT" ]
null
null
null
python-the-hard-way/12-prompting-people.py
Valka7a/python-playground
f08d4374f2cec2e8b1afec3753854b1ec10ff480
[ "MIT" ]
null
null
null
# Exercise 12: Prompting People

# Variables
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")

# Print
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)

# Study Drills
# 1. In Terminal where you normally run python to run your scripts,
# type pydoc raw_input. Read what it says. If you're on Windows
# try python -m pydoc raw_input instead.
# 2. Get out of pydoc by typing q to quit.
# 3. Look onine for what the pydoc command does.
# 4. Use pydoc to also read about open, file, os and sys. It's
# alright if you do not understand thosel just read through
# and take notes about interesting things.

# Drill 1
# Help on built-in function raw_input in module __builtin__:

# raw_input(...)
# raw_input([prompt]) -> string
#
# Read a string from standard input. The trailing newline is stripped.
# If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
# On Unix, GNU readline is used if enabled. The prompt string, if given,
# is printed without a trailing newline before reading.
34.6875
76
0.715315
# Exercise 12: Prompting People

# Variables
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")

# Print
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)

# Study Drills
# 1. In Terminal where you normally run python to run your scripts,
# type pydoc raw_input. Read what it says. If you're on Windows
# try python -m pydoc raw_input instead.
# 2. Get out of pydoc by typing q to quit.
# 3. Look onine for what the pydoc command does.
# 4. Use pydoc to also read about open, file, os and sys. It's
# alright if you do not understand thosel just read through
# and take notes about interesting things.

# Drill 1
# Help on built-in function raw_input in module __builtin__:

# raw_input(...)
# raw_input([prompt]) -> string
#
# Read a string from standard input. The trailing newline is stripped.
# If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
# On Unix, GNU readline is used if enabled. The prompt string, if given,
# is printed without a trailing newline before reading.
0
0
0
17884ad7e858a3c341d64db09625d9ca52b143f6
1,730
py
Python
alipay/aop/api/domain/InvestorMaterialInfo.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/InvestorMaterialInfo.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/InvestorMaterialInfo.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
24.366197
67
0.550289
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class InvestorMaterialInfo(object):

    def __init__(self):
        self._file_id = None
        self._file_url = None
        self._type = None

    @property
    def file_id(self):
        return self._file_id

    @file_id.setter
    def file_id(self, value):
        self._file_id = value

    @property
    def file_url(self):
        return self._file_url

    @file_url.setter
    def file_url(self, value):
        self._file_url = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        params = dict()
        if self.file_id:
            if hasattr(self.file_id, 'to_alipay_dict'):
                params['file_id'] = self.file_id.to_alipay_dict()
            else:
                params['file_id'] = self.file_id
        if self.file_url:
            if hasattr(self.file_url, 'to_alipay_dict'):
                params['file_url'] = self.file_url.to_alipay_dict()
            else:
                params['file_url'] = self.file_url
        if self.type:
            if hasattr(self.type, 'to_alipay_dict'):
                params['type'] = self.type.to_alipay_dict()
            else:
                params['type'] = self.type
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = InvestorMaterialInfo()
        if 'file_id' in d:
            o.file_id = d['file_id']
        if 'file_url' in d:
            o.file_url = d['file_url']
        if 'type' in d:
            o.type = d['type']
        return o
1,218
374
23
715eab9e05e4e3e6f81c12646f271a7236441291
12,764
py
Python
msticpy/nbtools/azure_ml_tools.py
ekmixon/msticpy
8676a648ba9bfb4d848a8dda964820d4942a32ca
[ "MIT" ]
null
null
null
msticpy/nbtools/azure_ml_tools.py
ekmixon/msticpy
8676a648ba9bfb4d848a8dda964820d4942a32ca
[ "MIT" ]
3
2021-05-15T02:16:39.000Z
2022-01-19T13:13:25.000Z
msticpy/nbtools/azure_ml_tools.py
ekmixon/msticpy
8676a648ba9bfb4d848a8dda964820d4942a32ca
[ "MIT" ]
null
null
null
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """Checker functions for Azure ML notebooks.""" import json import os import socket import sys import urllib from pathlib import Path from typing import Any, List, Mapping, Optional, Tuple, Union from IPython import get_ipython from IPython.display import HTML, display from pkg_resources import parse_version from .._version import VERSION from ..common.pkg_config import refresh_config __version__ = VERSION AZ_GET_STARTED = ( "https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/A%20Getting" "%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb" ) TROUBLE_SHOOTING = ( "https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/" "TroubleShootingNotebooks.ipynb" ) MISSING_PKG_ERR = """ <h4><font color='orange'>The package '<b>{package}</b>' is not installed or has an unsupported version (installed version = '{inst_ver}')</font></h4> Please install or upgrade before continuing: required version is {package}>={req_ver} """ MP_INSTALL_FAILED = """ <h4><font color='red'>The notebook may not run correctly without the correct version of '<b>{pkg}</b>' ({ver} or later).</font></h4> Please see the <a href="{nbk_uri}"> Getting Started Guide For Azure Sentinel ML Notebooks</a></b> for more information<br><hr> """ RELOAD_MP = """ <h4><font color='orange'>Kernel restart needed</h4> An error was detected trying to load the updated version of MSTICPy.<br> Please restart the notebook kernel and re-run this cell - it should run without error. """ MIN_PYTHON_VER_DEF = "3.6" MSTICPY_REQ_VERSION = __version__ VER_RGX = r"(?P<maj>\d+)\.(?P<min>\d+).(?P<pnt>\d+)(?P<suff>.*)" MP_ENV_VAR = "MSTICPYCONFIG" MP_FILE = "msticpyconfig.yaml" NB_CHECK_URI = ( "https://raw.githubusercontent.com/Azure/Azure-Sentinel-" "Notebooks/master/utils/nb_check.py" ) def is_in_aml(): """Return True if running in Azure Machine Learning.""" return os.environ.get("APPSETTING_WEBSITE_SITE_NAME") == "AMLComputeInstance" def check_versions( min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF, min_mp_ver: Union[str, Tuple] = MSTICPY_REQ_VERSION, extras: Optional[List[str]] = None, mp_release: Optional[str] = None, **kwargs, ): """ Check the current versions of the Python kernel and MSTICPy. Parameters ---------- min_py_ver : Union[Tuple[int, int], str] Minimum Python version min_mp_ver : Union[Tuple[int, int], str] Minimum MSTICPy version extras : Optional[List[str]], optional A list of extras required for MSTICPy mp_release : Optional[str], optional Override the MSTICPy release version. This can also be specified in the environment variable 'MP_TEST_VER' Raises ------ RuntimeError If the Python version does not support the notebook. 
If the MSTICPy version does not support the notebook and the user chose not to upgrade """ del kwargs _disp_html("<h4>Starting notebook pre-checks...</h4>") if isinstance(min_py_ver, str): min_py_ver = _get_pkg_version(min_py_ver).release check_python_ver(min_py_ver=min_py_ver) _check_mp_install(min_mp_ver, mp_release, extras) _check_kql_prereqs() _set_kql_env_vars(extras) _run_user_settings() _set_mpconfig_var() _disp_html("<h4>Notebook pre-checks complete.</h4>") def check_python_ver(min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF): """ Check the current version of the Python kernel. Parameters ---------- min_py_ver : Tuple[int, int] Minimum Python version Raises ------ RuntimeError If the Python version does not support the notebook. """ min_py_ver = _get_pkg_version(min_py_ver) sys_ver = _get_pkg_version(sys.version_info[:3]) _disp_html("Checking Python kernel version...") if sys_ver < min_py_ver: # Bandit SQL inject error found here _disp_html( f""" <h4><font color='red'>This notebook requires a later (Python) kernel version.</h4></font> Select a kernel from the notebook toolbar (above), that is Python {min_py_ver} or later (Python 3.8 recommended)<br> """ # nosec ) _disp_html( f""" Please see the <a href="{TROUBLE_SHOOTING}">TroubleShootingNotebooks</a> for more information<br><br><hr> """ ) # Bandit SQL inject error found here raise RuntimeError(f"Python {min_py_ver} or later kernel is required.") # nosec if sys_ver < _get_pkg_version("3.8"): _disp_html( "Recommended: switch to using the 'Python 3.8 - AzureML' notebook kernel" " if this is available." ) _disp_html(f"Info: Python kernel version {sys_ver} - OK<br>") def _check_mp_install( min_mp_ver: Union[str, Tuple], mp_release: Optional[str], extras: Optional[List[str]], ): """Check for and try to install required MSTICPy version.""" # Use the release ver specified in params, in the environment or # the notebook default. pkg_version = _get_pkg_version(min_mp_ver) mp_install_version = mp_release or os.environ.get("MP_TEST_VER") or str(pkg_version) check_mp_ver(min_msticpy_ver=mp_install_version, extras=extras) def check_mp_ver(min_msticpy_ver: Union[str, Tuple], extras: Optional[List[str]]): """ Check and optionally update the current version of msticpy. 
Parameters ---------- min_msticpy_ver : Tuple[int, int] Minimum MSTICPy version extras : Optional[List[str]], optional A list of extras required for MSTICPy Raises ------ ImportError If MSTICPy version is insufficient and we need to upgrade """ mp_min_pkg_ver = _get_pkg_version(min_msticpy_ver) _disp_html("Checking msticpy version...<br>") inst_version = _get_pkg_version(__version__) if inst_version < mp_min_pkg_ver: _disp_html( MISSING_PKG_ERR.format( package="msticpy", inst_ver=inst_version, req_ver=mp_min_pkg_ver, ) ) mp_pkg_spec = f"msticpy[{','.join(extras)}]" if extras else "msticpy" mp_pkg_spec = f"{mp_pkg_spec}>={min_msticpy_ver}" _disp_html( f"Please run the following command to upgrade MSTICPy<br>" f"<pre>!{mp_pkg_spec}</pre><br>" ) raise ImportError( "Unsupported version of MSTICPy installed", f"Installed version: {inst_version}", f"Required version: {mp_min_pkg_ver}", ) _disp_html(f"Info: msticpy version {inst_version} (>= {mp_min_pkg_ver}) - OK<br>") def _set_kql_env_vars(extras: Optional[List[str]]): """Set environment variables for Kqlmagic based on MP extras.""" jp_extended = ("azsentinel", "azuresentinel", "kql") if extras and any(extra for extra in extras if extra in jp_extended): os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-extended" else: os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-basic" if is_in_aml(): os.environ["KQLMAGIC_AZUREML_COMPUTE"] = _get_vm_fqdn() def _get_pkg_version(version: Union[str, Tuple]) -> Any: """Return pkg_resources parsed version from string or tuple.""" if isinstance(version, str): return parse_version(version) if isinstance(version, tuple): return parse_version(".".join(str(ver) for ver in version)) raise TypeError(f"Unparseable type version {version}") def _disp_html(text: str): """Display the HTML text.""" display(HTML(text)) def get_aml_user_folder() -> Optional[Path]: """Return the root of the user folder.""" path_parts = Path(".").absolute().parts if "Users" not in path_parts: return None # find the index of the last occurrence of "users" users_idx = len(path_parts) - path_parts[::-1].index("Users") # the user folder is one item below this if len(path_parts) < users_idx + 1: return None return Path("/".join(path_parts[: users_idx + 1])) # pylint: disable=import-outside-toplevel, unused-import, import-error def _run_user_settings(): """Import nbuser_settings.py, if it exists.""" user_folder = get_aml_user_folder() if user_folder.joinpath("nbuser_settings.py").is_file(): sys.path.append(str(user_folder)) import nbuser_settings # noqa: F401 # pylint: enable=import-outside-toplevel, unused-import, import-error def _set_mpconfig_var(): """Set MSTICPYCONFIG to file in user directory if no other found.""" mp_path_val = os.environ.get(MP_ENV_VAR) if ( # If a valid MSTICPYCONFIG value is found - return (mp_path_val and Path(mp_path_val).is_file()) # Or if there is a msticpconfig in the current folder. or Path(".").joinpath(MP_FILE).is_file() ): return # Otherwise check the user's root folder user_dir = get_aml_user_folder() mp_path = Path(user_dir).joinpath(MP_FILE) if mp_path.is_file(): # If there's a file there, set the env variable to that. os.environ[MP_ENV_VAR] = str(mp_path) # Since we have already imported msticpy to check the version # it will have already configured settings so we need to refresh. refresh_config() _disp_html( f"<br>No {MP_FILE} found. 
Will use {MP_FILE} in user folder {user_dir}<br>" ) def _get_vm_metadata() -> Mapping[str, Any]: """Use local request to get VM metadata.""" vm_uri = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" req = urllib.request.Request(vm_uri) # type: ignore req.add_header("Metadata", "true") # Bandit warning on urlopen - Fixed private URL with urllib.request.urlopen(req) as resp: # type: ignore # nosec metadata = json.loads(resp.read()) return metadata if isinstance(metadata, dict) else {} def _get_vm_fqdn() -> str: """Get the FQDN of the host.""" az_region = _get_vm_metadata().get("compute", {}).get("location") return ".".join( [ socket.gethostname(), az_region, "instances.azureml.ms", ] if az_region else "" ) def _check_kql_prereqs(): """ Check and install packages for Kqlmagic/msal_extensions. Notes ----- Kqlmagic may trigger warnings about a missing PyGObject package and some system library dependencies. To fix this do the following:<br> From a notebook run: %pip uninstall enum34 !sudo apt-get --yes install libgirepository1.0-dev !sudo apt-get --yes install gir1.2-secret-1 %pip install pygobject You can also do this from a terminal - but ensure that you've activated the environment corresponding to the kernel you are using prior to running the pip commands. # Install the libgi dependency sudo apt install libgirepository1.0-dev sudo apt install gir1.2-secret-1 # activate the environment # conda activate azureml_py38 # source ./env_path/scripts/activate # Uninstall enum34 python -m pip uninstall enum34 # Install pygobject python -m install pygobject """ if not is_in_aml(): return try: # If this successfully imports, we are ok # pylint: disable=import-outside-toplevel import gi # pylint: enable=import-outside-toplevel del gi except ImportError: # Check for system packages ip_shell = get_ipython() if not ip_shell: return apt_list = ip_shell.run_line_magic("sx", "apt list") apt_list = [apt.split("/", maxsplit=1)[0] for apt in apt_list] missing_lx_pkg = [ apt_pkg for apt_pkg in ("libgirepository1.0-dev", "gir1.2-secret-1") if apt_pkg not in apt_list ] if missing_lx_pkg: _disp_html( "Kqlmagic/msal-extensions pre-requisite PyGObject not installed." ) _disp_html( "To prevent warnings when loading the Kqlmagic data provider," " Please run the following command:<br>" "!conda install --yes -c conda-forge pygobject<br>" )
33.413613
90
0.645096
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """Checker functions for Azure ML notebooks.""" import json import os import socket import sys import urllib from pathlib import Path from typing import Any, List, Mapping, Optional, Tuple, Union from IPython import get_ipython from IPython.display import HTML, display from pkg_resources import parse_version from .._version import VERSION from ..common.pkg_config import refresh_config __version__ = VERSION AZ_GET_STARTED = ( "https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/A%20Getting" "%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb" ) TROUBLE_SHOOTING = ( "https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/" "TroubleShootingNotebooks.ipynb" ) MISSING_PKG_ERR = """ <h4><font color='orange'>The package '<b>{package}</b>' is not installed or has an unsupported version (installed version = '{inst_ver}')</font></h4> Please install or upgrade before continuing: required version is {package}>={req_ver} """ MP_INSTALL_FAILED = """ <h4><font color='red'>The notebook may not run correctly without the correct version of '<b>{pkg}</b>' ({ver} or later).</font></h4> Please see the <a href="{nbk_uri}"> Getting Started Guide For Azure Sentinel ML Notebooks</a></b> for more information<br><hr> """ RELOAD_MP = """ <h4><font color='orange'>Kernel restart needed</h4> An error was detected trying to load the updated version of MSTICPy.<br> Please restart the notebook kernel and re-run this cell - it should run without error. """ MIN_PYTHON_VER_DEF = "3.6" MSTICPY_REQ_VERSION = __version__ VER_RGX = r"(?P<maj>\d+)\.(?P<min>\d+).(?P<pnt>\d+)(?P<suff>.*)" MP_ENV_VAR = "MSTICPYCONFIG" MP_FILE = "msticpyconfig.yaml" NB_CHECK_URI = ( "https://raw.githubusercontent.com/Azure/Azure-Sentinel-" "Notebooks/master/utils/nb_check.py" ) def is_in_aml(): """Return True if running in Azure Machine Learning.""" return os.environ.get("APPSETTING_WEBSITE_SITE_NAME") == "AMLComputeInstance" def check_versions( min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF, min_mp_ver: Union[str, Tuple] = MSTICPY_REQ_VERSION, extras: Optional[List[str]] = None, mp_release: Optional[str] = None, **kwargs, ): """ Check the current versions of the Python kernel and MSTICPy. Parameters ---------- min_py_ver : Union[Tuple[int, int], str] Minimum Python version min_mp_ver : Union[Tuple[int, int], str] Minimum MSTICPy version extras : Optional[List[str]], optional A list of extras required for MSTICPy mp_release : Optional[str], optional Override the MSTICPy release version. This can also be specified in the environment variable 'MP_TEST_VER' Raises ------ RuntimeError If the Python version does not support the notebook. 
If the MSTICPy version does not support the notebook and the user chose not to upgrade """ del kwargs _disp_html("<h4>Starting notebook pre-checks...</h4>") if isinstance(min_py_ver, str): min_py_ver = _get_pkg_version(min_py_ver).release check_python_ver(min_py_ver=min_py_ver) _check_mp_install(min_mp_ver, mp_release, extras) _check_kql_prereqs() _set_kql_env_vars(extras) _run_user_settings() _set_mpconfig_var() _disp_html("<h4>Notebook pre-checks complete.</h4>") def check_python_ver(min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF): """ Check the current version of the Python kernel. Parameters ---------- min_py_ver : Tuple[int, int] Minimum Python version Raises ------ RuntimeError If the Python version does not support the notebook. """ min_py_ver = _get_pkg_version(min_py_ver) sys_ver = _get_pkg_version(sys.version_info[:3]) _disp_html("Checking Python kernel version...") if sys_ver < min_py_ver: # Bandit SQL inject error found here _disp_html( f""" <h4><font color='red'>This notebook requires a later (Python) kernel version.</h4></font> Select a kernel from the notebook toolbar (above), that is Python {min_py_ver} or later (Python 3.8 recommended)<br> """ # nosec ) _disp_html( f""" Please see the <a href="{TROUBLE_SHOOTING}">TroubleShootingNotebooks</a> for more information<br><br><hr> """ ) # Bandit SQL inject error found here raise RuntimeError(f"Python {min_py_ver} or later kernel is required.") # nosec if sys_ver < _get_pkg_version("3.8"): _disp_html( "Recommended: switch to using the 'Python 3.8 - AzureML' notebook kernel" " if this is available." ) _disp_html(f"Info: Python kernel version {sys_ver} - OK<br>") def _check_mp_install( min_mp_ver: Union[str, Tuple], mp_release: Optional[str], extras: Optional[List[str]], ): """Check for and try to install required MSTICPy version.""" # Use the release ver specified in params, in the environment or # the notebook default. pkg_version = _get_pkg_version(min_mp_ver) mp_install_version = mp_release or os.environ.get("MP_TEST_VER") or str(pkg_version) check_mp_ver(min_msticpy_ver=mp_install_version, extras=extras) def check_mp_ver(min_msticpy_ver: Union[str, Tuple], extras: Optional[List[str]]): """ Check and optionally update the current version of msticpy. 
Parameters ---------- min_msticpy_ver : Tuple[int, int] Minimum MSTICPy version extras : Optional[List[str]], optional A list of extras required for MSTICPy Raises ------ ImportError If MSTICPy version is insufficient and we need to upgrade """ mp_min_pkg_ver = _get_pkg_version(min_msticpy_ver) _disp_html("Checking msticpy version...<br>") inst_version = _get_pkg_version(__version__) if inst_version < mp_min_pkg_ver: _disp_html( MISSING_PKG_ERR.format( package="msticpy", inst_ver=inst_version, req_ver=mp_min_pkg_ver, ) ) mp_pkg_spec = f"msticpy[{','.join(extras)}]" if extras else "msticpy" mp_pkg_spec = f"{mp_pkg_spec}>={min_msticpy_ver}" _disp_html( f"Please run the following command to upgrade MSTICPy<br>" f"<pre>!{mp_pkg_spec}</pre><br>" ) raise ImportError( "Unsupported version of MSTICPy installed", f"Installed version: {inst_version}", f"Required version: {mp_min_pkg_ver}", ) _disp_html(f"Info: msticpy version {inst_version} (>= {mp_min_pkg_ver}) - OK<br>") def _set_kql_env_vars(extras: Optional[List[str]]): """Set environment variables for Kqlmagic based on MP extras.""" jp_extended = ("azsentinel", "azuresentinel", "kql") if extras and any(extra for extra in extras if extra in jp_extended): os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-extended" else: os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-basic" if is_in_aml(): os.environ["KQLMAGIC_AZUREML_COMPUTE"] = _get_vm_fqdn() def _get_pkg_version(version: Union[str, Tuple]) -> Any: """Return pkg_resources parsed version from string or tuple.""" if isinstance(version, str): return parse_version(version) if isinstance(version, tuple): return parse_version(".".join(str(ver) for ver in version)) raise TypeError(f"Unparseable type version {version}") def _disp_html(text: str): """Display the HTML text.""" display(HTML(text)) def get_aml_user_folder() -> Optional[Path]: """Return the root of the user folder.""" path_parts = Path(".").absolute().parts if "Users" not in path_parts: return None # find the index of the last occurrence of "users" users_idx = len(path_parts) - path_parts[::-1].index("Users") # the user folder is one item below this if len(path_parts) < users_idx + 1: return None return Path("/".join(path_parts[: users_idx + 1])) # pylint: disable=import-outside-toplevel, unused-import, import-error def _run_user_settings(): """Import nbuser_settings.py, if it exists.""" user_folder = get_aml_user_folder() if user_folder.joinpath("nbuser_settings.py").is_file(): sys.path.append(str(user_folder)) import nbuser_settings # noqa: F401 # pylint: enable=import-outside-toplevel, unused-import, import-error def _set_mpconfig_var(): """Set MSTICPYCONFIG to file in user directory if no other found.""" mp_path_val = os.environ.get(MP_ENV_VAR) if ( # If a valid MSTICPYCONFIG value is found - return (mp_path_val and Path(mp_path_val).is_file()) # Or if there is a msticpconfig in the current folder. or Path(".").joinpath(MP_FILE).is_file() ): return # Otherwise check the user's root folder user_dir = get_aml_user_folder() mp_path = Path(user_dir).joinpath(MP_FILE) if mp_path.is_file(): # If there's a file there, set the env variable to that. os.environ[MP_ENV_VAR] = str(mp_path) # Since we have already imported msticpy to check the version # it will have already configured settings so we need to refresh. refresh_config() _disp_html( f"<br>No {MP_FILE} found. 
Will use {MP_FILE} in user folder {user_dir}<br>" ) def _get_vm_metadata() -> Mapping[str, Any]: """Use local request to get VM metadata.""" vm_uri = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" req = urllib.request.Request(vm_uri) # type: ignore req.add_header("Metadata", "true") # Bandit warning on urlopen - Fixed private URL with urllib.request.urlopen(req) as resp: # type: ignore # nosec metadata = json.loads(resp.read()) return metadata if isinstance(metadata, dict) else {} def _get_vm_fqdn() -> str: """Get the FQDN of the host.""" az_region = _get_vm_metadata().get("compute", {}).get("location") return ".".join( [ socket.gethostname(), az_region, "instances.azureml.ms", ] if az_region else "" ) def _check_kql_prereqs(): """ Check and install packages for Kqlmagic/msal_extensions. Notes ----- Kqlmagic may trigger warnings about a missing PyGObject package and some system library dependencies. To fix this do the following:<br> From a notebook run: %pip uninstall enum34 !sudo apt-get --yes install libgirepository1.0-dev !sudo apt-get --yes install gir1.2-secret-1 %pip install pygobject You can also do this from a terminal - but ensure that you've activated the environment corresponding to the kernel you are using prior to running the pip commands. # Install the libgi dependency sudo apt install libgirepository1.0-dev sudo apt install gir1.2-secret-1 # activate the environment # conda activate azureml_py38 # source ./env_path/scripts/activate # Uninstall enum34 python -m pip uninstall enum34 # Install pygobject python -m install pygobject """ if not is_in_aml(): return try: # If this successfully imports, we are ok # pylint: disable=import-outside-toplevel import gi # pylint: enable=import-outside-toplevel del gi except ImportError: # Check for system packages ip_shell = get_ipython() if not ip_shell: return apt_list = ip_shell.run_line_magic("sx", "apt list") apt_list = [apt.split("/", maxsplit=1)[0] for apt in apt_list] missing_lx_pkg = [ apt_pkg for apt_pkg in ("libgirepository1.0-dev", "gir1.2-secret-1") if apt_pkg not in apt_list ] if missing_lx_pkg: _disp_html( "Kqlmagic/msal-extensions pre-requisite PyGObject not installed." ) _disp_html( "To prevent warnings when loading the Kqlmagic data provider," " Please run the following command:<br>" "!conda install --yes -c conda-forge pygobject<br>" )
0
0
0
b65206728e5f3f6cbab0f87066d7ed1dc8784f63
4,423
py
Python
konfuzio_sdk/urls.py
atraining/document-ai-python-sdk
ea2df68af0254053da7e6f4c6e2c2df6d7911233
[ "MIT" ]
null
null
null
konfuzio_sdk/urls.py
atraining/document-ai-python-sdk
ea2df68af0254053da7e6f4c6e2c2df6d7911233
[ "MIT" ]
null
null
null
konfuzio_sdk/urls.py
atraining/document-ai-python-sdk
ea2df68af0254053da7e6f4c6e2c2df6d7911233
[ "MIT" ]
null
null
null
"""Endpoints of the Konfuzio Host.""" import logging from konfuzio_sdk import KONFUZIO_HOST, KONFUZIO_PROJECT_ID logger = logging.getLogger(__name__) def get_auth_token_url() -> str: """ Generate URL that creates an authentication token for the user. :return: URL to generate the token. """ return f"{KONFUZIO_HOST}/api/token-auth/" def get_project_list_url() -> str: """ Generate URL to load all the projects available for the user. :return: URL to get all the projects for the user. """ return f"{KONFUZIO_HOST}/api/projects/" def create_new_project_url() -> str: """ Generate URL to create a new project. :return: URL to create a new project. """ return f"{KONFUZIO_HOST}/api/projects/" def get_documents_meta_url() -> str: """ Generate URL to load meta information about documents. :return: URL to get all the documents details. """ return f"{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/" def get_upload_document_url() -> str: """ Generate URL to upload a document. :return: URL to upload a document """ return f"{KONFUZIO_HOST}/api/v2/docs/" def get_create_label_url() -> str: """ Generate URL to create a label. :return: URL to create a label. """ return f"{KONFUZIO_HOST}/api/v2/labels/" def get_document_ocr_file_url(document_id: int) -> str: """ Generate URL to access OCR version of document. :param document_id: ID of the document as integer :return: URL to get OCR document file. """ return f'{KONFUZIO_HOST}/doc/show/{document_id}/' def get_document_original_file_url(document_id: int) -> str: """ Generate URL to access original version of the document. :param document_id: ID of the document as integer :return: URL to get the original document """ return f'{KONFUZIO_HOST}/doc/show-original/{document_id}/' def get_document_api_details_url(document_id: int, include_extractions: bool = False, extra_fields='bbox') -> str: """ Generate URL to access document details of one document in a project. :param document_id: ID of the document as integer :param include_extractions: Bool to include extractions :param extra_fields: Extra information to include in the response :return: URL to get document details """ return ( f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'?include_extractions={include_extractions}&extra_fields={extra_fields}' ) def get_project_url(project_id=None) -> str: """ Generate URL to get project details. :param project_id: ID of the project :return: URL to get project details. """ project_id = project_id if project_id else KONFUZIO_PROJECT_ID return f'{KONFUZIO_HOST}/api/projects/{project_id}/' def post_project_api_document_annotations_url(document_id: int) -> str: """ Add new annotations to a document. :param document_id: ID of the document as integer :return: URL for adding annotations to a document """ return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/annotations/' def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str: """ Delete the annotation of a document. :param document_id: ID of the document as integer :param annotation_id: ID of the annotation as integer :return: URL to delete annotation of a document """ return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/' def get_document_result_v1(document_id: int) -> str: """ Generate URL to access web interface for labeling of this project. :param document_id: ID of the document as integer :return: URL for labeling of the project. 
""" return f'{KONFUZIO_HOST}/api/v1/docs/{document_id}/' def get_document_segmentation_details_url(document_id: int, project_id, action='segmentation') -> str: """ Generate URL to get the segmentation results of a document. :param document_id: ID of the document as integer :param project_id: ID of the project :param action: Action from where to get the results :return: URL to access the segmentation results of a document """ return f'https://app.konfuzio.com/api/projects/{project_id}/docs/{document_id}/{action}/'
29.291391
116
0.703821
"""Endpoints of the Konfuzio Host.""" import logging from konfuzio_sdk import KONFUZIO_HOST, KONFUZIO_PROJECT_ID logger = logging.getLogger(__name__) def get_auth_token_url() -> str: """ Generate URL that creates an authentication token for the user. :return: URL to generate the token. """ return f"{KONFUZIO_HOST}/api/token-auth/" def get_project_list_url() -> str: """ Generate URL to load all the projects available for the user. :return: URL to get all the projects for the user. """ return f"{KONFUZIO_HOST}/api/projects/" def create_new_project_url() -> str: """ Generate URL to create a new project. :return: URL to create a new project. """ return f"{KONFUZIO_HOST}/api/projects/" def get_documents_meta_url() -> str: """ Generate URL to load meta information about documents. :return: URL to get all the documents details. """ return f"{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/" def get_upload_document_url() -> str: """ Generate URL to upload a document. :return: URL to upload a document """ return f"{KONFUZIO_HOST}/api/v2/docs/" def get_create_label_url() -> str: """ Generate URL to create a label. :return: URL to create a label. """ return f"{KONFUZIO_HOST}/api/v2/labels/" def get_document_ocr_file_url(document_id: int) -> str: """ Generate URL to access OCR version of document. :param document_id: ID of the document as integer :return: URL to get OCR document file. """ return f'{KONFUZIO_HOST}/doc/show/{document_id}/' def get_document_original_file_url(document_id: int) -> str: """ Generate URL to access original version of the document. :param document_id: ID of the document as integer :return: URL to get the original document """ return f'{KONFUZIO_HOST}/doc/show-original/{document_id}/' def get_document_api_details_url(document_id: int, include_extractions: bool = False, extra_fields='bbox') -> str: """ Generate URL to access document details of one document in a project. :param document_id: ID of the document as integer :param include_extractions: Bool to include extractions :param extra_fields: Extra information to include in the response :return: URL to get document details """ return ( f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'?include_extractions={include_extractions}&extra_fields={extra_fields}' ) def get_project_url(project_id=None) -> str: """ Generate URL to get project details. :param project_id: ID of the project :return: URL to get project details. """ project_id = project_id if project_id else KONFUZIO_PROJECT_ID return f'{KONFUZIO_HOST}/api/projects/{project_id}/' def post_project_api_document_annotations_url(document_id: int) -> str: """ Add new annotations to a document. :param document_id: ID of the document as integer :return: URL for adding annotations to a document """ return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/annotations/' def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str: """ Delete the annotation of a document. :param document_id: ID of the document as integer :param annotation_id: ID of the annotation as integer :return: URL to delete annotation of a document """ return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/' def get_document_result_v1(document_id: int) -> str: """ Generate URL to access web interface for labeling of this project. :param document_id: ID of the document as integer :return: URL for labeling of the project. 
""" return f'{KONFUZIO_HOST}/api/v1/docs/{document_id}/' def get_document_segmentation_details_url(document_id: int, project_id, action='segmentation') -> str: """ Generate URL to get the segmentation results of a document. :param document_id: ID of the document as integer :param project_id: ID of the project :param action: Action from where to get the results :return: URL to access the segmentation results of a document """ return f'https://app.konfuzio.com/api/projects/{project_id}/docs/{document_id}/{action}/'
0
0
0
5a5179184e11bd69c115e048377711a912dc3761
440
py
Python
Web/user/models.py
Pancras-Zheng/Graduation-Project
5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0
[ "MIT" ]
37
2018-01-25T03:14:24.000Z
2021-12-15T10:02:37.000Z
Web/user/models.py
Pancras-Zheng/Graduation-Project
5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0
[ "MIT" ]
null
null
null
Web/user/models.py
Pancras-Zheng/Graduation-Project
5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0
[ "MIT" ]
10
2019-04-11T07:27:10.000Z
2021-11-24T11:16:14.000Z
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.base_user import BaseUserManager

# Create your models here.
27.5
69
0.747727
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.base_user import BaseUserManager

# Create your models here.
class User(AbstractUser):
    nickname = models.CharField(_('昵称'),max_length=50,blank=True)
    info = models.CharField(_('备注'),max_length=200,blank=True)

    class Meta(AbstractUser.Meta):
        pass
0
200
23
db4b3216850356cdd188fbda35706bb2acbe536c
14,096
py
Python
src/huggingface_hub/commands/user.py
FrancescoSaverioZuppichini/huggingface_hub
9e7ffda07ddcd668302a61156bcae0d9ec97a26e
[ "Apache-2.0" ]
1
2022-03-28T14:15:24.000Z
2022-03-28T14:15:24.000Z
src/huggingface_hub/commands/user.py
osanseviero/huggingface_hub
b1cf2d8f47088d3fce2244058d222a4d8234b3ab
[ "Apache-2.0" ]
null
null
null
src/huggingface_hub/commands/user.py
osanseviero/huggingface_hub
b1cf2d8f47088d3fce2244058d222a4d8234b3ab
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess from argparse import ArgumentParser from getpass import getpass from typing import List, Union from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.constants import ( REPO_TYPES, REPO_TYPES_URL_PREFIXES, SPACES_SDK_TYPES, ) from huggingface_hub.hf_api import HfApi, HfFolder from requests.exceptions import HTTPError class ANSI: """ Helper for en.wikipedia.org/wiki/ANSI_escape_code """ _bold = "\u001b[1m" _red = "\u001b[31m" _gray = "\u001b[90m" _reset = "\u001b[0m" @classmethod @classmethod @classmethod def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str: """ Inspired by: - stackoverflow.com/a/8356620/593036 - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data """ col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] row_format = ("{{:{}}} " * len(headers)).format(*col_widths) lines = [] lines.append(row_format.format(*headers)) lines.append(row_format.format(*["-" * w for w in col_widths])) for row in rows: lines.append(row_format.format(*row)) return "\n".join(lines) NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg alt='Hugging Face'> <br> Immediately click login after typing your password or it might be stored in plain text in this notebook file. </center>""" NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg alt='Hugging Face'> <br> Copy a token from <a href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face tokens page</a> and paste it below. <br> Immediately click login after copying your token or it might be stored in plain text in this notebook file. </center>""" NOTEBOOK_LOGIN_TOKEN_HTML_END = """ <b>Pro Tip:</b> If you don't already have one, you can create a dedicated 'notebooks' token with 'write' access, that you can then easily reuse for all notebooks. <br> <i>Logging in with your username and password is deprecated and won't be possible anymore in the near future. You can still use them for now by clicking below.</i> </center>""" def notebook_login(): """ Displays a widget to login to the HF website and store the token. """ try: import ipywidgets.widgets as widgets from IPython.display import clear_output, display except ImportError: raise ImportError( "The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the " "`ipywdidgets` module: `pip install ipywidgets`." 
) box_layout = widgets.Layout( display="flex", flex_flow="column", align_items="center", width="50%" ) token_widget = widgets.Password(description="Token:") token_finish_button = widgets.Button(description="Login") switch_button = widgets.Button(description="Use password") login_token_widget = widgets.VBox( [ widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START), token_widget, token_finish_button, widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END), switch_button, ], layout=box_layout, ) display(login_token_widget) # Deprecated page for login input_widget = widgets.Text(description="Username:") password_widget = widgets.Password(description="Password:") password_finish_button = widgets.Button(description="Login") login_password_widget = widgets.VBox( [ widgets.HTML(value=NOTEBOOK_LOGIN_PASSWORD_HTML), widgets.HBox([input_widget, password_widget]), password_finish_button, ], layout=box_layout, ) # On click events token_finish_button.on_click(login_token_event) password_finish_button.on_click(login_password_event) switch_button.on_click(switch_event)
35.064677
135
0.605065
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess from argparse import ArgumentParser from getpass import getpass from typing import List, Union from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.constants import ( REPO_TYPES, REPO_TYPES_URL_PREFIXES, SPACES_SDK_TYPES, ) from huggingface_hub.hf_api import HfApi, HfFolder from requests.exceptions import HTTPError class UserCommands(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): login_parser = parser.add_parser( "login", help="Log in using the same credentials as on huggingface.co" ) login_parser.set_defaults(func=lambda args: LoginCommand(args)) whoami_parser = parser.add_parser( "whoami", help="Find out which huggingface.co account you are logged in as." ) whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args)) logout_parser = parser.add_parser("logout", help="Log out") logout_parser.set_defaults(func=lambda args: LogoutCommand(args)) # new system: git-based repo system repo_parser = parser.add_parser( "repo", help="{create, ls-files} Commands to interact with your huggingface.co repos.", ) repo_subparsers = repo_parser.add_subparsers( help="huggingface.co repos related commands" ) repo_create_parser = repo_subparsers.add_parser( "create", help="Create a new repo on huggingface.co" ) repo_create_parser.add_argument( "name", type=str, help="Name for your repo. Will be namespaced under your username to build the repo id.", ) repo_create_parser.add_argument( "--type", type=str, help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.', ) repo_create_parser.add_argument( "--organization", type=str, help="Optional: organization namespace." ) repo_create_parser.add_argument( "--space_sdk", type=str, help='Optional: Hugging Face Spaces SDK type. 
Required when --type is set to "space".', choices=SPACES_SDK_TYPES, ) repo_create_parser.add_argument( "-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt", ) repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args)) class ANSI: """ Helper for en.wikipedia.org/wiki/ANSI_escape_code """ _bold = "\u001b[1m" _red = "\u001b[31m" _gray = "\u001b[90m" _reset = "\u001b[0m" @classmethod def bold(cls, s): return f"{cls._bold}{s}{cls._reset}" @classmethod def red(cls, s): return f"{cls._bold + cls._red}{s}{cls._reset}" @classmethod def gray(cls, s): return f"{cls._gray}{s}{cls._reset}" def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str: """ Inspired by: - stackoverflow.com/a/8356620/593036 - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data """ col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] row_format = ("{{:{}}} " * len(headers)).format(*col_widths) lines = [] lines.append(row_format.format(*headers)) lines.append(row_format.format(*["-" * w for w in col_widths])) for row in rows: lines.append(row_format.format(*row)) return "\n".join(lines) def currently_setup_credential_helpers(directory=None) -> List[str]: try: output = subprocess.run( "git config --list".split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf-8", check=True, cwd=directory, ).stdout.split("\n") current_credential_helpers = [] for line in output: if "credential.helper" in line: current_credential_helpers.append(line.split("=")[-1]) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) return current_credential_helpers class BaseUserCommand: def __init__(self, args): self.args = args self._api = HfApi() class LoginCommand(BaseUserCommand): def run(self): print( # docstyle-ignore """ _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_| _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_| To login, `huggingface_hub` now requires a token generated from https://huggingface.co/settings/tokens. (Deprecated, will be removed in v0.3.0) To login with username and password instead, interrupt with Ctrl+C. """ ) try: token = getpass("Token: ") _login(self._api, token=token) except KeyboardInterrupt: username = input("\rUsername: ") password = getpass() _login(self._api, username, password) class WhoamiCommand(BaseUserCommand): def run(self): token = HfFolder.get_token() if token is None: print("Not logged in") exit() try: info = self._api.whoami(token) print(info["name"]) orgs = [org["name"] for org in info["orgs"]] if orgs: print(ANSI.bold("orgs: "), ",".join(orgs)) except HTTPError as e: print(e) print(ANSI.red(e.response.text)) exit(1) class LogoutCommand(BaseUserCommand): def run(self): token = HfFolder.get_token() if token is None: print("Not logged in") exit() HfFolder.delete_token() HfApi.unset_access_token() try: self._api.logout(token) except HTTPError as e: # Logging out with an access token will return a client error. 
if not e.response.status_code == 400: raise e print("Successfully logged out.") class RepoCreateCommand(BaseUserCommand): def run(self): token = HfFolder.get_token() if token is None: print("Not logged in") exit(1) try: stdout = subprocess.check_output(["git", "--version"]).decode("utf-8") print(ANSI.gray(stdout.strip())) except FileNotFoundError: print("Looks like you do not have git installed, please install.") try: stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8") print(ANSI.gray(stdout.strip())) except FileNotFoundError: print( ANSI.red( "Looks like you do not have git-lfs installed, please install." " You can install from https://git-lfs.github.com/." " Then run `git lfs install` (you only have to do this once)." ) ) print("") user = self._api.whoami(token)["name"] namespace = ( self.args.organization if self.args.organization is not None else user ) repo_id = f"{namespace}/{self.args.name}" if self.args.type not in REPO_TYPES: print("Invalid repo --type") exit(1) if self.args.type in REPO_TYPES_URL_PREFIXES: prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id else: prefixed_repo_id = repo_id print(f"You are about to create {ANSI.bold(prefixed_repo_id)}") if not self.args.yes: choice = input("Proceed? [Y/n] ").lower() if not (choice == "" or choice == "y" or choice == "yes"): print("Abort") exit() try: url = self._api.create_repo( repo_id=repo_id, token=token, repo_type=self.args.type, space_sdk=self.args.space_sdk, ) except HTTPError as e: print(e) print(ANSI.red(e.response.text)) exit(1) print("\nYour repo now lives at:") print(f" {ANSI.bold(url)}") print( "\nYou can clone it locally with the command below," " and commit/push as usual." ) print(f"\n git clone {url}") print("") NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg alt='Hugging Face'> <br> Immediately click login after typing your password or it might be stored in plain text in this notebook file. </center>""" NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg alt='Hugging Face'> <br> Copy a token from <a href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face tokens page</a> and paste it below. <br> Immediately click login after copying your token or it might be stored in plain text in this notebook file. </center>""" NOTEBOOK_LOGIN_TOKEN_HTML_END = """ <b>Pro Tip:</b> If you don't already have one, you can create a dedicated 'notebooks' token with 'write' access, that you can then easily reuse for all notebooks. <br> <i>Logging in with your username and password is deprecated and won't be possible anymore in the near future. You can still use them for now by clicking below.</i> </center>""" def notebook_login(): """ Displays a widget to login to the HF website and store the token. """ try: import ipywidgets.widgets as widgets from IPython.display import clear_output, display except ImportError: raise ImportError( "The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the " "`ipywdidgets` module: `pip install ipywidgets`." 
) box_layout = widgets.Layout( display="flex", flex_flow="column", align_items="center", width="50%" ) token_widget = widgets.Password(description="Token:") token_finish_button = widgets.Button(description="Login") switch_button = widgets.Button(description="Use password") login_token_widget = widgets.VBox( [ widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START), token_widget, token_finish_button, widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END), switch_button, ], layout=box_layout, ) display(login_token_widget) # Deprecated page for login input_widget = widgets.Text(description="Username:") password_widget = widgets.Password(description="Password:") password_finish_button = widgets.Button(description="Login") login_password_widget = widgets.VBox( [ widgets.HTML(value=NOTEBOOK_LOGIN_PASSWORD_HTML), widgets.HBox([input_widget, password_widget]), password_finish_button, ], layout=box_layout, ) # On click events def login_token_event(t): token = token_widget.value # Erase token and clear value to make sure it's not saved in the notebook. token_widget.value = "" clear_output() _login(HfApi(), token=token) token_finish_button.on_click(login_token_event) def login_password_event(t): username = input_widget.value password = password_widget.value # Erase password and clear value to make sure it's not saved in the notebook. password_widget.value = "" clear_output() _login(HfApi(), username=username, password=password) password_finish_button.on_click(login_password_event) def switch_event(t): clear_output() display(login_password_widget) switch_button.on_click(switch_event) def _login(hf_api, username=None, password=None, token=None): if token is None: try: token = hf_api.login(username, password) except HTTPError as e: # probably invalid credentials, display error message. print(e) print(ANSI.red(e.response.text)) exit(1) else: token, name = hf_api._validate_or_retrieve_token(token) hf_api.set_access_token(token) HfFolder.save_token(token) print("Login successful") print("Your token has been saved to", HfFolder.path_token) helpers = currently_setup_credential_helpers() if "store" not in helpers: print( ANSI.red( "Authenticated through git-credential store but this isn't the helper defined on your machine.\nYou " "might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your " "terminal in case you want to set this credential helper as the default\n\ngit config --global credential.helper store" ) )
8,862
137
472
293ccee023fc91f0aa073e2ba442d3ed89f6b0d4
8,365
py
Python
src/ramstk/logger.py
TahaEntezari/ramstk
f82e5b31ef5c4e33cc02252263247b99a9abe129
[ "BSD-3-Clause" ]
26
2019-05-15T02:03:47.000Z
2022-02-21T07:28:11.000Z
src/ramstk/logger.py
TahaEntezari/ramstk
f82e5b31ef5c4e33cc02252263247b99a9abe129
[ "BSD-3-Clause" ]
815
2019-05-10T12:31:52.000Z
2022-03-31T12:56:26.000Z
src/ramstk/logger.py
TahaEntezari/ramstk
f82e5b31ef5c4e33cc02252263247b99a9abe129
[ "BSD-3-Clause" ]
9
2019-04-20T23:06:29.000Z
2022-01-24T21:21:04.000Z
# -*- coding: utf-8 -*- # # ramstk.logger.py is part of The RAMSTK Project # # All rights reserved. # Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com """RAMSTK Logger Module.""" # Standard Library Imports import logging import sys from typing import Dict # Third Party Imports from pubsub import pub LOGFORMAT = logging.Formatter("%(asctime)s - %(name)s - %(lineno)s : %(message)s") class RAMSTKLogManager: """Class to manage logging of RAMSTK messages.""" loggers: Dict[str, logging.Logger] = {} def __init__(self, log_file: str) -> None: """Initialize an instance of the LogManager. :param log_file: the absolute path to the log file to use with this log manager. """ # Initialize private dictionary attributes. # Initialize private list attributes. # Initialize private scalar attributes. # Initialize public dictionary attributes. # Initialize public list attributes. # Initialize public scalar attributes. self.log_file = log_file # Subscribe to PyPubSub messages. pub.subscribe(self._do_log_fail_message, "fail_connect_program_database") pub.subscribe(self._do_log_fail_message, "fail_delete_environment") pub.subscribe(self._do_log_fail_message, "fail_delete_failure_definition") pub.subscribe(self._do_log_fail_message, "fail_delete_fmea") pub.subscribe(self._do_log_fail_message, "fail_delete_function") pub.subscribe(self._do_log_fail_message, "fail_delete_hazard") pub.subscribe(self._do_log_fail_message, "fail_delete_mission") pub.subscribe(self._do_log_fail_message, "fail_delete_mission_phase") pub.subscribe(self._do_log_fail_message, "fail_delete_revision") pub.subscribe(self._do_log_fail_message, "fail_import_module") pub.subscribe(self._do_log_fail_message, "fail_insert_action") pub.subscribe(self._do_log_fail_message, "fail_insert_cause") pub.subscribe(self._do_log_fail_message, "fail_insert_control") pub.subscribe(self._do_log_fail_message, "fail_insert_environment") pub.subscribe(self._do_log_fail_message, "fail_insert_failure_definition") pub.subscribe(self._do_log_fail_message, "fail_insert_mechanism") pub.subscribe(self._do_log_fail_message, "fail_insert_mission") pub.subscribe(self._do_log_fail_message, "fail_insert_mission_phase") pub.subscribe(self._do_log_fail_message, "fail_insert_mode") pub.subscribe(self._do_log_fail_message, "fail_insert_function") pub.subscribe(self._do_log_fail_message, "fail_insert_hazard") pub.subscribe(self._do_log_fail_message, "fail_insert_hardware") pub.subscribe(self._do_log_fail_message, "fail_insert_validation") pub.subscribe(self._do_log_fail_message, "fail_insert_stakeholder") pub.subscribe(self._do_log_fail_message, "fail_insert_revision") pub.subscribe(self._do_log_fail_message, "fail_insert_requirement") pub.subscribe(self._do_log_fail_message, "fail_insert_opload") pub.subscribe(self._do_log_fail_message, "fail_insert_opstress") pub.subscribe(self._do_log_fail_message, "fail_insert_record") pub.subscribe(self._do_log_fail_message, "fail_insert_test_method") pub.subscribe(self._do_log_fail_message, "fail_update_fmea") pub.subscribe(self._do_log_fail_message, "fail_update_function") pub.subscribe(self._do_log_fail_message, "fail_update_hardware") pub.subscribe(self._do_log_fail_message, "fail_update_record") pub.subscribe(self._do_log_fail_message, "fail_update_requirement") pub.subscribe(self._do_log_fail_message, "fail_update_revision") pub.subscribe(self.do_log_debug, "do_log_debug_msg") pub.subscribe(self.do_log_info, "do_log_info_msg") pub.subscribe(self.do_log_warning, "do_log_warning_msg") pub.subscribe(self.do_log_error, 
"do_log_error_msg") pub.subscribe(self.do_log_critical, "do_log_critical_msg") # Create a logger for the pypubsub fail_* messages. self.do_create_logger(__name__, "WARN") def _do_log_fail_message(self, error_message: str) -> None: """Log PyPubSub broadcast fail messages. :param error_message: the error message that was part of the broadcast package. :return: None :rtype: None """ self.loggers[__name__].warning(error_message) @staticmethod def _get_console_handler(log_level: str) -> logging.Handler: """Create the log handler for console output. :return: _c_handler :rtype: :class:`logging.Handler` """ _c_handler = logging.StreamHandler(sys.stdout) _c_handler.setLevel(log_level) _c_handler.setFormatter(LOGFORMAT) return _c_handler def _get_file_handler(self, log_level: str) -> logging.Handler: """Create the log handler for file output. :return: _f_handler :rtype: :class:`logging.Handler` """ _f_handler = logging.FileHandler(self.log_file) _f_handler.setLevel(log_level) _f_handler.setFormatter(LOGFORMAT) return _f_handler def do_create_logger( self, logger_name: str, log_level: str, to_tty: bool = False ) -> None: """Create a logger instance. :param logger_name: the name of the logger used in the application. :param log_level: the level of messages to log. :param to_tty: boolean indicating whether this logger will also dump messages to the terminal. :return: None :rtype: None """ _logger = logging.getLogger(logger_name) _logger.setLevel(log_level) _logger.addHandler(self._get_file_handler(log_level)) if to_tty: _logger.addHandler(self._get_console_handler(log_level)) self.loggers[logger_name] = _logger def do_log_debug(self, logger_name: str, message: str) -> None: """Log DEBUG level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.DEBUG): self.loggers[logger_name].debug(message) def do_log_exception(self, logger_name: str, exception: object) -> None: """Log EXCEPTIONS. :param logger_name: the name of the logger used in the application. :param exception: the exception to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.WARNING): self.loggers[logger_name].exception(exception) def do_log_info(self, logger_name: str, message: str) -> None: """Log INFO level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.INFO): self.loggers[logger_name].info(message) def do_log_warning(self, logger_name: str, message: str) -> None: """Log WARN level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.WARNING): self.loggers[logger_name].warning(message) def do_log_error(self, logger_name: str, message: str) -> None: """Log ERROR level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.ERROR): self.loggers[logger_name].error(message) def do_log_critical(self, logger_name: str, message: str) -> None: """Log CRITICAL level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ self.loggers[logger_name].critical(message)
39.64455
82
0.68416
# -*- coding: utf-8 -*- # # ramstk.logger.py is part of The RAMSTK Project # # All rights reserved. # Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com """RAMSTK Logger Module.""" # Standard Library Imports import logging import sys from typing import Dict # Third Party Imports from pubsub import pub LOGFORMAT = logging.Formatter("%(asctime)s - %(name)s - %(lineno)s : %(message)s") class RAMSTKLogManager: """Class to manage logging of RAMSTK messages.""" loggers: Dict[str, logging.Logger] = {} def __init__(self, log_file: str) -> None: """Initialize an instance of the LogManager. :param log_file: the absolute path to the log file to use with this log manager. """ # Initialize private dictionary attributes. # Initialize private list attributes. # Initialize private scalar attributes. # Initialize public dictionary attributes. # Initialize public list attributes. # Initialize public scalar attributes. self.log_file = log_file # Subscribe to PyPubSub messages. pub.subscribe(self._do_log_fail_message, "fail_connect_program_database") pub.subscribe(self._do_log_fail_message, "fail_delete_environment") pub.subscribe(self._do_log_fail_message, "fail_delete_failure_definition") pub.subscribe(self._do_log_fail_message, "fail_delete_fmea") pub.subscribe(self._do_log_fail_message, "fail_delete_function") pub.subscribe(self._do_log_fail_message, "fail_delete_hazard") pub.subscribe(self._do_log_fail_message, "fail_delete_mission") pub.subscribe(self._do_log_fail_message, "fail_delete_mission_phase") pub.subscribe(self._do_log_fail_message, "fail_delete_revision") pub.subscribe(self._do_log_fail_message, "fail_import_module") pub.subscribe(self._do_log_fail_message, "fail_insert_action") pub.subscribe(self._do_log_fail_message, "fail_insert_cause") pub.subscribe(self._do_log_fail_message, "fail_insert_control") pub.subscribe(self._do_log_fail_message, "fail_insert_environment") pub.subscribe(self._do_log_fail_message, "fail_insert_failure_definition") pub.subscribe(self._do_log_fail_message, "fail_insert_mechanism") pub.subscribe(self._do_log_fail_message, "fail_insert_mission") pub.subscribe(self._do_log_fail_message, "fail_insert_mission_phase") pub.subscribe(self._do_log_fail_message, "fail_insert_mode") pub.subscribe(self._do_log_fail_message, "fail_insert_function") pub.subscribe(self._do_log_fail_message, "fail_insert_hazard") pub.subscribe(self._do_log_fail_message, "fail_insert_hardware") pub.subscribe(self._do_log_fail_message, "fail_insert_validation") pub.subscribe(self._do_log_fail_message, "fail_insert_stakeholder") pub.subscribe(self._do_log_fail_message, "fail_insert_revision") pub.subscribe(self._do_log_fail_message, "fail_insert_requirement") pub.subscribe(self._do_log_fail_message, "fail_insert_opload") pub.subscribe(self._do_log_fail_message, "fail_insert_opstress") pub.subscribe(self._do_log_fail_message, "fail_insert_record") pub.subscribe(self._do_log_fail_message, "fail_insert_test_method") pub.subscribe(self._do_log_fail_message, "fail_update_fmea") pub.subscribe(self._do_log_fail_message, "fail_update_function") pub.subscribe(self._do_log_fail_message, "fail_update_hardware") pub.subscribe(self._do_log_fail_message, "fail_update_record") pub.subscribe(self._do_log_fail_message, "fail_update_requirement") pub.subscribe(self._do_log_fail_message, "fail_update_revision") pub.subscribe(self.do_log_debug, "do_log_debug_msg") pub.subscribe(self.do_log_info, "do_log_info_msg") pub.subscribe(self.do_log_warning, "do_log_warning_msg") pub.subscribe(self.do_log_error, 
"do_log_error_msg") pub.subscribe(self.do_log_critical, "do_log_critical_msg") # Create a logger for the pypubsub fail_* messages. self.do_create_logger(__name__, "WARN") def _do_log_fail_message(self, error_message: str) -> None: """Log PyPubSub broadcast fail messages. :param error_message: the error message that was part of the broadcast package. :return: None :rtype: None """ self.loggers[__name__].warning(error_message) @staticmethod def _get_console_handler(log_level: str) -> logging.Handler: """Create the log handler for console output. :return: _c_handler :rtype: :class:`logging.Handler` """ _c_handler = logging.StreamHandler(sys.stdout) _c_handler.setLevel(log_level) _c_handler.setFormatter(LOGFORMAT) return _c_handler def _get_file_handler(self, log_level: str) -> logging.Handler: """Create the log handler for file output. :return: _f_handler :rtype: :class:`logging.Handler` """ _f_handler = logging.FileHandler(self.log_file) _f_handler.setLevel(log_level) _f_handler.setFormatter(LOGFORMAT) return _f_handler def do_create_logger( self, logger_name: str, log_level: str, to_tty: bool = False ) -> None: """Create a logger instance. :param logger_name: the name of the logger used in the application. :param log_level: the level of messages to log. :param to_tty: boolean indicating whether this logger will also dump messages to the terminal. :return: None :rtype: None """ _logger = logging.getLogger(logger_name) _logger.setLevel(log_level) _logger.addHandler(self._get_file_handler(log_level)) if to_tty: _logger.addHandler(self._get_console_handler(log_level)) self.loggers[logger_name] = _logger def do_log_debug(self, logger_name: str, message: str) -> None: """Log DEBUG level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.DEBUG): self.loggers[logger_name].debug(message) def do_log_exception(self, logger_name: str, exception: object) -> None: """Log EXCEPTIONS. :param logger_name: the name of the logger used in the application. :param exception: the exception to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.WARNING): self.loggers[logger_name].exception(exception) def do_log_info(self, logger_name: str, message: str) -> None: """Log INFO level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.INFO): self.loggers[logger_name].info(message) def do_log_warning(self, logger_name: str, message: str) -> None: """Log WARN level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.WARNING): self.loggers[logger_name].warning(message) def do_log_error(self, logger_name: str, message: str) -> None: """Log ERROR level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ if self.loggers[logger_name].isEnabledFor(logging.ERROR): self.loggers[logger_name].error(message) def do_log_critical(self, logger_name: str, message: str) -> None: """Log CRITICAL level messages. :param logger_name: the name of the logger used in the application. :param message: the message to log. :return: None :rtype: None """ self.loggers[logger_name].critical(message)
0
0
0
f4bbd3c26bf1e8d647337c4dd66784c1c9d86a9f
2,458
py
Python
examples/demo/status_overlay.py
martinRenou/chaco
1888da3ecee89f9b2d11900cda9333b32fc5e89a
[ "BSD-3-Clause" ]
3
2017-09-17T17:32:06.000Z
2022-03-15T13:04:43.000Z
examples/demo/status_overlay.py
martinRenou/chaco
1888da3ecee89f9b2d11900cda9333b32fc5e89a
[ "BSD-3-Clause" ]
null
null
null
examples/demo/status_overlay.py
martinRenou/chaco
1888da3ecee89f9b2d11900cda9333b32fc5e89a
[ "BSD-3-Clause" ]
5
2015-05-17T16:08:11.000Z
2021-02-23T09:23:42.000Z
import numpy

from chaco.api import Plot, ArrayPlotData
from chaco.layers.api import ErrorLayer, WarningLayer, StatusLayer
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, Button
from traitsui.api import UItem, View, HGroup


class MyPlot(HasTraits):
    """ Displays a plot with a few buttons to control which overlay to display """

    plot = Instance(Plot)
    status_overlay = Instance(StatusLayer)

    error_button = Button('Error')
    warn_button = Button('Warning')
    no_problem_button = Button('No problem')

    traits_view = View(
        HGroup(UItem('error_button'), UItem('warn_button'), UItem('no_problem_button')),
        UItem('plot', editor=ComponentEditor()),
        width=700, height=600, resizable=True,
    )

    def _error_button_fired(self, event):
        """ removes the old overlay and replaces it with an error overlay """
        self.clear_status()
        self.status_overlay = ErrorLayer(component=self.plot, align='ul', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _warn_button_fired(self, event):
        """ removes the old overlay and replaces it with an warning overlay """
        self.clear_status()
        self.status_overlay = WarningLayer(component=self.plot, align='ur', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _no_problem_button_fired(self, event):
        """ removes the old overlay """
        self.clear_status()
        self.plot.request_redraw()


index = numpy.array([1,2,3,4,5])
data_series = index**2

my_plot = MyPlot(index, data_series)
my_plot.configure_traits()
32.342105
74
0.614321
import numpy

from chaco.api import Plot, ArrayPlotData
from chaco.layers.api import ErrorLayer, WarningLayer, StatusLayer
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, Button
from traitsui.api import UItem, View, HGroup


class MyPlot(HasTraits):
    """ Displays a plot with a few buttons to control which overlay to display """

    plot = Instance(Plot)
    status_overlay = Instance(StatusLayer)

    error_button = Button('Error')
    warn_button = Button('Warning')
    no_problem_button = Button('No problem')

    traits_view = View(
        HGroup(UItem('error_button'), UItem('warn_button'), UItem('no_problem_button')),
        UItem('plot', editor=ComponentEditor()),
        width=700, height=600, resizable=True,
    )

    def __init__(self, index, data_series, **kw):
        super(MyPlot, self).__init__(**kw)

        plot_data = ArrayPlotData(index=index)
        plot_data.set_data('data_series', data_series)
        self.plot = Plot(plot_data)
        self.plot.plot(('index', 'data_series'))

    def _error_button_fired(self, event):
        """ removes the old overlay and replaces it with an error overlay """
        self.clear_status()
        self.status_overlay = ErrorLayer(component=self.plot, align='ul', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _warn_button_fired(self, event):
        """ removes the old overlay and replaces it with an warning overlay """
        self.clear_status()
        self.status_overlay = WarningLayer(component=self.plot, align='ur', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _no_problem_button_fired(self, event):
        """ removes the old overlay """
        self.clear_status()
        self.plot.request_redraw()

    def clear_status(self):
        if self.status_overlay in self.plot.overlays:
            # fade_out will remove the overlay when its done
            self.status_overlay.fade_out()


index = numpy.array([1,2,3,4,5])
data_series = index**2

my_plot = MyPlot(index, data_series)
my_plot.configure_traits()
415
0
54
08cdc43106ee16eac03626a91a328ff78df10a22
681
py
Python
multi_threadpool_executor.py
Dev-Bobbie/multi_spider
8fd19ab70de04b6cac021d354850b07ffcf360f2
[ "Apache-2.0" ]
null
null
null
multi_threadpool_executor.py
Dev-Bobbie/multi_spider
8fd19ab70de04b6cac021d354850b07ffcf360f2
[ "Apache-2.0" ]
null
null
null
multi_threadpool_executor.py
Dev-Bobbie/multi_spider
8fd19ab70de04b6cac021d354850b07ffcf360f2
[ "Apache-2.0" ]
null
null
null
from concurrent.futures import ThreadPoolExecutor
import time

if __name__ == '__main__':
    main()
24.321429
49
0.625551
from concurrent.futures import ThreadPoolExecutor
import time

def sayhello(a):
    print("hello: "+a)
    time.sleep(2)

def main():
    seed=["a","b","c"]
    start1=time.time()
    for each in seed:
        sayhello(each)
    end1=time.time()
    print("time1: "+str(end1-start1))
    start2=time.time()
    with ThreadPoolExecutor(3) as executor:
        for each in seed:
            executor.submit(sayhello,each)
    end2=time.time()
    print("time2: "+str(end2-start2))
    start3=time.time()
    with ThreadPoolExecutor(3) as executor1:
        executor1.map(sayhello,seed)
    end3=time.time()
    print("time3: "+str(end3-start3))

if __name__ == '__main__':
    main()
535
0
46
61b5793ee25a599b5e5738633cc2cd220b7bf9e9
7,541
py
Python
boss2.py
Jamhacks2018/TheJamExpansion
1acec353e666fef6608e06b57e82683053e7f060
[ "MIT" ]
null
null
null
boss2.py
Jamhacks2018/TheJamExpansion
1acec353e666fef6608e06b57e82683053e7f060
[ "MIT" ]
null
null
null
boss2.py
Jamhacks2018/TheJamExpansion
1acec353e666fef6608e06b57e82683053e7f060
[ "MIT" ]
3
2018-05-05T19:59:56.000Z
2020-11-15T21:06:27.000Z
from pygame import *
from enemies import *
import random

init()

fontGeneral = font.Font('resources/fonts/Calibri.ttf', 30)
fontHealth = font.Font('resources/fonts/Calibri Bold.ttf', 15)

#draws itself and its health
40.983696
287
0.502851
from pygame import * from enemies import * import random init() fontGeneral = font.Font('resources/fonts/Calibri.ttf', 30) fontHealth = font.Font('resources/fonts/Calibri Bold.ttf', 15) class Cap(): def __init__(self): #initialize the image and pos of cap: self.img = image.load('resources/jam/boss/cap.png') self.x = 0 self.y = -150 self.rect = Rect(self.x, self.y, 722, 149) def draw(self, screen): screen.blit(self.image[self.phase], self.Rect()) self.rect = Rect(self.x, self.y, self.image[1].get_width(), self.image[1].get_height()) def check(self): for b in bullets: if b.rect.colliderrect(self.rect): self.health -= b.dmg #check if it is supposed to die, if dead start boss phase 2: class Boss(): def __init__(self): #initialize the image and pos: self.image = image.load('resources/jam/boss/uncapped.png').convert_alpha() self.w = self.image.get_width() // 5 self.h = self.image.get_width() // 5 self.x = 300 self.y = 25 self.rect = Rect(self.x, self.y, self.w, self.h) self.image = transform.scale(self.image, (self.w, self.h)) self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10) self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10) self.gun1 = (self.rect.bottomright[0] + self.w // 2, self.rect.bottomright[1]-10) self.guns = [self.gun1, self.gun2] self.firing_speed = [25, 20, 15] self.firing_time = 0 #grace time is reset if grace time is reached self.grace_timers = [120, 90, 65] self.grace_time = 180 #initialize boss properties self.phase = 0 self.max_health = 12000 self.health = self.max_health self.vulnerable = True self.attacks = [False, False] self.directions = 0 #counter of how much boss moved self.frames_spent_moving = 0 #draws itself and its health def draw(self, screen): screen.blit(self.image, self.rect) draw.rect(screen, (255, 0, 255), (15, 700 - 85, int(985 * self.health / self.max_health), 75)) screen.blit(fontGeneral.render("Boss health: %i/%i" %(self.health, self.max_health), 1, (0, 255, 0)), (467 - fontHealth.size("Boss health: %i/%i" %(self.health, self.max_health))[0] // 2, 700 - 55 - fontHealth.size("Boss health: %i/%i" %(self.health, self.max_health))[1] // 2)) def update(self, pl, eb): if self.grace_time == 0: #handles attack timings with some randomness self.attacks[random.randint(0,1)] = True self.directions = random.randint(0,3) #resets movement during attacks self.frames_spent_moving = 0 #handles in between attack grace timers self.grace_time = self.grace_timers[self.phase] else: #handles movement between attacks if self.frames_spent_moving <= 30: self.move() self.frames_spent_moving += 1 self.grace_time -= 1 self.rect = Rect(self.x, self.y, self.w, self.h) self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10) self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10) self.gun1 = (self.rect.bottomright[0] - self.w // 2, self.rect.bottomright[1]-10) self.guns = [self.gun1, self.gun2] #tries to fire each attack self.sweeper(eb) self.ring(eb) def check(self, bullets, pickups, pl): for b in bullets: if b.rect.colliderect(self.rect): self.health -= b.dmg + pl.dmg_upG #if health permits, spawns a randomly placed heart if 0 <= self.health%500 <= 10 and self.health != self.max_health: pickups.append(Heart(random.randint(300, 700), random.randint(200, 500), random.randint(250, 500))) if 0 <= self.health%250 <= 10 and self.health != self.max_health: self.weakpoint = random.randint(0, 4) self.health -= 11 # checks if it is supposed to die if self.health <= 0: self.health = self.max_health return False #check for phase change 
elif self.health < 8000: self.phase = 2 elif self.health < 4000: self.phase = 3 return True def move(self): #very similar to pl.directions, moves if it can if self.directions == 0: if self.y < 100: self.y += 3 print("move 1") elif self.directions == 1: if 0 < self.y: self.y -= 3 print("move 2") elif self.directions == 2: if 0 < self.x: self.x -= 10 print("move 3") elif self.directions == 3: if self.x + 800 < 1000: self.x += 10 print("move 4") def sweeper(self, enemyBullets): #shoots stream of bullets from left to right from random guns if self.attacks[1]: for angle in range(10, 170, 5): #checks if timer conditions are just right if self.firing_time + 10 == angle: self.target_angle = (self.gun2[0] + 50 * cos(radians(angle)), self.gun2[1] + 50 * sin(radians(angle))) enemyBullets.append(JamBullet(self.gun2[0], self.gun2[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1))) self.target_angle = (self.gun3[0] + 50 * cos(radians(180 - angle)), self.gun3[1] + 50 * sin(radians(180 -angle))) enemyBullets.append(JamBullet(self.gun3[0], self.gun3[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1))) #ends attack if self.firing_time + 10 >= 170: self.attacks[1] = False self.firing_time = 0 break else: self.firing_time += 2 def ring(self, enemyBullets): if self.attacks[0]: for angle in range(0, 360, 10): if self.firing_time == angle: self.target_angle = (self.rect.centerx + 50 * cos(radians(angle)), self.rect.centery + 50 * sin(radians(angle))) enemyBullets.append(JamBullet(self.rect.centerx, self.rect.centery, self.target_angle[0], self.target_angle[1], 15 * self.phase)) if self.firing_time >= 360: self.attacks[0] = False self.firing_time = 0 break else: self.firing_time += 2.5
6,824
-17
424
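The boss record above aims its ring() bullets by placing each target on a circle around the boss centre, cx + r*cos(theta), cy + r*sin(theta), stepping theta by 10 degrees. A minimal standalone sketch of that aiming step (hypothetical helper name, using Python's math module explicitly rather than the game's star imports):

import math

def ring_targets(cx, cy, radius=50, step_deg=10):
    """Aim points spaced every step_deg degrees on a circle of `radius`
    pixels around (cx, cy), as in the boss ring() attack."""
    targets = []
    for angle in range(0, 360, step_deg):
        rad = math.radians(angle)
        targets.append((cx + radius * math.cos(rad),
                        cy + radius * math.sin(rad)))
    return targets

# Example: 36 evenly spaced aim points around a boss centred at (350, 75).
print(len(ring_targets(350, 75)))  # 36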
9084b7ccd8e3dba852fd6469469662507b5a8c2b
24,781
py
Python
src/simulator/network_wrong_mi.py
ChenGeng-ZJU/PCC-RL
6627a186643175ea68269d78e206e6bc45ac634f
[ "Apache-2.0" ]
null
null
null
src/simulator/network_wrong_mi.py
ChenGeng-ZJU/PCC-RL
6627a186643175ea68269d78e206e6bc45ac634f
[ "Apache-2.0" ]
null
null
null
src/simulator/network_wrong_mi.py
ChenGeng-ZJU/PCC-RL
6627a186643175ea68269d78e206e6bc45ac634f
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Nathan Jay and Noga Rotman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import heapq import os import random import sys import time import math import warnings warnings.simplefilter(action='ignore', category=UserWarning) import gym from gym import spaces from gym.envs.registration import register from gym.utils import seeding import numpy as np from common import sender_obs from common.utils import pcc_aurora_reward, read_json_file from simulator.trace import Trace import pandas as pd MAX_CWND = 5000 MIN_CWND = 4 MAX_RATE = 20000 MIN_RATE = 5 REWARD_SCALE = 0.001 EVENT_TYPE_SEND = 'S' EVENT_TYPE_ACK = 'A' BYTES_PER_PACKET = 1500 LATENCY_PENALTY = 1.0 LOSS_PENALTY = 1.0 USE_LATENCY_NOISE = True MAX_LATENCY_NOISE = 1.1 # DEBUG = True DEBUG = False MI_RTT_PROPORTION = 1.0 # PACKET_LOG_FLAG = False PACKET_LOG_FLAG = True register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
38.183359
130
0.561438
# Copyright 2019 Nathan Jay and Noga Rotman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import heapq import os import random import sys import time import math import warnings warnings.simplefilter(action='ignore', category=UserWarning) import gym from gym import spaces from gym.envs.registration import register from gym.utils import seeding import numpy as np from common import sender_obs from common.utils import pcc_aurora_reward, read_json_file from simulator.trace import Trace import pandas as pd MAX_CWND = 5000 MIN_CWND = 4 MAX_RATE = 20000 MIN_RATE = 5 REWARD_SCALE = 0.001 EVENT_TYPE_SEND = 'S' EVENT_TYPE_ACK = 'A' BYTES_PER_PACKET = 1500 LATENCY_PENALTY = 1.0 LOSS_PENALTY = 1.0 USE_LATENCY_NOISE = True MAX_LATENCY_NOISE = 1.1 # DEBUG = True DEBUG = False MI_RTT_PROPORTION = 1.0 # PACKET_LOG_FLAG = False PACKET_LOG_FLAG = True def debug_print(msg): if DEBUG: print(msg, file=sys.stderr, flush=True) class EmuReplay: def __init__(self, ): df = pd.read_csv('aurora_emulation_log.csv') self.ts = df['timestamp'].tolist() self.send_rate = df['send_rate'].tolist() self.idx = 0 def get_ts(self): if self.idx > len(self.ts): self.idx = len(self.ts) -1 ts = self.ts[self.idx] self.idx += 1 return ts def get_rate(self): return self.send_rate[self.idx] / 8 / BYTES_PER_PACKET def reset(self): self.idx = 0 class Link(): def __init__(self, trace: Trace): self.trace = trace self.queue_delay = 0.0 self.queue_delay_update_time = 0.0 self.queue_size = self.trace.get_queue_size() self.pkt_in_queue = 0 def get_cur_queue_delay(self, event_time): self.pkt_in_queue = max(0, self.pkt_in_queue - (event_time - self.queue_delay_update_time) * self.get_bandwidth(event_time)) self.queue_delay_update_time = event_time cur_queue_delay = math.ceil( self.pkt_in_queue) / self.get_bandwidth(event_time) return cur_queue_delay def get_cur_latency(self, event_time): q_delay = self.get_cur_queue_delay(event_time) # print('queue delay: ', q_delay) return self.trace.get_delay(event_time) / 1000.0 + q_delay def packet_enters_link(self, event_time): if (random.random() < self.trace.get_loss_rate()): return False self.queue_delay = self.get_cur_queue_delay(event_time) extra_delay = 1.0 / self.get_bandwidth(event_time) if 1 + math.ceil(self.pkt_in_queue) > self.queue_size: # print("{}\tDrop!".format(event_time)) return False self.queue_delay += extra_delay self.pkt_in_queue += 1 return True def print_debug(self): print("Link:") # TODO: Do not use timestamp 0. print("Bandwidth: %.3fMbps" % (self.trace.get_bandwidth(0))) # TODO: Do not use timestamp 0. 
print("Delay: %.3fms" % (self.trace.get_delay(0))) print("Queue Delay: %.3fms" % (self.queue_delay * 1000)) print("One Packet Queue Delay: %.3fms" % ( 1000.0 * 1 / (self.trace.get_bandwidth(0) * 1e6 / 8 / BYTES_PER_PACKET))) print("Queue size: %dpackets" % self.queue_size) print("Loss: %.4f" % self.trace.get_loss_rate()) def reset(self): self.queue_delay = 0.0 self.queue_delay_update_time = 0.0 self.pkt_in_queue = 0 def get_bandwidth(self, ts): return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET class Network(): def __init__(self, senders, links, env): self.event_count = 0 self.q = [] self.cur_time = 0.0 self.senders = senders self.links = links self.queue_initial_packets() self.env = env self.pkt_log = [] def queue_initial_packets(self): for sender in self.senders: sender.register_network(self) sender.reset_obs() heapq.heappush(self.q, (0, sender, EVENT_TYPE_SEND, 0, 0.0, False, self.event_count, sender.rto, 0)) self.event_count += 1 def reset(self): self.pkt_log = [] self.cur_time = 0.0 self.q = [] [link.reset() for link in self.links] [sender.reset() for sender in self.senders] self.queue_initial_packets() def get_cur_time(self): return self.cur_time def run_for_dur(self, dur, action=None): # if self.cur_time > 1.75: # pass # else: # self.senders[0].rate = self.env.replay.get_rate() # dur = self.env.replay.get_ts() - self.cur_time end_time = min(self.cur_time + dur, self.env.current_trace.timestamps[-1]) debug_print('MI from {} to {}, dur {}'.format( self.cur_time, end_time, dur)) for sender in self.senders: sender.reset_obs() while True: event_time, sender, event_type, next_hop, cur_latency, dropped, \ event_id, rto, event_queue_delay = self.q[0] if event_time >= end_time: self.cur_time = end_time break event_time, sender, event_type, next_hop, cur_latency, dropped, \ event_id, rto, event_queue_delay = heapq.heappop(self.q) self.cur_time = event_time new_event_time = event_time new_event_type = event_type new_next_hop = next_hop new_latency = cur_latency new_dropped = dropped new_event_queue_delay = event_queue_delay push_new_event = False debug_print("Got %d event %s, to link %d, latency %f at time %f, " "next_hop %d, dropped %s, event_q length %f, " "sender rate %f, duration: %f, queue_size: %f, " "rto: %f, cwnd: %f, ssthresh: %f, sender rto %f, " "pkt in flight %d, wait time %d" % ( event_id, event_type, next_hop, cur_latency, event_time, next_hop, dropped, len(self.q), sender.rate, dur, self.links[0].queue_size, rto, sender.cwnd, sender.ssthresh, sender.rto, int(sender.bytes_in_flight/BYTES_PER_PACKET), sender.pkt_loss_wait_time)) if event_type == EVENT_TYPE_ACK: if next_hop == len(sender.path): # if cur_latency > 1.0: # sender.timeout(cur_latency) # sender.on_packet_lost(cur_latency) if rto >= 0 and cur_latency > rto and sender.pkt_loss_wait_time <= 0: sender.timeout() dropped = True new_dropped = True elif dropped: sender.on_packet_lost(cur_latency) if PACKET_LOG_FLAG: self.pkt_log.append([self.cur_time, event_id, 'lost', BYTES_PER_PACKET]) else: sender.on_packet_acked(cur_latency) debug_print('Ack packet at {}'.format(self.cur_time)) # log packet acked if PACKET_LOG_FLAG: self.pkt_log.append([self.cur_time, event_id, 'acked', BYTES_PER_PACKET, cur_latency, event_queue_delay]) else: new_next_hop = next_hop + 1 new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay( self.cur_time) link_latency = sender.path[next_hop].get_cur_latency( self.cur_time) # link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time) # if USE_LATENCY_NOISE: # 
link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE) new_latency += link_latency new_event_time += link_latency push_new_event = True elif event_type == EVENT_TYPE_SEND: if next_hop == 0: if sender.can_send_packet(): sender.on_packet_sent() # print('Send packet at {}'.format(self.cur_time)) if PACKET_LOG_FLAG: self.pkt_log.append([self.cur_time, event_id, 'sent', BYTES_PER_PACKET]) push_new_event = True heapq.heappush(self.q, (self.cur_time + (1.0 / sender.rate), sender, EVENT_TYPE_SEND, 0, 0.0, False, self.event_count, sender.rto, 0)) self.event_count += 1 else: push_new_event = True if next_hop == sender.dest: new_event_type = EVENT_TYPE_ACK new_next_hop = next_hop + 1 new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay( self.cur_time) link_latency = sender.path[next_hop].get_cur_latency( self.cur_time) # if USE_LATENCY_NOISE: # link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE) # link_latency += self.env.current_trace.get_delay_noise(self.cur_time) / 1000 # link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time) new_latency += link_latency new_event_time += link_latency new_dropped = not sender.path[next_hop].packet_enters_link( self.cur_time) if not new_dropped: sender.queue_delay_samples.append(new_event_queue_delay) if push_new_event: heapq.heappush(self.q, (new_event_time, sender, new_event_type, new_next_hop, new_latency, new_dropped, event_id, rto, new_event_queue_delay)) for sender in self.senders: sender.record_run() sender_mi = self.senders[0].get_run_data() throughput = sender_mi.get("recv rate") # bits/sec latency = sender_mi.get("avg latency") # second loss = sender_mi.get("loss ratio") debug_print("thpt %f, delay %f, loss %f, bytes sent %f, bytes acked %f" % ( throughput/1e6, latency, loss, sender_mi.bytes_sent, sender_mi.bytes_acked)) reward = pcc_aurora_reward( throughput / 8 / BYTES_PER_PACKET, latency, loss, np.mean(self.env.current_trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET) if latency > 0.0: self.env.run_dur = MI_RTT_PROPORTION * sender_mi.get("avg latency") + (1 / self.links[0].get_bandwidth(self.cur_time)) # self.env.run_dur = max(MI_RTT_PROPORTION * sender_mi.get("avg latency"), 5 * (1 / self.senders[0].rate)) # print(self.env.run_dur) return reward * REWARD_SCALE class Sender(): def __init__(self, rate, path, dest, features, cwnd=25, history_len=10, delta_scale=1): self.id = Sender._get_next_id() self.delta_scale = delta_scale self.starting_rate = rate self.rate = rate self.sent = 0 self.acked = 0 self.lost = 0 self.bytes_in_flight = 0 self.min_latency = None self.rtt_samples = [] self.queue_delay_samples = [] self.prev_rtt_samples = self.rtt_samples self.sample_time = [] self.net = None self.path = path self.dest = dest self.history_len = history_len self.features = features self.history = sender_obs.SenderHistory(self.history_len, self.features, self.id) self.cwnd = cwnd self.use_cwnd = False self.rto = -1 self.ssthresh = 0 self.pkt_loss_wait_time = -1 self.estRTT = 1000000 / 1e6 # SynInterval in emulation self.RTTVar = self.estRTT / 2 # RTT variance # self.got_data = False _next_id = 1 def _get_next_id(): result = Sender._next_id Sender._next_id += 1 return result def apply_rate_delta(self, delta): # if self.got_data: delta *= self.delta_scale #print("Applying delta %f" % delta) if delta >= 0.0: self.set_rate(self.rate * (1.0 + delta)) else: self.set_rate(self.rate / (1.0 - delta)) def apply_cwnd_delta(self, delta): delta *= self.delta_scale #print("Applying delta %f" % delta) if delta >= 0.0: 
self.set_cwnd(self.cwnd * (1.0 + delta)) else: self.set_cwnd(self.cwnd / (1.0 - delta)) def can_send_packet(self): if self.use_cwnd: return int(self.bytes_in_flight) / BYTES_PER_PACKET < self.cwnd else: return True def register_network(self, net): self.net = net def on_packet_sent(self): self.sent += 1 self.bytes_in_flight += BYTES_PER_PACKET def on_packet_acked(self, rtt): self.estRTT = (7.0 * self.estRTT + rtt) / 8.0 # RTT of emulation way self.RTTVar = (self.RTTVar * 7.0 + abs(rtt - self.estRTT) * 1.0) / 8.0 self.acked += 1 self.rtt_samples.append(rtt) # self.rtt_samples.append(self.estRTT) if (self.min_latency is None) or (rtt < self.min_latency): self.min_latency = rtt self.bytes_in_flight -= BYTES_PER_PACKET def on_packet_lost(self, rtt): self.lost += 1 self.bytes_in_flight -= BYTES_PER_PACKET def set_rate(self, new_rate): self.rate = new_rate # print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE)) if self.rate > MAX_RATE: self.rate = MAX_RATE if self.rate < MIN_RATE: self.rate = MIN_RATE def set_cwnd(self, new_cwnd): self.cwnd = int(new_cwnd) #print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE)) # if self.cwnd > MAX_CWND: # self.cwnd = MAX_CWND # if self.cwnd < MIN_CWND: # self.cwnd = MIN_CWND def record_run(self): smi = self.get_run_data() # if not self.got_data and smi.rtt_samples: # self.got_data = True # self.history.step(smi) # else: self.history.step(smi) def get_obs(self): return self.history.as_array() def get_run_data(self): obs_end_time = self.net.get_cur_time() #obs_dur = obs_end_time - self.obs_start_time #print("Got %d acks in %f seconds" % (self.acked, obs_dur)) #print("Sent %d packets in %f seconds" % (self.sent, obs_dur)) #print("self.rate = %f" % self.rate) # print(self.acked, self.sent) rtt_samples = self.rtt_samples if self.rtt_samples else self.prev_rtt_samples # if not self.rtt_samples: # print(self.obs_start_time, obs_end_time, self.rate) # rtt_samples is empty when there is no packet acked in MI # Solution: inherit from previous rtt_samples. 
return sender_obs.SenderMonitorInterval( self.id, bytes_sent=self.sent * BYTES_PER_PACKET, bytes_acked=self.acked * BYTES_PER_PACKET, bytes_lost=self.lost * BYTES_PER_PACKET, send_start=self.obs_start_time, send_end=obs_end_time, recv_start=self.obs_start_time, recv_end=obs_end_time, rtt_samples=self.rtt_samples, queue_delay_samples=self.queue_delay_samples, packet_size=BYTES_PER_PACKET ) def reset_obs(self): self.sent = 0 self.acked = 0 self.lost = 0 if self.rtt_samples: self.prev_rtt_samples = self.rtt_samples self.rtt_samples = [] self.queue_delay_samples = [] self.obs_start_time = self.net.get_cur_time() def print_debug(self): print("Sender:") print("Obs: %s" % str(self.get_obs())) print("Rate: %f" % self.rate) print("Sent: %d" % self.sent) print("Acked: %d" % self.acked) print("Lost: %d" % self.lost) print("Min Latency: %s" % str(self.min_latency)) def reset(self): #print("Resetting sender!") self.rate = self.starting_rate self.bytes_in_flight = 0 self.min_latency = None self.reset_obs() self.history = sender_obs.SenderHistory(self.history_len, self.features, self.id) self.estRTT = 1000000 / 1e6 # SynInterval in emulation self.RTTVar = self.estRTT / 2 # RTT variance # self.got_data = False def timeout(self): # placeholder pass class SimulatedNetworkEnv(gym.Env): def __init__(self, traces, history_len=10, features="sent latency inflation,latency ratio,send ratio", congestion_control_type="aurora", train_flag=False, delta_scale=1.0): """Network environment used in simulation. congestion_control_type: aurora is pcc-rl. cubic is TCPCubic. """ assert congestion_control_type in {"aurora", "cubic"}, \ "Unrecognized congestion_control_type {}.".format( congestion_control_type) # self.replay = EmuReplay() self.delta_scale = delta_scale self.traces = traces self.current_trace = np.random.choice(self.traces) self.train_flag = train_flag self.congestion_control_type = congestion_control_type if self.congestion_control_type == 'aurora': self.use_cwnd = False elif self.congestion_control_type == 'cubic': self.use_cwnd = True self.history_len = history_len # print("History length: %d" % history_len) self.features = features.split(",") # print("Features: %s" % str(self.features)) self.links = None self.senders = None self.create_new_links_and_senders() self.net = Network(self.senders, self.links, self) self.run_dur = None self.run_period = 0.1 self.steps_taken = 0 self.debug_thpt_changes = False self.last_thpt = None self.last_rate = None if self.use_cwnd: self.action_space = spaces.Box( np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32) else: self.action_space = spaces.Box( np.array([-1e12]), np.array([1e12]), dtype=np.float32) self.observation_space = None # use_only_scale_free = True single_obs_min_vec = sender_obs.get_min_obs_vector(self.features) single_obs_max_vec = sender_obs.get_max_obs_vector(self.features) self.observation_space = spaces.Box(np.tile(single_obs_min_vec, self.history_len), np.tile(single_obs_max_vec, self.history_len), dtype=np.float32) self.reward_sum = 0.0 self.reward_ewma = 0.0 self.episodes_run = -1 def seed(self, seed=None): self.rand, seed = seeding.np_random(seed) return [seed] def _get_all_sender_obs(self): sender_obs = self.senders[0].get_obs() sender_obs = np.array(sender_obs).reshape(-1,) return sender_obs def step(self, actions): #print("Actions: %s" % str(actions)) # print(actions) for i in range(0, 1): # len(actions)): #print("Updating rate for sender %d" % i) action = actions self.senders[i].apply_rate_delta(action[0]) if self.use_cwnd: 
self.senders[i].apply_cwnd_delta(action[1]) # print("Running for %fs" % self.run_dur) reward = self.net.run_for_dur(self.run_dur, action=actions[0]) self.steps_taken += 1 sender_obs = self._get_all_sender_obs() should_stop = self.current_trace.is_finished(self.net.get_cur_time()) self.reward_sum += reward # print('env step: {}s'.format(time.time() - t_start)) return sender_obs, reward, should_stop, {} def print_debug(self): print("---Link Debug---") for link in self.links: link.print_debug() print("---Sender Debug---") for sender in self.senders: sender.print_debug() def create_new_links_and_senders(self): # self.replay.reset() self.links = [Link(self.current_trace), Link(self.current_trace)] if self.congestion_control_type == "aurora": if not self.train_flag: self.senders = [Sender( #self.replay.get_rate(), # 2500000 / 8 /BYTES_PER_PACKET / 0.048, # 12000000 / 8 /BYTES_PER_PACKET / 0.048, # 10 / (self.current_trace.get_delay(0) *2/1000), 100, [self.links[0], self.links[1]], 0, self.features, history_len=self.history_len, delta_scale=self.delta_scale)] else: # self.senders = [Sender(random.uniform(0.3, 1.5) * bw, # [self.links[0], self.links[1]], 0, # self.features, # history_len=self.history_len)] # self.senders = [Sender(random.uniform(10/bw, 1.5) * bw, # [self.links[0], self.links[1]], 0, # self.features, # history_len=self.history_len, # delta_scale=self.delta_scale)] self.senders = [Sender(100, [self.links[0], self.links[1]], 0, self.features, history_len=self.history_len, delta_scale=self.delta_scale)] elif self.congestion_control_type == "cubic": raise NotImplementedError else: raise RuntimeError("Unrecognized congestion_control_type {}".format( self.congestion_control_type)) # self.run_dur = 3 * lat # self.run_dur = 1 * lat if not self.senders[0].rtt_samples: # self.run_dur = 0.473 # self.run_dur = 5 / self.senders[0].rate self.run_dur = 0.01 # self.run_dur = self.current_trace.get_delay(0) * 2 / 1000 # self.run_dur = self.replay.get_ts() - 0 def reset(self): self.steps_taken = 0 self.net.reset() self.current_trace = np.random.choice(self.traces) self.current_trace.reset() self.create_new_links_and_senders() self.net = Network(self.senders, self.links, self) self.episodes_run += 1 # self.replay.reset() self.net.run_for_dur(self.run_dur) self.reward_ewma *= 0.99 self.reward_ewma += 0.01 * self.reward_sum # print("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma)) self.reward_sum = 0.0 return self._get_all_sender_obs() register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
19,700
3,056
569
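The Link model in the network_wrong_mi.py record above accounts for queueing by draining pkt_in_queue at the link bandwidth between events and converting the remaining backlog into seconds of delay, dropping a packet when the queue is full. A small self-contained sketch of that bookkeeping, assuming a constant-rate link instead of the trace-driven bandwidth the simulator uses:

import math

class ConstantRateLink:
    """Toy FIFO link: fixed service rate in packets/second, finite queue."""
    def __init__(self, rate_pps, queue_size):
        self.rate = rate_pps          # packets per second
        self.queue_size = queue_size  # max packets queued
        self.pkt_in_queue = 0.0
        self.last_update = 0.0

    def queue_delay(self, now):
        # Drain whatever the link could have served since the last event.
        self.pkt_in_queue = max(0.0, self.pkt_in_queue -
                                (now - self.last_update) * self.rate)
        self.last_update = now
        return math.ceil(self.pkt_in_queue) / self.rate

    def enqueue(self, now):
        # Returns False (tail drop) when the queue is full, as in packet_enters_link().
        self.queue_delay(now)
        if 1 + math.ceil(self.pkt_in_queue) > self.queue_size:
            return False
        self.pkt_in_queue += 1
        return True

link = ConstantRateLink(rate_pps=100, queue_size=10)
for t in (0.0, 0.001, 0.002):
    link.enqueue(t)
print(round(link.queue_delay(0.002), 4))  # remaining backlog expressed in seconds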
e2b17755e0aaa5b3a5cbb71d2ff79a60e5f99eea
3,150
py
Python
cheddar_oauth_example/settings.py
brianbrunner/cheddar-oauth-demo
7768023a355d9cdc2e861aded2c05ebe3246c930
[ "MIT" ]
1
2015-05-26T18:21:32.000Z
2015-05-26T18:21:32.000Z
cheddar_oauth_example/settings.py
brianbrunner/cheddar-oauth-demo
7768023a355d9cdc2e861aded2c05ebe3246c930
[ "MIT" ]
null
null
null
cheddar_oauth_example/settings.py
brianbrunner/cheddar-oauth-demo
7768023a355d9cdc2e861aded2c05ebe3246c930
[ "MIT" ]
null
null
null
""" Django settings for cheddar_oauth_example project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '43cy=fmsak_xqkme&yi9@c^+-*0pvr%s+-of!yzx6rdiw*!bxt' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'social.apps.django_app.default', 'django.contrib.humanize', 'app', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'cheddar_oauth_example.urls' WSGI_APPLICATION = 'cheddar_oauth_example.wsgi.application' AUTHENTICATION_BACKENDS = ( 'oauth.cheddar.CheddarOAuth2', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'social.apps.django_app.context_processors.backends', 'social.apps.django_app.context_processors.login_redirect', ) # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' # SOCIAL_AUTH_CHEDDAR_SCOPE = [] SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/' SOCIAL_AUTH_LOGIN_ERROR_URL = '/login_error' # Logging LOGGING = { 'version': 1, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, }, 'loggers': { 'django': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': True, }, } } # Import Local Settings try: from local_settings import * except ImportError as e: print "FAILED TO IMPORT LOCAL SETTINGS: %s" % e
23.333333
89
0.699365
""" Django settings for cheddar_oauth_example project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '43cy=fmsak_xqkme&yi9@c^+-*0pvr%s+-of!yzx6rdiw*!bxt' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'social.apps.django_app.default', 'django.contrib.humanize', 'app', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'cheddar_oauth_example.urls' WSGI_APPLICATION = 'cheddar_oauth_example.wsgi.application' AUTHENTICATION_BACKENDS = ( 'oauth.cheddar.CheddarOAuth2', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'social.apps.django_app.context_processors.backends', 'social.apps.django_app.context_processors.login_redirect', ) # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' # SOCIAL_AUTH_CHEDDAR_SCOPE = [] SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/' SOCIAL_AUTH_LOGIN_ERROR_URL = '/login_error' # Logging LOGGING = { 'version': 1, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, }, 'loggers': { 'django': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': True, }, } } # Import Local Settings try: from local_settings import * except ImportError as e: print "FAILED TO IMPORT LOCAL SETTINGS: %s" % e
0
0
0
1097c97b6b77b2f181e0c5a9531a0851278011cb
470
py
Python
django101/django102/urls.py
Minkov/python-web-2020-09
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
[ "MIT" ]
4
2020-10-30T23:13:50.000Z
2020-12-26T21:35:00.000Z
django101/django102/urls.py
Minkov/python-web-2020-09
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
[ "MIT" ]
null
null
null
django101/django102/urls.py
Minkov/python-web-2020-09
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
[ "MIT" ]
7
2020-09-17T13:08:35.000Z
2020-10-31T15:01:46.000Z
from django.urls import path from django102.views import index as index_view, UsersListView, GamesListView, something, methods_demo, \ raises_exception, create_game urlpatterns = [ path('', index_view, name='index'), path('2/', UsersListView.as_view()), path('games/', GamesListView.as_view()), path('smth/', something), path('methods/', methods_demo), path('raises/', raises_exception), path('creategame/', create_game), ]
33.571429
106
0.676596
from django.urls import path from django102.views import index as index_view, UsersListView, GamesListView, something, methods_demo, \ raises_exception, create_game urlpatterns = [ path('', index_view, name='index'), path('2/', UsersListView.as_view()), path('games/', GamesListView.as_view()), path('smth/', something), path('methods/', methods_demo), path('raises/', raises_exception), path('creategame/', create_game), ]
0
0
0
699f409bdd5d561bb93770f28b38f939f53fc421
5,483
py
Python
3_dataset_create.py
shivanirmishra/musicgenre
954214b6f7756c05de1253702811fd69dd99b0e2
[ "MIT" ]
null
null
null
3_dataset_create.py
shivanirmishra/musicgenre
954214b6f7756c05de1253702811fd69dd99b0e2
[ "MIT" ]
null
null
null
3_dataset_create.py
shivanirmishra/musicgenre
954214b6f7756c05de1253702811fd69dd99b0e2
[ "MIT" ]
null
null
null
from google.colab import drive drive.mount('/content/drive') import librosa import os import pandas as pd from numpy import mean import warnings; warnings.filterwarnings('ignore'); folders_5s = { 'pop_5s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_5s', 'rnb_5s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_5s', 'blues_5s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_5s', 'hiphop_5s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_5s', 'rock_5s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_5s' } folders_10s = { 'pop_10s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_10s', 'rnb_10s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_10s', 'blues_10s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_10s', 'hiphop_10s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_10s', 'rock_10s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_10s' } folders_20s = { 'pop_20s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_20s', 'rnb_20s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_20s', 'blues_20s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_20s', 'hiphop_20s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_20s', 'rock_20s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_20s' } label = { 'pop_5s': 0, 'rnb_5s': 1, 'blues_5s': 2, 'hiphop_5s': 3, 'rock_5s': 4, 'pop_10s': 0, 'rnb_10s': 1, 'blues_10s': 2, 'hiphop_10s': 3, 'rock_10s': 4, 'pop_20s': 0, 'rnb_20s': 1, 'blues_20s': 2, 'hiphop_20s': 3, 'rock_20s': 4 } data_5s = [] data_10s = [] data_20s = [] for name, path in folders_5s.items(): #count_5s = 3000 for filename in os.listdir(path): # if(count_5s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_5s.append(songData) #count_5s -= 1 for name, path in folders_10s.items(): #count_10s = 1500 for filename in os.listdir(path): # if(count_10s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_10s.append(songData) #count_10s -= 1 for name, path in folders_20s.items(): #count_20s = 900 for filename in 
os.listdir(path): # if(count_20s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_20s.append(songData) #count_20s -= 1 data_5s = pd.DataFrame(data_5s) data_5s.to_csv('/content/drive/My Drive/ML_Project/data_5s_test_all_genres.csv') data_10s = pd.DataFrame(data_10s) data_10s.to_csv('/content/drive/My Drive/ML_Project/data_10s_test_all_genres.csv') data_20s = pd.DataFrame(data_20s) data_20s.to_csv('/content/drive/My Drive/ML_Project/data_20s_test_all_genres.csv') data_10s
28.857895
89
0.666423
from google.colab import drive drive.mount('/content/drive') import librosa import os import pandas as pd from numpy import mean import warnings; warnings.filterwarnings('ignore'); folders_5s = { 'pop_5s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_5s', 'rnb_5s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_5s', 'blues_5s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_5s', 'hiphop_5s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_5s', 'rock_5s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_5s' } folders_10s = { 'pop_10s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_10s', 'rnb_10s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_10s', 'blues_10s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_10s', 'hiphop_10s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_10s', 'rock_10s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_10s' } folders_20s = { 'pop_20s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_20s', 'rnb_20s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_20s', 'blues_20s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_20s', 'hiphop_20s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_20s', 'rock_20s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_20s' } label = { 'pop_5s': 0, 'rnb_5s': 1, 'blues_5s': 2, 'hiphop_5s': 3, 'rock_5s': 4, 'pop_10s': 0, 'rnb_10s': 1, 'blues_10s': 2, 'hiphop_10s': 3, 'rock_10s': 4, 'pop_20s': 0, 'rnb_20s': 1, 'blues_20s': 2, 'hiphop_20s': 3, 'rock_20s': 4 } data_5s = [] data_10s = [] data_20s = [] for name, path in folders_5s.items(): #count_5s = 3000 for filename in os.listdir(path): # if(count_5s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_5s.append(songData) #count_5s -= 1 for name, path in folders_10s.items(): #count_10s = 1500 for filename in os.listdir(path): # if(count_10s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_10s.append(songData) #count_10s -= 1 for name, path in folders_20s.items(): #count_20s = 900 for filename in 
os.listdir(path): # if(count_20s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_20s.append(songData) #count_20s -= 1 data_5s = pd.DataFrame(data_5s) data_5s.to_csv('/content/drive/My Drive/ML_Project/data_5s_test_all_genres.csv') data_10s = pd.DataFrame(data_10s) data_10s.to_csv('/content/drive/My Drive/ML_Project/data_10s_test_all_genres.csv') data_20s = pd.DataFrame(data_20s) data_20s.to_csv('/content/drive/My Drive/ML_Project/data_20s_test_all_genres.csv') data_10s
0
0
0
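The 3_dataset_create.py record above reduces each audio clip to one fixed-length row by mean-pooling every librosa feature (tempo, beat frames, chroma, RMS energy, spectral centroid/bandwidth/rolloff, zero-crossing rate, 20 MFCCs) and appending a genre label. A trimmed sketch of that per-file feature vector, assuming librosa is installed and `path` points at an audio file; note that librosa.feature.rmse used in the record was renamed to librosa.feature.rms in newer librosa releases:

import numpy as np
import librosa

def clip_features(path, label):
    """Mean-pooled feature row for one audio clip, mirroring the script above."""
    y, sr = librosa.load(path, mono=True)
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    row = [tempo, np.mean(beats)]
    row.append(np.mean(librosa.feature.chroma_stft(y=y, sr=sr)))
    row.append(np.mean(librosa.feature.rms(y=y)))                    # rmse in older librosa
    row.append(np.mean(librosa.feature.spectral_centroid(y=y, sr=sr)))
    row.append(np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr)))
    row.append(np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr)))
    row.append(np.mean(librosa.feature.zero_crossing_rate(y)))
    for coeff in librosa.feature.mfcc(y=y, sr=sr):                   # 20 coefficients
        row.append(np.mean(coeff))
    row.append(label)
    return row

Calling clip_features('song.wav', 0) returns a 29-value row (tempo, mean beat frame, six spectral means, 20 MFCC means, label), matching the columns the script writes to CSV.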
412a8b42be8c6054311e076c95465833bdd45355
1,206
py
Python
data/train_test_split.py
ttaoREtw/A-Pytorch-Implementation-of-Tacotron-End-to-end-Text-to-speech-Deep-Learning-Model
6b0f615cafb0530370631a880aac5736fe9a2c64
[ "MIT" ]
105
2018-09-13T02:45:10.000Z
2021-06-24T03:31:15.000Z
data/train_test_split.py
henryhenrychen/Tacotron-pytorch
4a4d1ea0d83fd88a50464999f5d55fe012c86687
[ "MIT" ]
9
2018-12-11T02:37:58.000Z
2021-03-18T02:42:40.000Z
data/train_test_split.py
henryhenrychen/Tacotron-pytorch
4a4d1ea0d83fd88a50464999f5d55fe012c86687
[ "MIT" ]
31
2018-09-15T14:51:31.000Z
2021-01-19T07:37:14.000Z
import os import argparse import random if __name__ == '__main__': parser = argparse.ArgumentParser(description='Split the data') parser.add_argument('--meta-all', type=str, help='The meta file generated by preprocess.py', required=True) parser.add_argument('--ratio-test', default=0.1, type=float, help='ratio of testing examples', required=False) args = parser.parse_args() split_and_save(args)
31.736842
114
0.662521
import os import argparse import random def split_and_save(args): meta_all_path = args.meta_all meta_dir = os.path.dirname(os.path.realpath(meta_all_path)) meta_tr_path = os.path.join(meta_dir, 'meta_train.txt') meta_te_path = os.path.join(meta_dir, 'meta_test.txt') with open(meta_all_path) as f: meta_all = f.readlines() meta_tr = [] meta_te = [] n_meta = len(meta_all) n_test = int(args.ratio_test * n_meta) indice_te = random.sample(range(n_meta), n_test) for idx, line in enumerate(meta_all): if idx in indice_te: meta_te.append(line) else: meta_tr.append(line) with open(meta_tr_path, 'w') as ftr: ftr.write(''.join(meta_tr)) with open(meta_te_path, 'w') as fte: fte.write(''.join(meta_te)) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Split the data') parser.add_argument('--meta-all', type=str, help='The meta file generated by preprocess.py', required=True) parser.add_argument('--ratio-test', default=0.1, type=float, help='ratio of testing examples', required=False) args = parser.parse_args() split_and_save(args)
762
0
23
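The split_and_save helper shown in the record's original_content holds out a random fraction of the metadata lines: it samples n_test indices, routes those lines to meta_test.txt and the rest to meta_train.txt. The same idea as a compact, self-contained sketch (hypothetical line format and seed):

import random

def split_lines(lines, ratio_test=0.1, seed=None):
    """Randomly partition lines into (train, test) with ~ratio_test held out."""
    rng = random.Random(seed)
    n_test = int(ratio_test * len(lines))
    test_idx = set(rng.sample(range(len(lines)), n_test))
    train = [line for i, line in enumerate(lines) if i not in test_idx]
    test = [line for i, line in enumerate(lines) if i in test_idx]
    return train, test

lines = [f"utt{i}|text {i}\n" for i in range(100)]
train, test = split_lines(lines, ratio_test=0.1, seed=0)
print(len(train), len(test))  # 90 10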
4d36c18720eb25777d76206398891b1da5c803d3
10,711
py
Python
item_sets.py
jay-maity/RecommendPCY
040eda27be46d241406d3cb8ce6605dde492fef9
[ "MIT" ]
null
null
null
item_sets.py
jay-maity/RecommendPCY
040eda27be46d241406d3cb8ce6605dde492fef9
[ "MIT" ]
null
null
null
item_sets.py
jay-maity/RecommendPCY
040eda27be46d241406d3cb8ce6605dde492fef9
[ "MIT" ]
null
null
null
""" Frequent item discovery by PCY algorithm""" import operator import json import sys from pyspark import SparkContext, SparkConf import pyspark_cassandra from cassandra.cluster import Cluster cluster = None session = None class PCYFrequentItems: """ Find Frequent item list using PCY algorithm """ IS_DEBUGGING = False config_object = None def __init__(self, is_debug, config_file="config.json"): """ Sets the initial debiggin parameter :param is_debug: Print collect messages if set true """ self.IS_DEBUGGING = is_debug json_data = open(config_file).read() self.config_object = json.loads(json_data) @staticmethod def group_items(basket, group_size): """ Get item_groups from a basket Returns sorted items by their numerical number :param basket: Basket to search the item_group from (could be a single cart) :param group_size: Size of the item_group to form :return: """ assert (group_size >= 1 and isinstance(group_size, int)), \ "Please use group size as Integer and > 0" # In case of group size is one simply return each items if group_size == 1: return [(item,) for item in basket] item_groups = [] if len(basket) >= group_size: # Sort the basket basket = sorted(basket) # Loop through the basket for i in range(len(basket) - group_size + 1): # Gets the base and add all items for each group # until end # If base is [2,3] and basket is [2,3,4,5] # then creates [2,3,4], [2,3,5] base_item_count = i + (group_size - 1) base_items = basket[i:base_item_count] for item in basket[base_item_count:]: item_groups.append(tuple(base_items) + (item,)) return item_groups @staticmethod def map_nodes(line): """ Map line into graph node key = value as array """ key_values = line.split(":") # key = int(key_values[0]) values = [] if key_values[1].strip() != "": values = [int(node) for node in key_values[1].strip().split(' ')] return values @staticmethod def filter_pairs(pair, hosts, keyspace, hashfunction, item_table, bitmap_table): """ Filter pairs by querying from cassandra table :return: """ global cluster, session if cluster is None: cluster = Cluster(hosts) session = cluster.connect(keyspace) item1 = session.execute("select item from " + item_table + " where item = %d" % pair[0]) item2 = session.execute("select item from " + item_table + " where item = %d" % pair[1]) bitmap = session.execute("select hash from " + bitmap_table + " where hash = %d" % hashfunction(pair)) print("Pair checked " + str(pair[0])) return item1 and item2 and bitmap @staticmethod def filter_pairs_broadcast(pair, freq_pair, bitmap, hashfunction): """ Filter pairs from broadcast variables :return: """ return pair[0] in freq_pair and pair[1] in freq_pair and hashfunction(pair) in bitmap def pcy_freq_items(self, item_group_rdd, hash_function, support_count): """ Get Frequent items for a particular group of items :param item_group_rdd: :param passno: :param hash_function: :param support_count: :return: """ # Hash and Items mapping order_prod_hash = item_group_rdd \ .map(lambda x: (hash_function(x), 1)) # Group, filter and get unique item sets frequent_items = order_prod_hash.reduceByKey(operator.add) \ .filter(lambda x: x[1] > support_count) \ .map(lambda x: x[0]) return frequent_items def pcy_pass(self, order_prod, pass_no, support_count, hashn, hashnplus1, is_nplus1_cache=False): """ Calculates frequent items and bitmap after n th pass :param order_prod: :param pass_no: :param support_count: :param hashn: :param hashnplus1: :param is_nplus1_cache: :return: """ item_set_count = pass_no order_prod_single = order_prod. 
\ flatMap(lambda x: PCYFrequentItems. group_items(x, item_set_count)) frequent_items_n = self.pcy_freq_items(order_prod_single, hashn, support_count) item_set_count += 1 order_prod_pairs = order_prod. \ flatMap(lambda x: PCYFrequentItems.group_items(x, item_set_count)) if is_nplus1_cache: order_prod_pairs = order_prod_pairs.cache() bitmap_nplus1 = self.pcy_freq_items(order_prod_pairs, hashnplus1, support_count) return frequent_items_n, bitmap_nplus1, order_prod_pairs @staticmethod def pair_bitmap(items): """ Hash function for calculation for pairs :param items: :return: """ mul = 1 for item in items: mul *= ((2 * item) + 1) return mul % 999917 @staticmethod def single(items): """ Hash function for calculation :param items: :return: """ mul = 1 for item in items: mul *= item return mul % 100000000 def configure(self): """ Configure spark and cassandra objects :param is_local_host: :return: """ # Spark Configuration conf = SparkConf().setAppName('Frequent Item Sets'). \ set('spark.cassandra.connection.host', ','.join(self.config_object["CassandraHosts"])) return SparkContext(conf=conf) def frequent_items(self, inputs, output, support_count, is_broadcast=True): """Output correlation coefficient without mean formula Args: inputs:Input file location output:Output file location support_count: is_broadcast: Item pair will be found using broadcast or not """ spark_context = self.configure() # File loading text = spark_context.textFile(inputs) order_prod = text.map(PCYFrequentItems.map_nodes).cache() pass_no = 1 freq_items, bitmap, all_pairs = self.pcy_pass(order_prod, pass_no, support_count, PCYFrequentItems.single, PCYFrequentItems.pair_bitmap, is_nplus1_cache=True) if self.IS_DEBUGGING: print("Frequent " + str(pass_no) + "-group items after pass:" + str(pass_no)) print(freq_items.collect()) print("Bitmap for " + str(pass_no + 1) + "-group items after pass:" + str(pass_no)) print(bitmap.collect()) # System will use broadcast based on user input if is_broadcast: bitmap_set = set(bitmap.collect()) freq_items_set = set(freq_items.collect()) bitmap_broadast = spark_context.broadcast(bitmap_set) freq_items_set = spark_context.broadcast(freq_items_set) frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems. filter_pairs_broadcast(x, freq_items_set.value, bitmap_broadast.value, PCYFrequentItems.pair_bitmap )) else: # Making freq items Ready to save to cassandra freq_items = freq_items.map(lambda x: {'item': x}) freq_items.saveToCassandra(self.config_object["KeySpace"], self.config_object["Item1Table"]) # Making bitmap Ready to save to cassandra bitmap = bitmap.map(lambda x: {'hash': x}) bitmap.saveToCassandra(self.config_object["KeySpace"], self.config_object["Bitmap2Table"]) print(all_pairs.count()) frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems. 
filter_pairs(x, self.config_object["CassandraHosts"], self.config_object["KeySpace"], PCYFrequentItems.pair_bitmap, self.config_object["Item1Table"], self.config_object["Bitmap2Table"])) if self.IS_DEBUGGING: print(all_pairs.collect()) print(frequent_pairs.collect()) # Saves as text file frequent_pairs.saveAsTextFile(output) frequent_pairs = frequent_pairs.\ map(lambda x: {'productid1': x[0], 'productid2': x[1]}) # Save final output to cassandra frequent_pairs.saveToCassandra(self.config_object["KeySpace"], self.config_object["RecommendTable"]) all_pairs.unpersist() order_prod.unpersist() def main(): """ Handles parameters for the file to run :return: """ input_path = sys.argv[1] output_path = sys.argv[2] support_thresold = int(sys.argv[3]) broadcast = 1 if len(sys.argv) > 4: broadcast = int(sys.argv[4]) pcy = PCYFrequentItems(is_debug=True) if broadcast == 1: is_broadcast = True else: is_broadcast = False pcy.frequent_items(input_path, output_path, support_thresold, is_broadcast) if __name__ == "__main__": main()
34.220447
98
0.527588
""" Frequent item discovery by PCY algorithm""" import operator import json import sys from pyspark import SparkContext, SparkConf import pyspark_cassandra from cassandra.cluster import Cluster cluster = None session = None class PCYFrequentItems: """ Find Frequent item list using PCY algorithm """ IS_DEBUGGING = False config_object = None def __init__(self, is_debug, config_file="config.json"): """ Sets the initial debiggin parameter :param is_debug: Print collect messages if set true """ self.IS_DEBUGGING = is_debug json_data = open(config_file).read() self.config_object = json.loads(json_data) @staticmethod def group_items(basket, group_size): """ Get item_groups from a basket Returns sorted items by their numerical number :param basket: Basket to search the item_group from (could be a single cart) :param group_size: Size of the item_group to form :return: """ assert (group_size >= 1 and isinstance(group_size, int)), \ "Please use group size as Integer and > 0" # In case of group size is one simply return each items if group_size == 1: return [(item,) for item in basket] item_groups = [] if len(basket) >= group_size: # Sort the basket basket = sorted(basket) # Loop through the basket for i in range(len(basket) - group_size + 1): # Gets the base and add all items for each group # until end # If base is [2,3] and basket is [2,3,4,5] # then creates [2,3,4], [2,3,5] base_item_count = i + (group_size - 1) base_items = basket[i:base_item_count] for item in basket[base_item_count:]: item_groups.append(tuple(base_items) + (item,)) return item_groups @staticmethod def map_nodes(line): """ Map line into graph node key = value as array """ key_values = line.split(":") # key = int(key_values[0]) values = [] if key_values[1].strip() != "": values = [int(node) for node in key_values[1].strip().split(' ')] return values @staticmethod def filter_pairs(pair, hosts, keyspace, hashfunction, item_table, bitmap_table): """ Filter pairs by querying from cassandra table :return: """ global cluster, session if cluster is None: cluster = Cluster(hosts) session = cluster.connect(keyspace) item1 = session.execute("select item from " + item_table + " where item = %d" % pair[0]) item2 = session.execute("select item from " + item_table + " where item = %d" % pair[1]) bitmap = session.execute("select hash from " + bitmap_table + " where hash = %d" % hashfunction(pair)) print("Pair checked " + str(pair[0])) return item1 and item2 and bitmap @staticmethod def filter_pairs_broadcast(pair, freq_pair, bitmap, hashfunction): """ Filter pairs from broadcast variables :return: """ return pair[0] in freq_pair and pair[1] in freq_pair and hashfunction(pair) in bitmap def pcy_freq_items(self, item_group_rdd, hash_function, support_count): """ Get Frequent items for a particular group of items :param item_group_rdd: :param passno: :param hash_function: :param support_count: :return: """ # Hash and Items mapping order_prod_hash = item_group_rdd \ .map(lambda x: (hash_function(x), 1)) # Group, filter and get unique item sets frequent_items = order_prod_hash.reduceByKey(operator.add) \ .filter(lambda x: x[1] > support_count) \ .map(lambda x: x[0]) return frequent_items def pcy_pass(self, order_prod, pass_no, support_count, hashn, hashnplus1, is_nplus1_cache=False): """ Calculates frequent items and bitmap after n th pass :param order_prod: :param pass_no: :param support_count: :param hashn: :param hashnplus1: :param is_nplus1_cache: :return: """ item_set_count = pass_no order_prod_single = order_prod. 
\ flatMap(lambda x: PCYFrequentItems. group_items(x, item_set_count)) frequent_items_n = self.pcy_freq_items(order_prod_single, hashn, support_count) item_set_count += 1 order_prod_pairs = order_prod. \ flatMap(lambda x: PCYFrequentItems.group_items(x, item_set_count)) if is_nplus1_cache: order_prod_pairs = order_prod_pairs.cache() bitmap_nplus1 = self.pcy_freq_items(order_prod_pairs, hashnplus1, support_count) return frequent_items_n, bitmap_nplus1, order_prod_pairs @staticmethod def pair_bitmap(items): """ Hash function for calculation for pairs :param items: :return: """ mul = 1 for item in items: mul *= ((2 * item) + 1) return mul % 999917 @staticmethod def single(items): """ Hash function for calculation :param items: :return: """ mul = 1 for item in items: mul *= item return mul % 100000000 def configure(self): """ Configure spark and cassandra objects :param is_local_host: :return: """ # Spark Configuration conf = SparkConf().setAppName('Frequent Item Sets'). \ set('spark.cassandra.connection.host', ','.join(self.config_object["CassandraHosts"])) return SparkContext(conf=conf) def frequent_items(self, inputs, output, support_count, is_broadcast=True): """Output correlation coefficient without mean formula Args: inputs:Input file location output:Output file location support_count: is_broadcast: Item pair will be found using broadcast or not """ spark_context = self.configure() # File loading text = spark_context.textFile(inputs) order_prod = text.map(PCYFrequentItems.map_nodes).cache() pass_no = 1 freq_items, bitmap, all_pairs = self.pcy_pass(order_prod, pass_no, support_count, PCYFrequentItems.single, PCYFrequentItems.pair_bitmap, is_nplus1_cache=True) if self.IS_DEBUGGING: print("Frequent " + str(pass_no) + "-group items after pass:" + str(pass_no)) print(freq_items.collect()) print("Bitmap for " + str(pass_no + 1) + "-group items after pass:" + str(pass_no)) print(bitmap.collect()) # System will use broadcast based on user input if is_broadcast: bitmap_set = set(bitmap.collect()) freq_items_set = set(freq_items.collect()) bitmap_broadast = spark_context.broadcast(bitmap_set) freq_items_set = spark_context.broadcast(freq_items_set) frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems. filter_pairs_broadcast(x, freq_items_set.value, bitmap_broadast.value, PCYFrequentItems.pair_bitmap )) else: # Making freq items Ready to save to cassandra freq_items = freq_items.map(lambda x: {'item': x}) freq_items.saveToCassandra(self.config_object["KeySpace"], self.config_object["Item1Table"]) # Making bitmap Ready to save to cassandra bitmap = bitmap.map(lambda x: {'hash': x}) bitmap.saveToCassandra(self.config_object["KeySpace"], self.config_object["Bitmap2Table"]) print(all_pairs.count()) frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems. 
filter_pairs(x, self.config_object["CassandraHosts"], self.config_object["KeySpace"], PCYFrequentItems.pair_bitmap, self.config_object["Item1Table"], self.config_object["Bitmap2Table"])) if self.IS_DEBUGGING: print(all_pairs.collect()) print(frequent_pairs.collect()) # Saves as text file frequent_pairs.saveAsTextFile(output) frequent_pairs = frequent_pairs.\ map(lambda x: {'productid1': x[0], 'productid2': x[1]}) # Save final output to cassandra frequent_pairs.saveToCassandra(self.config_object["KeySpace"], self.config_object["RecommendTable"]) all_pairs.unpersist() order_prod.unpersist() def main(): """ Handles parameters for the file to run :return: """ input_path = sys.argv[1] output_path = sys.argv[2] support_thresold = int(sys.argv[3]) broadcast = 1 if len(sys.argv) > 4: broadcast = int(sys.argv[4]) pcy = PCYFrequentItems(is_debug=True) if broadcast == 1: is_broadcast = True else: is_broadcast = False pcy.frequent_items(input_path, output_path, support_thresold, is_broadcast) if __name__ == "__main__": main()
0
0
0
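group_items in the item_sets.py record above enumerates candidate item sets of a given size from a sorted basket by sliding a contiguous (group_size-1)-item base and extending it with every later item, so basket [2,3,4,5] with size 3 yields (2,3,4), (2,3,5), (3,4,5). A standalone sketch of that enumeration plus the bucket hash the PCY pass uses for its bitmap:

def group_items(basket, group_size):
    """Candidate item groups from a sorted basket, as in PCYFrequentItems:
    a contiguous (group_size-1)-item base extended by each later item."""
    if group_size == 1:
        return [(item,) for item in basket]
    basket = sorted(basket)
    groups = []
    for i in range(len(basket) - group_size + 1):
        end = i + group_size - 1
        base = tuple(basket[i:end])
        for item in basket[end:]:
            groups.append(base + (item,))
    return groups

def pair_bucket(items, n_buckets=999917):
    """PCY-style hash of an item group into a fixed number of buckets."""
    mul = 1
    for item in items:
        mul *= 2 * item + 1
    return mul % n_buckets

print(group_items([5, 3, 2, 4], 3))   # [(2, 3, 4), (2, 3, 5), (3, 4, 5)]
print(pair_bucket((2, 3)))            # 35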
67a731ca62e5cbd2844ce988950efc73fd0d3ec6
5,201
pyw
Python
pncShell.pyw
BobBaylor/pnc
11b5a08a1fce5c605a203c4e46c9d9599024ad3c
[ "MIT" ]
null
null
null
pncShell.pyw
BobBaylor/pnc
11b5a08a1fce5c605a203c4e46c9d9599024ad3c
[ "MIT" ]
null
null
null
pncShell.pyw
BobBaylor/pnc
11b5a08a1fce5c605a203c4e46c9d9599024ad3c
[ "MIT" ]
null
null
null
""" A wrapper around my pnc.py module """ import os.path import wx import wx.lib.filebrowsebutton as filebrowse import pnc class MyFrame(wx.Frame): """ This is MyFrame. It just shows a few controls on a wxPanel, and has a simple menu. Use this file inFileBtn Write this root name TextEntry and starting number TextEntry To here outDirRootButton Optional subdirectory TextEntry Move the input file there, too CheckBox """ def evh_close(self, evt): #pylint: disable=unused-argument """Event handler for the button click.""" self.Close() def evh_doit(self, evt): #pylint: disable=unused-argument """Event handler for the button click.""" self.SetStatusText('working...') print '' out_dir = self.file_browse_root.GetValue() out_new_dir = self.tc_out_dir.GetValue() out_dir = os.path.join(out_dir, out_new_dir) b_success = pnc.get_photos(self.btn_infile.GetValue(), out_dir, self.tc_out_fname.GetValue(), self.cb_move_file.GetValue()) if b_success: self.SetStatusText('Done!') else: self.SetStatusText('Failed') def cback_infile(self, evt): #pylint: disable=unused-argument """ dummy callback """ pass def cback_file_root(self, evt): #pylint: disable=unused-argument """ dummy callback """ pass class MyApp(wx.App): """ a simple GUI """ def OnInit(self): #pylint: disable=invalid-name """ let's get this party started """ frame = MyFrame(None, "Panasonic .PNC to .JPG converter") self.SetTopWindow(frame) frame.Show(True) return True # app = MyApp(redirect=True) app = MyApp() #pylint: disable=invalid-name app.MainLoop()
38.242647
153
0.586233
""" A wrapper around my pnc.py module """ import os.path import wx import wx.lib.filebrowsebutton as filebrowse import pnc class MyFrame(wx.Frame): """ This is MyFrame. It just shows a few controls on a wxPanel, and has a simple menu. Use this file inFileBtn Write this root name TextEntry and starting number TextEntry To here outDirRootButton Optional subdirectory TextEntry Move the input file there, too CheckBox """ def __init__(self, parent, title): wide = 860 wx.Frame.__init__(self, parent, wx.ID_ANY, title, pos=(150, 150), size=(wide, 270)) # make a minimalist menu bar self.CreateStatusBar() menu_bar = wx.MenuBar() menu1 = wx.Menu() menu1.Append(101, '&Close', 'Close this frame') self.SetMenuBar(menu_bar) self.Bind(wx.EVT_MENU, self.Close, id=101) # Now create the Panel to put the other controls on. self.panel = wx.Panel(self, wx.ID_ANY) # Use a sizer to layout the controls, stacked vertically and with # a 6 pixel border around each space = 6 sflags = wx.ALL sizer = wx.BoxSizer(wx.VERTICAL) # x = self # sizer.Add(self.panel, wx.EXPAND ) # and a few controls text = wx.StaticText(self, -1, "Browse to the .pnc file, choose a root and folder name, and press Do It!") #pylint: disable=line-too-long text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)) text.SetSize(text.GetBestSize()) sizer.Add(text, 0, sflags, space) self.btn_infile = filebrowse.FileBrowseButton(self, -1, size=(wide-10, -1), changeCallback=self.cback_infile, labelText='Use this PNC file') self.btn_infile.SetValue('/Users/guy/Downloads/JpegData.PNC') sizer.Add(self.btn_infile, 0, sflags, space) self.file_browse_root = filebrowse.DirBrowseButton(self, -1, size=(wide-10, -1), changeCallback=self.cback_file_root, #pylint: disable=line-too-long labelText='To put JPG files here') # self.file_browse_root.SetValue( '/Users/guy/Pictures' ) self.file_browse_root.SetValue('/Users/guy/python/test') # self.file_browse_root.callCallback = False sizer.Add(self.file_browse_root, 0, sflags, space) # file name root and starting number hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(wx.StaticText(self, -1, "Optional new dir:"), 0, sflags, space) self.tc_out_dir = wx.TextCtrl(self, -1, '') hsizer.Add(self.tc_out_dir, 0, sflags, space) hsizer.Add(wx.StaticText(self, -1, "Filename root:"), 0, sflags, space) self.tc_out_fname = wx.TextCtrl(self, -1, 'gcam') hsizer.Add(self.tc_out_fname, 0, sflags, space) # hsizer.Add(wx.StaticText(self, -1, "File number start:"), 0, sflags, space) sizer.Add(hsizer, 0, sflags, space) self.cb_move_file = wx.CheckBox(self, -1, 'Move Input file, too') sizer.Add(self.cb_move_file, 0, sflags, space) # bind the button events to handlers hsizer2 = wx.BoxSizer(wx.HORIZONTAL) funbtn = wx.Button(self, -1, "Do it") self.Bind(wx.EVT_BUTTON, self.evh_doit, funbtn) hsizer2.Add(funbtn, 0, sflags, space) btn = wx.Button(self, -1, "Close") self.Bind(wx.EVT_BUTTON, self.evh_close, btn) hsizer2.Add(btn, 0, sflags, space) sizer.Add(hsizer2, 0, sflags, space) self.SetSizer(sizer) def evh_close(self, evt): #pylint: disable=unused-argument """Event handler for the button click.""" self.Close() def evh_doit(self, evt): #pylint: disable=unused-argument """Event handler for the button click.""" self.SetStatusText('working...') print '' out_dir = self.file_browse_root.GetValue() out_new_dir = self.tc_out_dir.GetValue() out_dir = os.path.join(out_dir, out_new_dir) b_success = pnc.get_photos(self.btn_infile.GetValue(), out_dir, self.tc_out_fname.GetValue(), self.cb_move_file.GetValue()) if b_success: self.SetStatusText('Done!') else: 
self.SetStatusText('Failed') def cback_infile(self, evt): #pylint: disable=unused-argument """ dummy callback """ pass def cback_file_root(self, evt): #pylint: disable=unused-argument """ dummy callback """ pass class MyApp(wx.App): """ a simple GUI """ def OnInit(self): #pylint: disable=invalid-name """ let's get this party started """ frame = MyFrame(None, "Panasonic .PNC to .JPG converter") self.SetTopWindow(frame) frame.Show(True) return True # app = MyApp(redirect=True) app = MyApp() #pylint: disable=invalid-name app.MainLoop()
3,265
0
26
f7357be79ed5cf787004c67c6e35b3966042133a
659
py
Python
ouch_server.py
jahinzee/theouchteam
870767cae81ad37b4191ded64c3e83eb48be982a
[ "MIT" ]
3
2022-01-09T02:40:31.000Z
2022-02-01T03:57:40.000Z
ouch_server.py
jahinzee/theouchteam
870767cae81ad37b4191ded64c3e83eb48be982a
[ "MIT" ]
null
null
null
ouch_server.py
jahinzee/theouchteam
870767cae81ad37b4191ded64c3e83eb48be982a
[ "MIT" ]
1
2022-01-21T08:05:27.000Z
2022-01-21T08:05:27.000Z
import sys from src.Exchange import Exchange if __name__ == "__main__": exchange = None if len(sys.argv) == 2: if sys.argv[1] == "debug": # Exchange outputs using debug mode. exchange = Exchange(debug="dump") elif sys.argv[1] == "none": # Exchange won't output anything. exchange = Exchange(debug="none") else: raise Exception("Command line argument should be either 'dump' or 'none'") else: exchange = Exchange() exchange.open_exchange() input() # Pressing the enter key will cause the server process to terminate. exchange.close_exchange()
32.95
86
0.608498
import sys from src.Exchange import Exchange if __name__ == "__main__": exchange = None if len(sys.argv) == 2: if sys.argv[1] == "debug": # Exchange outputs using debug mode. exchange = Exchange(debug="dump") elif sys.argv[1] == "none": # Exchange won't output anything. exchange = Exchange(debug="none") else: raise Exception("Command line argument should be either 'dump' or 'none'") else: exchange = Exchange() exchange.open_exchange() input() # Pressing the enter key will cause the server process to terminate. exchange.close_exchange()
0
0
0
9f8dc9d33b7c561ec7708fe6db9f376c20086e3c
42
py
Python
ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py
russellcaughey/ml-agents
493c75bf683d35d512ae6fb57d4a1a332116df15
[ "Apache-2.0" ]
3
2018-09-18T13:40:29.000Z
2019-02-14T07:30:09.000Z
ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py
russellcaughey/ml-agents
493c75bf683d35d512ae6fb57d4a1a332116df15
[ "Apache-2.0" ]
1
2020-04-27T01:52:49.000Z
2020-04-27T01:52:49.000Z
ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py
russellcaughey/ml-agents
493c75bf683d35d512ae6fb57d4a1a332116df15
[ "Apache-2.0" ]
2
2019-09-10T16:05:48.000Z
2020-07-24T20:40:26.000Z
from .signal import ExtrinsicRewardSignal
21
41
0.880952
from .signal import ExtrinsicRewardSignal
0
0
0
a3dcdb967f844c2c93436cc07445e0c92c4d3a7d
99
py
Python
server_prod.py
techx/evolution-chamber
dea9b7d563df6f06d270078f5c512e3f7e367a92
[ "MIT" ]
4
2015-06-22T15:44:57.000Z
2015-06-22T15:57:03.000Z
server_prod.py
techx/evolution-chamber
dea9b7d563df6f06d270078f5c512e3f7e367a92
[ "MIT" ]
null
null
null
server_prod.py
techx/evolution-chamber
dea9b7d563df6f06d270078f5c512e3f7e367a92
[ "MIT" ]
2
2015-07-09T15:21:37.000Z
2016-02-02T15:59:09.000Z
import server if __name__ == "__main__": server.app.run(host='0.0.0.0',port=5000,debug=False)
19.8
56
0.686869
import server if __name__ == "__main__": server.app.run(host='0.0.0.0',port=5000,debug=False)
0
0
0
da72584d02e46192004671f6611a889c0dd3c753
2,533
py
Python
datahub/email_ingestion/emails.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
6
2019-12-02T16:11:24.000Z
2022-03-18T10:02:02.000Z
datahub/email_ingestion/emails.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
1,696
2019-10-31T14:08:37.000Z
2022-03-29T12:35:57.000Z
datahub/email_ingestion/emails.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
9
2019-11-22T12:42:03.000Z
2021-09-03T14:25:05.000Z
import tempfile from logging import getLogger import mailparser from django.conf import settings from django.core.exceptions import ImproperlyConfigured from datahub.documents import utils as documents from datahub.interaction.email_processors.processors import CalendarInteractionEmailProcessor logger = getLogger(__name__) BUCKET_ID = 'mailbox' def get_mail_docs_in_bucket(): """ Gets all mail documents in the bucket. """ if BUCKET_ID not in settings.DOCUMENT_BUCKETS: raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" is missing in settings') config = settings.DOCUMENT_BUCKETS[BUCKET_ID] if 'bucket' not in config: raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" not configured properly in settings') name = config['bucket'] if not name: raise ImproperlyConfigured( f'Bucket "{BUCKET_ID}" bucket value not configured properly in settings', ) client = documents.get_s3_client_for_bucket(bucket_id=BUCKET_ID) paginator = client.get_paginator('list_objects') for page in paginator.paginate(Bucket=name): for doc in page.get('Contents') or []: key = doc['Key'] with tempfile.TemporaryFile(mode='w+b') as f: client.download_fileobj(Bucket=name, Key=key, Fileobj=f) f.seek(0) content = f.read() yield {'source': key, 'content': content} def process_ingestion_emails(): """ Gets all new mail documents in the bucket and process each message. """ processor = CalendarInteractionEmailProcessor() for message in get_mail_docs_in_bucket(): source = message['source'] try: documents.delete_document(bucket_id=BUCKET_ID, document_key=message['source']) except Exception as e: logger.exception('Error deleting message: "%s", error: "%s"', source, e) continue try: email = mailparser.parse_from_bytes(message['content']) processed, reason = processor.process_email(message=email) if not processed: logger.error('Error parsing message: "%s", error: "%s"', source, reason) else: logger.info(reason) except Exception as e: logger.exception('Error processing message: "%s", error: "%s"', source, e) logger.info( 'Successfully processed message "%s" and deleted document from bucket "%s"', source, BUCKET_ID, )
33.773333
95
0.649428
import tempfile from logging import getLogger import mailparser from django.conf import settings from django.core.exceptions import ImproperlyConfigured from datahub.documents import utils as documents from datahub.interaction.email_processors.processors import CalendarInteractionEmailProcessor logger = getLogger(__name__) BUCKET_ID = 'mailbox' def get_mail_docs_in_bucket(): """ Gets all mail documents in the bucket. """ if BUCKET_ID not in settings.DOCUMENT_BUCKETS: raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" is missing in settings') config = settings.DOCUMENT_BUCKETS[BUCKET_ID] if 'bucket' not in config: raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" not configured properly in settings') name = config['bucket'] if not name: raise ImproperlyConfigured( f'Bucket "{BUCKET_ID}" bucket value not configured properly in settings', ) client = documents.get_s3_client_for_bucket(bucket_id=BUCKET_ID) paginator = client.get_paginator('list_objects') for page in paginator.paginate(Bucket=name): for doc in page.get('Contents') or []: key = doc['Key'] with tempfile.TemporaryFile(mode='w+b') as f: client.download_fileobj(Bucket=name, Key=key, Fileobj=f) f.seek(0) content = f.read() yield {'source': key, 'content': content} def process_ingestion_emails(): """ Gets all new mail documents in the bucket and process each message. """ processor = CalendarInteractionEmailProcessor() for message in get_mail_docs_in_bucket(): source = message['source'] try: documents.delete_document(bucket_id=BUCKET_ID, document_key=message['source']) except Exception as e: logger.exception('Error deleting message: "%s", error: "%s"', source, e) continue try: email = mailparser.parse_from_bytes(message['content']) processed, reason = processor.process_email(message=email) if not processed: logger.error('Error parsing message: "%s", error: "%s"', source, reason) else: logger.info(reason) except Exception as e: logger.exception('Error processing message: "%s", error: "%s"', source, e) logger.info( 'Successfully processed message "%s" and deleted document from bucket "%s"', source, BUCKET_ID, )
0
0
0
0af7288a9052da637b85d240b67185965f20ec48
1,105
py
Python
classes/rooms.py
Loekring/Neversoft
a9e600131585741652b62b2dbbaa2febc1656843
[ "MIT" ]
1
2018-01-21T21:15:52.000Z
2018-01-21T21:15:52.000Z
classes/rooms.py
Loekring/Neversoft
a9e600131585741652b62b2dbbaa2febc1656843
[ "MIT" ]
null
null
null
classes/rooms.py
Loekring/Neversoft
a9e600131585741652b62b2dbbaa2febc1656843
[ "MIT" ]
null
null
null
import random as r offBoundsMsgs = ["Der er ikkje noko i den retninga.", "Du møtte ein vegg.", "Du kjem deg ikkje vidare i den retninga."] roomSizeX, roomSizeY = 2, 1 # Dette er baseklassa til allle romma
31.571429
119
0.6181
import random as r offBoundsMsgs = ["Der er ikkje noko i den retninga.", "Du møtte ein vegg.", "Du kjem deg ikkje vidare i den retninga."] roomSizeX, roomSizeY = 2, 1 class Rooms: # Dette er baseklassa til allle romma def __init__(self, name, smell, feel, taste, look, sound, jump): self.name = name self.smell = smell self.feel = feel self.taste = taste self.look = look self.sound = sound self.jump = jump def __str__(self): return "Du er no i {}.".format(self.name) def Roomsmell(self): return "Rommet luktar {}.".format(self.smell) def Roomfeel(self): return "Du kjenner {}.".format(self.feel) def Roomtaste(self): return "Du sleikjer rundt om i rommet og kjenner smaken av {}.".format(self.taste) def Roomlook(self): return "Du ser rundt i rommet og ser {}.".format(self.look) def Roomsound(self): return "Om du er heilt stille kan du høre lyden av {}.".format(self.sound) def Roomjump(self): return "Du hoppar opp og {}.".format(self.jump)
669
-9
233
9c6cb2f62249c9249426fed5a021326cf26ae2cd
3,970
py
Python
pymatflow/vasp/scripts/vasp-dfpt.py
DeqiTang/pymatflow
bd8776feb40ecef0e6704ee898d9f42ded3b0186
[ "MIT" ]
6
2020-03-06T16:13:08.000Z
2022-03-09T07:53:34.000Z
pymatflow/vasp/scripts/vasp-dfpt.py
DeqiTang/pymatflow
bd8776feb40ecef0e6704ee898d9f42ded3b0186
[ "MIT" ]
1
2021-10-02T02:23:08.000Z
2021-11-08T13:29:37.000Z
pymatflow/vasp/scripts/vasp-dfpt.py
DeqiTang/pymatflow
bd8776feb40ecef0e6704ee898d9f42ded3b0186
[ "MIT" ]
1
2021-07-10T16:28:14.000Z
2021-07-10T16:28:14.000Z
#!/usr/bin/env python # _*_ coding: utf-8 _*_ import os import argparse from pymatflow.vasp.dfpt import dfpt_run """ usage: """ params = {} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-d", "--directory", type=str, default="tmp-vasp-static", help="directory of the static running") parser.add_argument("-f", "--file", type=str, help="the xyz file name") parser.add_argument("--runopt", type=str, default="gen", help="gen, run, or genrun") parser.add_argument("--auto", type=int, default=3, help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf") parser.add_argument("--mode", type=int, default=0, choices=[0, 1], help="run mode for dfpt. 0: brand new with a new directory; 1: continue in the existing directory") # -------------------------------------------------------- # INCAR PARAMETERS # -------------------------------------------------------- parser.add_argument("--prec", type=str, default="Normal", choices=["Normal", "Accurate", "A", "N"], help="PREC, default value: Normal") parser.add_argument("--encut", type=int, default=300, help="ENCUT, default value: 300 eV") parser.add_argument("--ediff", type=float, default=1.0e-4, help="EDIFF, default value: 1.0e-4") parser.add_argument("--kpoints-mp", type=int, nargs="+", default=[1, 1, 1, 0, 0, 0], help="set kpoints like -k 1 1 1 0 0 0") parser.add_argument("--ismear", type=int, default=0, help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0") parser.add_argument("--sigma", type=float, default=0.01, help="determines the width of the smearing in eV.") # ----------------------------------------------------------------- # ---------------------- # properties parametes # --------------------- #parser.add_argument("--lorbit", help="", type=int, default=None) #parser.add_argument("--loptics", help="", type=str, default="FALSE") # ----------------------------------------------------------------- # run params # ----------------------------------------------------------------- parser.add_argument("--mpi", type=str, default="", help="MPI command") parser.add_argument("--server", type=str, default="pbs", choices=["pbs", "yh", "lsf_sz"], help="type of remote server, can be pbs or yh or lsf_sz") parser.add_argument("--jobname", type=str, default="vasp-dfpt", help="jobname on the pbs server") parser.add_argument("--nodes", type=int, default=1, help="Nodes used in server") parser.add_argument("--ppn", type=int, default=32, help="ppn of the server") # ========================================================== # transfer parameters from the arg parser to static_run setting # ========================================================== args = parser.parse_args() params["PREC"] = args.prec params["ENCUT"] = args.encut params["EDIFF"] = args.ediff params["ISMEAR"] = args.ismear params["SIGMA"] = args.sigma task = dfpt_run() task.get_xyz(args.file) task.set_params(params=params) task.set_kpoints(kpoints_mp=args.kpoints_mp) task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn) task.dfpt(directory=args.directory, runopt=args.runopt, auto=args.auto, mode=args.mode)
36.090909
251
0.522418
#!/usr/bin/env python # _*_ coding: utf-8 _*_ import os import argparse from pymatflow.vasp.dfpt import dfpt_run """ usage: """ params = {} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-d", "--directory", type=str, default="tmp-vasp-static", help="directory of the static running") parser.add_argument("-f", "--file", type=str, help="the xyz file name") parser.add_argument("--runopt", type=str, default="gen", help="gen, run, or genrun") parser.add_argument("--auto", type=int, default=3, help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf") parser.add_argument("--mode", type=int, default=0, choices=[0, 1], help="run mode for dfpt. 0: brand new with a new directory; 1: continue in the existing directory") # -------------------------------------------------------- # INCAR PARAMETERS # -------------------------------------------------------- parser.add_argument("--prec", type=str, default="Normal", choices=["Normal", "Accurate", "A", "N"], help="PREC, default value: Normal") parser.add_argument("--encut", type=int, default=300, help="ENCUT, default value: 300 eV") parser.add_argument("--ediff", type=float, default=1.0e-4, help="EDIFF, default value: 1.0e-4") parser.add_argument("--kpoints-mp", type=int, nargs="+", default=[1, 1, 1, 0, 0, 0], help="set kpoints like -k 1 1 1 0 0 0") parser.add_argument("--ismear", type=int, default=0, help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0") parser.add_argument("--sigma", type=float, default=0.01, help="determines the width of the smearing in eV.") # ----------------------------------------------------------------- # ---------------------- # properties parametes # --------------------- #parser.add_argument("--lorbit", help="", type=int, default=None) #parser.add_argument("--loptics", help="", type=str, default="FALSE") # ----------------------------------------------------------------- # run params # ----------------------------------------------------------------- parser.add_argument("--mpi", type=str, default="", help="MPI command") parser.add_argument("--server", type=str, default="pbs", choices=["pbs", "yh", "lsf_sz"], help="type of remote server, can be pbs or yh or lsf_sz") parser.add_argument("--jobname", type=str, default="vasp-dfpt", help="jobname on the pbs server") parser.add_argument("--nodes", type=int, default=1, help="Nodes used in server") parser.add_argument("--ppn", type=int, default=32, help="ppn of the server") # ========================================================== # transfer parameters from the arg parser to static_run setting # ========================================================== args = parser.parse_args() params["PREC"] = args.prec params["ENCUT"] = args.encut params["EDIFF"] = args.ediff params["ISMEAR"] = args.ismear params["SIGMA"] = args.sigma task = dfpt_run() task.get_xyz(args.file) task.set_params(params=params) task.set_kpoints(kpoints_mp=args.kpoints_mp) task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn) task.dfpt(directory=args.directory, runopt=args.runopt, auto=args.auto, mode=args.mode)
0
0
0
9d62cac37a74dba044cd1a53d16dc1255a546ab1
260
py
Python
python5.py
audstanley/nodePythonProcessWatcher
cf3b707af81c837b99c5b2d955cf0d718e286e81
[ "MIT" ]
null
null
null
python5.py
audstanley/nodePythonProcessWatcher
cf3b707af81c837b99c5b2d955cf0d718e286e81
[ "MIT" ]
null
null
null
python5.py
audstanley/nodePythonProcessWatcher
cf3b707af81c837b99c5b2d955cf0d718e286e81
[ "MIT" ]
null
null
null
from python5_unixSocket import interComs myInterComs = interComs() myInterComs.run() import sys from time import sleep while True: print("MESSAGES FROM PYTHON 5") sys.stdout.flush() myInterComs.send( {"wordDawg": "from python5"} ) sleep(0.500)
23.636364
52
0.726923
from python5_unixSocket import interComs myInterComs = interComs() myInterComs.run() import sys from time import sleep while True: print("MESSAGES FROM PYTHON 5") sys.stdout.flush() myInterComs.send( {"wordDawg": "from python5"} ) sleep(0.500)
0
0
0
2c4146e35515d5d11823006c020a481717320a31
1,909
py
Python
Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py
jmcouffin/revitron-ui
f67739488b504cdb0cabe36e088a40fe3cd2b282
[ "MIT" ]
null
null
null
Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py
jmcouffin/revitron-ui
f67739488b504cdb0cabe36e088a40fe3cd2b282
[ "MIT" ]
null
null
null
Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py
jmcouffin/revitron-ui
f67739488b504cdb0cabe36e088a40fe3cd2b282
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Define extensions to be used with this Revit model. Defined extensions can be installed by using the "Install Extensions" button. """ import revitron import System.Windows from pyrevit import script from rpw.ui.forms import FlexForm, TextBox, Button, Label if not revitron.Document().isFamily(): config = revitron.DocumentConfigStorage().get('rpm.extensions') components = [ Label('You can define a list of pyRevit extensions to be used with the currently active model.\n' 'That list will be stored in the project information and therefore can be easily distributed\n' 'among other team members to easly create a common work environment.\n' 'To install or switch to the extension saved with your project just hit the "Install Extensions" button.\n\n' 'Enter one extension per line providing the type of the extension ("ui" or "lib")\n' 'and the repository URL separated by a TAB as follows:', FontSize=14, Height=140, Width=650), Label('ui https://ui-extension-repository.git\r\nlib https://lib-extension-repository.git', FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14, Height=50, Width=650), TextBox('extensions', Text=config, TextWrapping=System.Windows.TextWrapping.Wrap, AcceptsTab=True, AcceptsReturn=True, Multiline=True, Height=200, Width=650, FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14), Button('Open Documentation', on_click=openHelp, Width=650), Button('Save', Width=650) ] form = FlexForm('Project Extensions', components) form.show() if 'extensions' in form.values: revitron.DocumentConfigStorage().set('rpm.extensions', form.values.get('extensions'))
37.431373
130
0.707177
# -*- coding: utf-8 -*- """ Define extensions to be used with this Revit model. Defined extensions can be installed by using the "Install Extensions" button. """ import revitron import System.Windows from pyrevit import script from rpw.ui.forms import FlexForm, TextBox, Button, Label def openHelp(sender, e): script.open_url('https://revitron-ui.readthedocs.io/en/latest/tools/rpm.html') if not revitron.Document().isFamily(): config = revitron.DocumentConfigStorage().get('rpm.extensions') components = [ Label('You can define a list of pyRevit extensions to be used with the currently active model.\n' 'That list will be stored in the project information and therefore can be easily distributed\n' 'among other team members to easly create a common work environment.\n' 'To install or switch to the extension saved with your project just hit the "Install Extensions" button.\n\n' 'Enter one extension per line providing the type of the extension ("ui" or "lib")\n' 'and the repository URL separated by a TAB as follows:', FontSize=14, Height=140, Width=650), Label('ui https://ui-extension-repository.git\r\nlib https://lib-extension-repository.git', FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14, Height=50, Width=650), TextBox('extensions', Text=config, TextWrapping=System.Windows.TextWrapping.Wrap, AcceptsTab=True, AcceptsReturn=True, Multiline=True, Height=200, Width=650, FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14), Button('Open Documentation', on_click=openHelp, Width=650), Button('Save', Width=650) ] form = FlexForm('Project Extensions', components) form.show() if 'extensions' in form.values: revitron.DocumentConfigStorage().set('rpm.extensions', form.values.get('extensions'))
83
0
23
d570100b492c0df602a33bf7fd31f800015b364c
3,653
py
Python
src/features/spectrum.py
vikigenius/neural_speaker_identification
a723290808d748daf65163b71aef2c5376319db3
[ "MIT" ]
1
2019-07-27T00:32:02.000Z
2019-07-27T00:32:02.000Z
src/features/spectrum.py
vikigenius/neural_speaker_identification
a723290808d748daf65163b71aef2c5376319db3
[ "MIT" ]
null
null
null
src/features/spectrum.py
vikigenius/neural_speaker_identification
a723290808d748daf65163b71aef2c5376319db3
[ "MIT" ]
1
2019-07-27T00:32:06.000Z
2019-07-27T00:32:06.000Z
#!/usr/bin/env python import logging import numpy as np import librosa import scipy from random import randint from src.utils.math_utils import nextpow2 logger = logging.getLogger(__name__)
30.957627
79
0.595401
#!/usr/bin/env python import logging import numpy as np import librosa import scipy from random import randint from src.utils.math_utils import nextpow2 logger = logging.getLogger(__name__) class Spectrum(object): def __init__(self, hparams): self.sample_freq = hparams.sample_freq self.duration = hparams.duration self.preprocess = hparams.preprocess self.Tw = hparams.window_size self.Ts = hparams.window_shift self.win_type = hparams.window_type if self.sample_freq == 16000: self.dc_alpha = 0.99 elif self.sample_freq == 8000: self.dc_alpha = 0.999 else: raise ValueError('Only 16 and 8Khz supported') self.pe_alpha = 0.97 def _sample(self, signal, seqlen: int): """ Helper function to sample a contiguos subsequence of length seqlen from signal Args: signal: numpy.ndarray, the signal seqlen: int, the sequence length Returns: numpy.ndarray, the sampled signal """ nframes = len(signal) roffset = randint(0, nframes - seqlen) sampled = signal[roffset:roffset+seqlen] return sampled def _get_resampled_chunks(self, afile: str): """ Takes in a string path afile and returns chunks of audio each representing a 16-bit mono channel with sampling rate = 16000 Args: afile: path of audio file Returns: List[np.ndarray] """ # Load the file signal, _ = librosa.load(afile, sr=self.sample_freq, mono=True) nframes = len(signal) duration = nframes/self.sample_freq if duration <= self.duration: logger.warn(f'Duration less than specified for {afile}') chunks = [] if duration > 2*self.duration: # Can sample 2 chunks mid = int(nframes/2) chunks.append(signal[:mid]) chunks.append(signal[mid:]) else: chunks.append(signal) num_samples = int(self.duration*self.sample_freq) chunks = [self._sample(chunk, num_samples) for chunk in chunks] return chunks def _preprocess(self, signal): # Remove DC component and add a small dither signal = scipy.signal.lfilter([1, -1], [1, -self.dc_alpha], signal) dither = np.random.random_sample( signal.shape) + np.random.random_sample( signal.shape) - 1 spow = np.std(signal) signal = signal + 1e-6*spow*dither signal = scipy.signal.lfilter([1 - self.pe_alpha], 1, signal) return signal def generate(self, afile: str): """ Takes in a string path afile and returns a numpy nd array representing the magnitude spectrum of the signal Args: afile: path of audio file Returns: numpy.ndarray """ resampled_chunks = self._get_resampled_chunks(afile) if self.preprocess: processed = [self._preprocess(chunk) for chunk in resampled_chunks] else: processed = resampled_chunks # stft sf = self.sample_freq Tw = self.Tw # Window size Ts = self.Ts Nw = round(1e-3*Tw*sf) Ns = round(1e-3*Ts*sf) n_fft = 2**nextpow2(Nw) spectrograms = [librosa.core.stft( chunk, n_fft=n_fft, hop_length=Ns, win_length=Nw, window=self.win_type) for chunk in processed] mag_specs = [np.abs(chunk) for chunk in spectrograms] return mag_specs
938
2,499
23
1e6f1908f373e61b9009dc90b043eed4cfc23a7c
1,561
py
Python
src/network.py
Renerick/python-neural-network
552da90999622232d0e7061c690c972f7a2201d0
[ "MIT" ]
1
2019-08-12T09:15:12.000Z
2019-08-12T09:15:12.000Z
src/network.py
Renerick/python-neural-network
552da90999622232d0e7061c690c972f7a2201d0
[ "MIT" ]
null
null
null
src/network.py
Renerick/python-neural-network
552da90999622232d0e7061c690c972f7a2201d0
[ "MIT" ]
null
null
null
import numpy as np import pickle @np.vectorize
26.457627
70
0.559257
import numpy as np import pickle @np.vectorize def deriv(x): return 1. if x > 0 else 0. class Network: def __init__(self, *args): np.random.seed(1) all_layers = args self.hidden_layers = [] self.biases = [] for l in zip(all_layers, all_layers[1:]): self.hidden_layers.append(np.random.rand(*l) * 2 - 1) self.biases.append(np.random.rand(l[1]) * 2 - 1) self.iteration = 0 self.epoch = 0 self.learning_rate = 0.1 def predict(self, input_data): values = np.array(input_data) for layer, bias in zip(self.hidden_layers, self.biases): values = np.maximum(np.dot(values, layer) + bias, 0) return values def train(self, input_data, target): self.iteration += 1 target = np.array(target) prediction = self.predict(input_data) for layer in self.hidden_layers[::-1]: errors = target - prediction gradients = deriv(prediction) gradients *= errors gradients *= self.learning_rate delta = errors * gradients print(target, prediction, errors, gradients, layer, delta) target = layer layer -= delta prediction = layer @staticmethod def load(path="model.bin"): f = open(path, 'rb') network = pickle.load(f) f.close() return network def save(self, path="model.bin"): f = open(path, 'wb') pickle.dump(self, f) f.close()
1,320
146
45
01fd056ce41c1c67b73640a90525a86f7223ab98
51,070
py
Python
backend/grafit/migrations/0003_load_data.py
fossabot/grafit
c7328cc7ed4d37d36fc735944aa8763fad090d97
[ "MIT" ]
16
2018-10-12T16:33:52.000Z
2020-06-23T20:11:34.000Z
backend/grafit/migrations/0003_load_data.py
fossabot/grafit
c7328cc7ed4d37d36fc735944aa8763fad090d97
[ "MIT" ]
41
2018-10-14T21:28:38.000Z
2021-06-10T22:01:45.000Z
backend/grafit/migrations/0003_load_data.py
fossabot/grafit
c7328cc7ed4d37d36fc735944aa8763fad090d97
[ "MIT" ]
4
2018-10-28T10:47:26.000Z
2020-07-20T04:17:04.000Z
# Generated by Django 2.1.2 on 2018-10-25 09:36 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import uuid
100.928854
1,002
0.739495
# Generated by Django 2.1.2 on 2018-10-25 09:36 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import uuid class Migration(migrations.Migration): dependencies = [ ('grafit', '0002_article'), ] operations = [ migrations.RunSQL(""" INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License. 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6] On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size. Indexing Fields in a MongoDB document can be indexed with primary and secondary indices. Replication MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default. Load balancing[10] MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution. MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. '); INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. 
Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8] Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9] Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.) Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. '); INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16] Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. 
After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17] In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] '); INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system." "We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services." "We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."'); INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link. Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7). Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action. According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc." phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms. Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases. 
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link. "A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post. However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table. "If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc." Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible. '); INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed. When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design. "Who care about database design? What about mockups? What about workflows?" Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses. That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES. We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used. Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it. That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. 
Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this. Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer. "Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that." Except you couldn''t enter a customer without an order. Except you couldn''t enter an order without at least one item on it. Except you couldn''t enter an item unless it was listed in inventory. Except that reserved the "inventory" item and made it unavailable. Except, except, except ... It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember. Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them. If they had simply had a proper database design up front, they could have reused their existing system with little trouble. That''s what bad database design costs you and why I usually start with that before writing my software. Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.'); INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub. Learn more about actions As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you'); INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', ' The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves. Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update. Affected products GitHub Desktop GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. 
We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app. Atom Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch. Ensure you’re on the latest Atom release by completing any of the following: Windows: From the toolbar, click Help -> Check for Updates MacOS: From the menu bar, click Atom -> Check for Update Linux: Update manually by downloading the latest release from atom.io Git on the command line and other clients In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other. Additional notes Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9. Details of the vulnerability This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself. The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix. The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability). The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands. We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added). Please update your copy of Git soon, and happy cloning! '); INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet. 
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux. The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet. So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others. To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs. What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.'); INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres. rbanffy on Aug 18, 2012 [-] I think this would be a mistake. This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle. All that is lost is the MySQL name and brand. PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now. Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this. Udo on Aug 18, 2012 [-] I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project! MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing. Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem. Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own. The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. 
'); INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts? Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.'); INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code. Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files. '); INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness. This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming. What is PostgreSQL? PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge. A Brief History of PostgreSQL PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates. 1977-1985 − A project called INGRES was developed. 
Proof-of-concept for relational databases Established the company Ingres in 1980 Bought by Computer Associates in 1994 1986-1994 − POSTGRES Development of the concepts in INGRES with a focus on object orientation and the query language - Quel The code base of INGRES was not used as a basis for POSTGRES Commercialized as Illustra (bought by Informix, bought by IBM) 1994-1995 − Postgres95 Support for SQL was added in 1994 Released as Postgres95 in 1995 Re-released as PostgreSQL 6.0 in 1996 Establishment of the PostgreSQL Global Development Team Key Features of PostgreSQL PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC). PostgreSQL supports a large part of the SQL standard and offers many modern features including the following − Complex SQL queries SQL Sub-selects Foreign keys Trigger Views Transactions Multiversion concurrency control (MVCC) Streaming Replication (as of 9.0) Hot Standby (as of 9.0) You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new − Data types Functions Operators Aggregate functions Index methods Procedural Languages Support PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.'); INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup. I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try. Install Directly or not? On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do. In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM. Installing Docker Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled. After logging back in I then got the following message about hardware-assisted virtualization not being enabled. After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen. Open a command prompt and run the following command. docker run hello-world You should output that starts with the following if your installation is working. Hello from Docker! 
This message shows that your installation appears to be working correctly. What about Postgres? Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container. docker create -v /var/lib/postgresql/data --name PostgresData alpine The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects. Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container. If you run the docker ps -a command it will show you all your containers. As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running. '); INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it. The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.) Step 1: Install PostgreSQL Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution): Open a terminal window. Issue the command sudo apt-get install postgresql. Type the sudo password necessary to give you admin rights and hit Enter. Allow apt to pick up any necessary dependencies. Once the installation is complete, it''s time to set this baby up. Step 2: Change the default user password Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure. Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so: Open a terminal window. Issue the command sudo passwd postgres. Type (and confirm) that password to be used for this user. 
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like: postgres=# All other users have to gain access to the prompt like so: psql DB_NAME where DB_NAME is the name of an existing database. '); INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL. This seems to be the journey: 1. Lack of migrations is awesome! We can iterate so quickly for MVP 2. Get users 3. Add features, still enjoying the speed of iteration 4. Get more users 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts) 6. Realise you desperately need joins, transactions and other SQL features 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back. I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? My thought is definitely yes. brandur on Aug 29, 2017 [-] > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project. The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this. The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about. Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week. martinald on Aug 29, 2017 [-] I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away. I think the thing people do find slow is a lot of ''documents within documents'' in SQL. 
It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done. Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly. I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly. I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. '); INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan. The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto. Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message. As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial. A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision. Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.'); INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts. I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.” I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices. Red Bull could sponsor it. I’d buy a T-shirt. kbenson 8 months ago [-] That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. 
Let people submit pull requests with new technologies or configs. You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something. If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource. Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already... 1: https://github.com/gothinkster/realworld 2: https://www.techempower.com/benchmarks/ etxm 8 months ago [-] Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :) It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time. It would be cool to see things like disaster recovery and chaos proofing as well. '); INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community. Leveraging the community There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed. On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release. For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system. Always quality focused No machine learning model is perfect. 
While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert. Learn more'); INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query. Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack. Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads. maxxxxx 8 months ago [-] Agreed. Switching to another system is expensive and the benefit is pretty questionable. emsy 8 months ago [-] Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once. TremendousJudge 8 months ago [-] expand, please? maxxxxx 8 months ago [-] I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure. In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other. gopalv 8 months ago [-] > Depending on your data some databases may be better than others and that should be easy to measure. And the performance difference could be an accidental feature of the design and completely unintentional. Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated. Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs). When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural. And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two. Though how it came about isn''t really intentional. '); """), ]
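Editorial aside on the record above: the quoted "Setup PostgreSQL on Windows with Docker" article walks through starting a Postgres container on localhost:5432 with a password passed via POSTGRES_PASSWORD. A minimal Python sketch of checking that connection is given below; it is the editor's illustration, not part of the SQL dump. It assumes psycopg2 is installed, the container from that article is running, and "yourPassword" is whatever was passed to POSTGRES_PASSWORD.

import psycopg2

# Connect to the dockerized PostgreSQL described in the article above.
# Host/port/credentials mirror the docker run example; adjust as needed.
conn = psycopg2.connect(
    host="localhost",
    port=5432,
    user="postgres",
    password="yourPassword",  # placeholder from the quoted article
    dbname="postgres",
)
with conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone())
conn.close()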
0
50,949
23
ddfea5bd5d0e0cf8608cb0a07599e5e6b06f933e
494
py
Python
Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
1
2020-04-13T00:16:16.000Z
2020-04-13T00:16:16.000Z
Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
null
null
null
Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
null
null
null
import pandas as pd

# Create a dictionary where each key will become a column
# and its value holds the data for that column
data = {'paises': ['Mexico', 'España', 'Estados Unidos'],
        'Ciudades': ['Monterrey', 'Madrid', 'Nueva York'],
        'Casos': [4291, 3829, 10283]}

# Create a DataFrame from the dictionary and
# spell out the columns explicitly
df = pd.DataFrame(data, columns=['paises', 'Ciudades', 'Casos'])

# Print the info
print(df)

# Store it in a CSV file
df.to_csv('myDataFrame.csv')
27.444444
64
0.700405
import pandas as pd

# Create a dictionary where each key will become a column
# and its value holds the data for that column
data = {'paises': ['Mexico', 'España', 'Estados Unidos'],
        'Ciudades': ['Monterrey', 'Madrid', 'Nueva York'],
        'Casos': [4291, 3829, 10283]}

# Create a DataFrame from the dictionary and
# spell out the columns explicitly
df = pd.DataFrame(data, columns=['paises', 'Ciudades', 'Casos'])

# Print the info
print(df)

# Store it in a CSV file
df.to_csv('myDataFrame.csv')
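A brief note on the script above: because df.to_csv() is called without index=False, the DataFrame's integer index is written as the first CSV column. A minimal sketch of reading the file back follows; the index_col=0 argument is the editor's addition for illustration, not part of the original script.

import pandas as pd

# Read back the file written by the script above; the first column holds
# the row index because to_csv() was not given index=False.
df = pd.read_csv('myDataFrame.csv', index_col=0)
print(df)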
0
0
0
d3b313c3dd0ec4a73ea6c33bd5b776e0285a4fc6
30,581
py
Python
pxr/usd/usdLux/testenv/testUsdLuxLight.py
yurivict/USD
3b097e3ba8fabf1777a1256e241ea15df83f3065
[ "Apache-2.0" ]
1
2021-09-25T12:49:37.000Z
2021-09-25T12:49:37.000Z
pxr/usd/usdLux/testenv/testUsdLuxLight.py
yurivict/USD
3b097e3ba8fabf1777a1256e241ea15df83f3065
[ "Apache-2.0" ]
null
null
null
pxr/usd/usdLux/testenv/testUsdLuxLight.py
yurivict/USD
3b097e3ba8fabf1777a1256e241ea15df83f3065
[ "Apache-2.0" ]
1
2018-10-03T19:08:33.000Z
2018-10-03T19:08:33.000Z
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.

from __future__ import print_function

from pxr import Gf, Sdf, Sdr, Tf, Usd, UsdGeom, UsdLux, UsdShade, Plug
import unittest, math

if __name__ == '__main__':
    unittest.main()
48.083333
82
0.632059
#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. from __future__ import print_function from pxr import Gf, Sdf, Sdr, Tf, Usd, UsdGeom, UsdLux, UsdShade, Plug import unittest, math class TestUsdLuxLight(unittest.TestCase): def test_BlackbodySpectrum(self): warm_color = UsdLux.BlackbodyTemperatureAsRgb(1000) whitepoint = UsdLux.BlackbodyTemperatureAsRgb(6500) cool_color = UsdLux.BlackbodyTemperatureAsRgb(10000) # Whitepoint is ~= (1,1,1) assert Gf.IsClose(whitepoint, Gf.Vec3f(1.0), 0.1) # Warm has more red than green or blue assert warm_color[0] > warm_color[1] assert warm_color[0] > warm_color[2] # Cool has more blue than red or green assert cool_color[2] > cool_color[0] assert cool_color[2] > cool_color[1] def test_BasicConnectableLights(self): # Try checking connectableAPI on core lux types first before going # through the prim. self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI( UsdLux.RectLight)) self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI( UsdLux.PluginLightFilter)) stage = Usd.Stage.CreateInMemory() rectLight = UsdLux.RectLight.Define(stage, '/RectLight') self.assertTrue(rectLight) lightAPI = rectLight.LightAPI() self.assertTrue(lightAPI) self.assertTrue(lightAPI.ConnectableAPI()) # Rect light has the following built-in inputs attributes. inputNames = ['color', 'colorTemperature', 'diffuse', 'enableColorTemperature', 'exposure', 'height', 'intensity', 'normalize', 'specular', 'texture:file', 'width'] # GetInputs returns only authored inputs by default self.assertEqual(lightAPI.GetInputs(), []) # GetInputs(false) is a super-set of all the built-ins. # There could be other inputs coming from any auto applied APISchemas. allInputs = [inputName.GetBaseName() for inputName in lightAPI.GetInputs(onlyAuthored=False)] self.assertTrue(set(inputNames).issubset(set(allInputs))) # Verify each input's attribute is prefixed. for name in inputNames: self.assertEqual(lightAPI.GetInput(name).GetAttr().GetName(), "inputs:" + name) # Verify input attributes match the getter API attributes. self.assertEqual(lightAPI.GetInput('color').GetAttr(), rectLight.GetColorAttr()) self.assertEqual(lightAPI.GetInput('texture:file').GetAttr(), rectLight.GetTextureFileAttr()) # Create a new input, and verify that the input interface conforming # attribute is created. lightInput = lightAPI.CreateInput('newInput', Sdf.ValueTypeNames.Float) self.assertIn(lightInput, lightAPI.GetInputs()) # By default GetInputs() returns onlyAuthored inputs, of which # there is now 1. 
self.assertEqual(len(lightAPI.GetInputs()), 1) self.assertEqual(lightAPI.GetInput('newInput'), lightInput) self.assertEqual(lightInput.GetAttr(), lightAPI.GetPrim().GetAttribute("inputs:newInput")) # Rect light has no authored outputs. self.assertEqual(lightAPI.GetOutputs(), []) # Rect light has no built-in outputs, either. self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), []) # Create a new output, and verify that the output interface conforming # attribute is created. lightOutput = lightAPI.CreateOutput('newOutput', Sdf.ValueTypeNames.Float) self.assertEqual(lightAPI.GetOutputs(), [lightOutput]) self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), [lightOutput]) self.assertEqual(lightAPI.GetOutput('newOutput'), lightOutput) self.assertEqual(lightOutput.GetAttr(), lightAPI.GetPrim().GetAttribute("outputs:newOutput")) # Do the same with a light filter lightFilter = UsdLux.LightFilter.Define(stage, '/LightFilter') self.assertTrue(lightFilter) self.assertTrue(lightFilter.ConnectableAPI()) # Light filter has no built-in inputs. self.assertEqual(lightFilter.GetInputs(), []) # Create a new input, and verify that the input interface conforming # attribute is created. filterInput = lightFilter.CreateInput('newInput', Sdf.ValueTypeNames.Float) self.assertEqual(lightFilter.GetInputs(), [filterInput]) self.assertEqual(lightFilter.GetInput('newInput'), filterInput) self.assertEqual(filterInput.GetAttr(), lightFilter.GetPrim().GetAttribute("inputs:newInput")) # Light filter has no built-in outputs. self.assertEqual(lightFilter.GetOutputs(), []) self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False), []) # Create a new output, and verify that the output interface conforming # attribute is created. filterOutput = lightFilter.CreateOutput('newOutput', Sdf.ValueTypeNames.Float) self.assertEqual(lightFilter.GetOutputs(), [filterOutput]) self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False), [filterOutput]) self.assertEqual(lightFilter.GetOutput('newOutput'), filterOutput) self.assertEqual(filterOutput.GetAttr(), lightFilter.GetPrim().GetAttribute("outputs:newOutput")) # Test the connection behavior customization. # Create a connectable prim with an output under the light. lightGraph = UsdShade.NodeGraph.Define(stage, '/RectLight/Prim') self.assertTrue(lightGraph) lightGraphOutput = lightGraph.CreateOutput( 'graphOut', Sdf.ValueTypeNames.Float) self.assertTrue(lightGraphOutput) # Create a connectable prim with an output under the light filter. filterGraph = UsdShade.NodeGraph.Define(stage, '/LightFilter/Prim') self.assertTrue(filterGraph) filterGraphOutput = filterGraph.CreateOutput( 'graphOut', Sdf.ValueTypeNames.Float) self.assertTrue(filterGraphOutput) # Light outputs can be connected. self.assertTrue(lightOutput.CanConnect(lightGraphOutput)) self.assertTrue(lightOutput.CanConnect(filterGraphOutput)) # Light inputs diverge from the default behavior and should be # connectable across its own scope (encapsulation is not required) self.assertTrue(lightInput.CanConnect(lightOutput)) self.assertTrue(lightInput.CanConnect(lightGraphOutput)) self.assertTrue(lightInput.CanConnect(filterGraphOutput)) # From the default behavior light filter outputs cannot be connected. 
self.assertFalse(filterOutput.CanConnect(lightGraphOutput)) self.assertFalse(filterOutput.CanConnect(filterGraphOutput)) # Light filters inputs diverge from the default behavior and should be # connectable across its own scope (encapsulation is not required) self.assertTrue(filterInput.CanConnect(filterOutput)) self.assertTrue(filterInput.CanConnect(filterGraphOutput)) self.assertTrue(filterInput.CanConnect(lightGraphOutput)) # The shaping API can add more connectable attributes to the light # and implements the same connectable interface functions. We test # those here. shapingAPI = UsdLux.ShapingAPI.Apply(lightAPI.GetPrim()) self.assertTrue(shapingAPI) self.assertTrue(shapingAPI.ConnectableAPI()) # Verify input attributes match the getter API attributes. self.assertEqual(shapingAPI.GetInput('shaping:cone:angle').GetAttr(), shapingAPI.GetShapingConeAngleAttr()) self.assertEqual(shapingAPI.GetInput('shaping:focus').GetAttr(), shapingAPI.GetShapingFocusAttr()) # These inputs have the same connectable behaviors as all light inputs, # i.e. they should also diverge from the default behavior of only be # connected to sources from immediate descendant (encapsultated) prims # of the light. shapingInput = shapingAPI.GetInput('shaping:focus') self.assertTrue(shapingInput.CanConnect(lightOutput)) self.assertTrue(shapingInput.CanConnect(lightGraphOutput)) self.assertTrue(shapingInput.CanConnect(filterGraphOutput)) # The shadow API can add more connectable attributes to the light # and implements the same connectable interface functions. We test # those here. shadowAPI = UsdLux.ShadowAPI.Apply(lightAPI.GetPrim()) self.assertTrue(shadowAPI) self.assertTrue(shadowAPI.ConnectableAPI()) # Verify input attributes match the getter API attributes. self.assertEqual(shadowAPI.GetInput('shadow:color').GetAttr(), shadowAPI.GetShadowColorAttr()) self.assertEqual(shadowAPI.GetInput('shadow:distance').GetAttr(), shadowAPI.GetShadowDistanceAttr()) # These inputs have the same connectable behaviors as all light inputs, # i.e. they should also diverge from the default behavior of only be # connected to sources from immediate descendant (encapsultated) prims # of the light. shadowInput = shadowAPI.GetInput('shadow:color') self.assertTrue(shadowInput.CanConnect(lightOutput)) self.assertTrue(shadowInput.CanConnect(lightGraphOutput)) self.assertTrue(shadowInput.CanConnect(filterGraphOutput)) # Even though the shadow and shaping API schemas provide connectable # attributes and an interface for the ConnectableAPI, the typed schema # of the prim is still what provides its connectable behavior. Here # we verify that applying these APIs to a prim whose type is not # connectable does NOT cause the prim to conform to the Connectable API. nonConnectablePrim = stage.DefinePrim("/Sphere", "Sphere") shadowAPI = UsdLux.ShadowAPI.Apply(nonConnectablePrim) self.assertTrue(shadowAPI) self.assertFalse(shadowAPI.ConnectableAPI()) shapingAPI = UsdLux.ShapingAPI.Apply(nonConnectablePrim) self.assertTrue(shapingAPI) self.assertFalse(shapingAPI.ConnectableAPI()) def test_DomeLight_OrientToStageUpAxis(self): stage = Usd.Stage.CreateInMemory() # Try Y-up first. Explicitly set this to override any site-level # override. UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # Create a dome. light = UsdLux.DomeLight.Define(stage, '/dome') # No Xform ops to begin with. self.assertEqual(light.GetOrderedXformOps(), []) # Align to up axis. light.OrientToStageUpAxis() # Since the stage is already Y-up, no additional xform op was required. 
self.assertEqual(light.GetOrderedXformOps(), []) # Now change the stage to Z-up and re-align the dome. UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z) light.OrientToStageUpAxis() # That should require a +90 deg rotate on X. ops = light.GetOrderedXformOps() self.assertEqual(len(ops), 1) self.assertEqual(ops[0].GetBaseName(), UsdLux.Tokens.orientToStageUpAxis) self.assertEqual(ops[0].GetOpType(), UsdGeom.XformOp.TypeRotateX) self.assertEqual(ops[0].GetAttr().Get(), 90.0) def test_UsdLux_HasConnectableAPI(self): self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI( UsdLux.LightAPI)) self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI( UsdLux.LightFilter)) def test_GetShaderId(self): # Test the LightAPI shader ID API # UsdLuxLightAPI and UsdLuxLightFilter implement the same API for # their shaderId attributes so we can test them using the same function. def _TestShaderIDs(lightOrFilter, shaderIdAttrName): # The default render context's shaderId attribute does exist in the # API. These attributes do not yet exist for other contexts. self.assertEqual( lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(), shaderIdAttrName) self.assertFalse( lightOrFilter.GetShaderIdAttrForRenderContext("ri")) self.assertFalse( lightOrFilter.GetShaderIdAttrForRenderContext("other")) # By default LightAPI shader IDs are empty for all render contexts. self.assertEqual(lightOrFilter.GetShaderId([]), "") self.assertEqual(lightOrFilter.GetShaderId(["other", "ri"]), "") # Set a value in the default shaderID attr. lightOrFilter.GetShaderIdAttr().Set("DefaultLight") # No new attributes were created. self.assertEqual( lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(), shaderIdAttrName) self.assertFalse( lightOrFilter.GetShaderIdAttrForRenderContext("ri")) self.assertFalse( lightOrFilter.GetShaderIdAttrForRenderContext("other")) # The default value is now the shaderID returned for all render # contexts since no render contexts define their own shader ID self.assertEqual( lightOrFilter.GetShaderId([]), "DefaultLight") self.assertEqual( lightOrFilter.GetShaderId(["other", "ri"]), "DefaultLight") # Create a shaderID attr for the "ri" render context with a new ID # value. lightOrFilter.CreateShaderIdAttrForRenderContext("ri", "SphereLight") # The shaderId attr for "ri" now exists self.assertEqual( lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(), shaderIdAttrName) self.assertEqual( lightOrFilter.GetShaderIdAttrForRenderContext("ri").GetName(), "ri:" + shaderIdAttrName) self.assertFalse( lightOrFilter.GetShaderIdAttrForRenderContext("other")) # When passed no render contexts we still return the default # shader ID. self.assertEqual(lightOrFilter.GetShaderId([]), "DefaultLight") # Since we defined a shader ID for "ri" but not "other", the "ri" # shader ID is returned when queryring for both. 
Querying for just # "other" falls back to the default shaderID self.assertEqual( lightOrFilter.GetShaderId(["other", "ri"]), "SphereLight") self.assertEqual( lightOrFilter.GetShaderId(["ri"]), "SphereLight") self.assertEqual( lightOrFilter.GetShaderId(["other"]), "DefaultLight") # Create an untyped prim with a LightAPI applied and test the ShaderId # functions of UsdLux.LightAPI stage = Usd.Stage.CreateInMemory() prim = stage.DefinePrim("/PrimLight") light = UsdLux.LightAPI.Apply(prim) self.assertTrue(light) _TestShaderIDs(light, "light:shaderId") # Create a LightFilter prim and test the ShaderId functions of # UsdLux.LightFilter lightFilter = UsdLux.LightFilter.Define(stage, "/PrimLightFilter") self.assertTrue(lightFilter) _TestShaderIDs(lightFilter, "lightFilter:shaderId") def test_LightExtentAndBBox(self): # Test extent and bbox computations for the boundable lights. time = Usd.TimeCode.Default() # Helper for computing the extent and bounding boxes for a light and # comparing against an expect extent pair. def _VerifyExtentAndBBox(light, expectedExtent): self.assertEqual( UsdGeom.Boundable.ComputeExtentFromPlugins(light, time), expectedExtent) self.assertEqual( light.ComputeLocalBound(time, "default"), Gf.BBox3d( Gf.Range3d( Gf.Vec3d(expectedExtent[0]), Gf.Vec3d(expectedExtent[1])), Gf.Matrix4d(1.0))) # Create a prim of each boundable light type. stage = Usd.Stage.CreateInMemory() rectLight = UsdLux.RectLight.Define(stage, "/RectLight") self.assertTrue(rectLight) diskLight = UsdLux.DiskLight.Define(stage, "/DiskLight") self.assertTrue(diskLight) cylLight = UsdLux.CylinderLight.Define(stage, "/CylLight") self.assertTrue(cylLight) sphereLight = UsdLux.SphereLight.Define(stage, "/SphereLight") self.assertTrue(sphereLight) # Verify the extent and bbox computations for each light given its # fallback attribute values. _VerifyExtentAndBBox(rectLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)]) _VerifyExtentAndBBox(diskLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)]) _VerifyExtentAndBBox(cylLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)]) _VerifyExtentAndBBox(sphereLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)]) # Change the size related attribute of each light and verify the extents # and bounding boxes are updated. rectLight.CreateWidthAttr(4.0) rectLight.CreateHeightAttr(6.0) _VerifyExtentAndBBox(rectLight, [(-2.0, -3.0, 0.0), (2.0, 3.0, 0.0)]) diskLight.CreateRadiusAttr(5.0) _VerifyExtentAndBBox(diskLight, [(-5.0, -5.0, 0.0), (5.0, 5.0, 0.0)]) cylLight.CreateRadiusAttr(4.0) cylLight.CreateLengthAttr(10.0) _VerifyExtentAndBBox(cylLight, [(-4.0, -4.0, -5.0), (4.0, 4.0, 5.0)]) sphereLight.CreateRadiusAttr(3.0) _VerifyExtentAndBBox(sphereLight, [(-3.0, -3.0, -3.0), (3.0, 3.0, 3.0)]) # Special case for portal light. Portal lights don't have any attributes # that affect their extent. Extent values are used only when # explicitly authored but portal lights' do register a # ComputeExtentFuction, which sets the extent as default from the # schema. portalLight = UsdLux.PortalLight.Define(stage, "/PortalLight") self.assertTrue(portalLight) _VerifyExtentAndBBox(portalLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)]) # For completeness verify that distant and dome lights are not # boundable. 
domeLight = UsdLux.DomeLight.Define(stage, "/DomeLight") self.assertTrue(domeLight) self.assertFalse(UsdGeom.Boundable(domeLight)) distLight = UsdLux.DistantLight.Define(stage, "/DistLight") self.assertTrue(distLight) self.assertFalse(UsdGeom.Boundable(distLight)) def test_SdrShaderNodesForLights(self): """ Test the automatic registration of SdrShaderNodes for all the UsdLux light types. """ # The expected shader node inputs that should be found for all of our # UsdLux light types. expectedLightInputNames = [ # LightAPI 'color', 'colorTemperature', 'diffuse', 'enableColorTemperature', 'exposure', 'intensity', 'normalize', 'specular', # ShadowAPI 'shadow:color', 'shadow:distance', 'shadow:enable', 'shadow:falloff', 'shadow:falloffGamma', # ShapingAPI 'shaping:cone:angle', 'shaping:cone:softness', 'shaping:focus', 'shaping:focusTint', 'shaping:ies:angleScale', 'shaping:ies:file', 'shaping:ies:normalize' ] # Map of the names of the expected light nodes to the additional inputs # we expect for those types. expectedLightNodes = { 'CylinderLight' : ['length', 'radius'], 'DiskLight' : ['radius'], 'DistantLight' : ['angle'], 'DomeLight' : ['texture:file', 'texture:format'], 'GeometryLight' : [], 'PortalLight' : [], 'RectLight' : ['width', 'height', 'texture:file'], 'SphereLight' : ['radius'], 'MeshLight' : [], 'VolumeLight' : [] } # Get all the derived types of UsdLuxBoundableLightBase and # UsdLuxNonboundableLightBase that are defined in UsdLux lightTypes = list(filter( Plug.Registry().GetPluginWithName("usdLux").DeclaresType, Tf.Type(UsdLux.BoundableLightBase).GetAllDerivedTypes() + Tf.Type(UsdLux.NonboundableLightBase).GetAllDerivedTypes())) self.assertTrue(lightTypes) # Augment lightTypes to include MeshLightAPI and VolumeLightAPI lightTypes.append( Tf.Type.FindByName('UsdLuxMeshLightAPI')) lightTypes.append( Tf.Type.FindByName('UsdLuxVolumeLightAPI')) # Verify that at least one known light type is in our list to guard # against this giving false positives if no light types are available. self.assertIn(UsdLux.RectLight, lightTypes) self.assertEqual(len(lightTypes), len(expectedLightNodes)) stage = Usd.Stage.CreateInMemory() prim = stage.DefinePrim("/Prim") usdSchemaReg = Usd.SchemaRegistry() for lightType in lightTypes: print("Test SdrNode for schema type " + str(lightType)) if usdSchemaReg.IsAppliedAPISchema(lightType): prim.ApplyAPI(lightType) else: typeName = usdSchemaReg.GetConcreteSchemaTypeName(lightType) if not typeName: continue prim.SetTypeName(typeName) light = UsdLux.LightAPI(prim) self.assertTrue(light) sdrIdentifier = light.GetShaderId([]) self.assertTrue(sdrIdentifier) prim.ApplyAPI(UsdLux.ShadowAPI) prim.ApplyAPI(UsdLux.ShapingAPI) # Every concrete light type and some API schemas (with appropriate # shaderId as sdr Identifier) in usdLux domain will have an # SdrShaderNode with source type 'USD' registered for it under its # USD schema type name. node = Sdr.Registry().GetNodeByIdentifier(sdrIdentifier, ['USD']) self.assertTrue(node is not None) self.assertIn(sdrIdentifier, expectedLightNodes) # Names, identifier, and role for the node all match the USD schema # type name self.assertEqual(node.GetIdentifier(), sdrIdentifier) self.assertEqual(node.GetName(), sdrIdentifier) self.assertEqual(node.GetImplementationName(), sdrIdentifier) self.assertEqual(node.GetRole(), sdrIdentifier) self.assertTrue(node.GetInfoString().startswith(sdrIdentifier)) # The context is always 'light' for lights. 
# Source type is 'USD' self.assertEqual(node.GetContext(), 'light') self.assertEqual(node.GetSourceType(), 'USD') # Help string is generated and encoded in the node's metadata (no # need to verify the specific wording). self.assertTrue(set(node.GetMetadata().keys()), {'primvars', 'help'}) self.assertEqual(node.GetMetadata()["help"], node.GetHelp()) # Source code and URIs are all empty. self.assertFalse(node.GetSourceCode()) self.assertFalse(node.GetResolvedDefinitionURI()) self.assertFalse(node.GetResolvedImplementationURI()) # Other classifications are left empty. self.assertFalse(node.GetCategory()) self.assertFalse(node.GetDepartments()) self.assertFalse(node.GetFamily()) self.assertFalse(node.GetLabel()) self.assertFalse(node.GetVersion()) self.assertFalse(node.GetAllVstructNames()) self.assertEqual(node.GetPages(), ['']) # The node will be valid for our light types. self.assertTrue(node.IsValid()) # Helper for comparing an SdrShaderProperty from node to the # corresponding UsdShadeInput/UsdShadeOutput from a UsdLux light def _CompareLightPropToNodeProp(nodeInput, primInput): # Input names and default values match. primDefaultValue = primInput.GetAttr().Get() self.assertEqual(nodeInput.GetName(), primInput.GetBaseName()) self.assertEqual(nodeInput.GetDefaultValue(), primDefaultValue) # Some USD property types don't match exactly one to one and are # converted to different types. In particular relevance to # lights and Token becomes String. expectedTypeName = primInput.GetTypeName() # Array valued attributes have their array size determined from # the default value and will be converted to scalar in the # SdrProperty if the array size is zero. if expectedTypeName.isArray: if not primDefaultValue or len(primDefaultValue) == 0: expectedTypeName = expectedTypeName.scalarType elif expectedTypeName == Sdf.ValueTypeNames.Token: expectedTypeName = Sdf.ValueTypeNames.String # Bool SdfTypes should Have Int SdrTypes, but still return as # Bool when queried for GetTypeAsSdfType if expectedTypeName == Sdf.ValueTypeNames.Bool: self.assertEqual(nodeInput.GetType(), Sdf.ValueTypeNames.Int) # Verify the node's input type maps back to USD property's type # (with the noted above exceptions). self.assertEqual( nodeInput.GetTypeAsSdfType()[0], expectedTypeName, msg="{}.{} Type {} != {}".format( str(node.GetName()), str(nodeInput.GetName()), str(nodeInput.GetTypeAsSdfType()[0]), str(expectedTypeName))) # If the USD property type is an Asset, it will be listed in # the node's asset identifier inputs. if expectedTypeName == Sdf.ValueTypeNames.Asset: self.assertIn(nodeInput.GetName(), node.GetAssetIdentifierInputNames()) # There will be a one to one correspondence between node inputs # and prim inputs. Note that the prim may have additional inputs # because of auto applied API schemas, but we only need to verify # that the node has ONLY the expected inputs and the prim at least # has those input proerties. expectedInputNames = \ expectedLightInputNames + expectedLightNodes[sdrIdentifier] # Verify node has exactly the expected inputs. self.assertEqual(sorted(expectedInputNames), sorted(node.GetInputNames())) # Verify each node input matches a prim input. 
for inputName in expectedInputNames: nodeInput = node.GetInput(inputName) primInput = light.GetInput(inputName) self.assertFalse(nodeInput.IsOutput()) _CompareLightPropToNodeProp(nodeInput, primInput) # None of the UsdLux base lights have outputs self.assertEqual(node.GetOutputNames(), []) self.assertEqual(light.GetOutputs(onlyAuthored=False), []) # The reverse is tested just above, but for all asset identifier # inputs listed for the node there is a corresponding asset value # input property on the prim. for inputName in node.GetAssetIdentifierInputNames(): self.assertEqual(light.GetInput(inputName).GetTypeName(), Sdf.ValueTypeNames.Asset) # These primvars come from sdrMetadata on the prim itself which # isn't supported for light schemas so it will always be empty. self.assertFalse(node.GetPrimvars()) # sdrMetadata on input properties is supported so additional # primvar properties will correspond to prim inputs with that # metadata set. for propName in node.GetAdditionalPrimvarProperties(): self.assertTrue(light.GetInput(propName).GetSdrMetadataByKey( 'primvarProperty')) # Default input can also be specified in the property's sdrMetadata. if node.GetDefaultInput(): defaultInput = light.GetInput( node.GetDefaultInput().GetName()) self.assertTrue(defaultInput.GetSdrMetadataByKey('defaultInput')) if __name__ == '__main__': unittest.main()
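The test file above exercises, among other things, the extent computation registered for boundable UsdLux lights. Below is a minimal sketch distilled from its test_LightExtentAndBBox case, using only calls that appear in the test itself; the /RectLight path and the 4 x 6 size are illustrative, and the expected extent is taken from the test's own assertions.

from pxr import Usd, UsdGeom, UsdLux

# Build an in-memory stage with a rect light, as the test does.
stage = Usd.Stage.CreateInMemory()
rectLight = UsdLux.RectLight.Define(stage, "/RectLight")
rectLight.CreateWidthAttr(4.0)
rectLight.CreateHeightAttr(6.0)

# The registered extent plugin derives the extent from width/height;
# the test expects [(-2, -3, 0), (2, 3, 0)] for a 4 x 6 rect light.
extent = UsdGeom.Boundable.ComputeExtentFromPlugins(
    rectLight, Usd.TimeCode.Default())
print(extent)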
21,048
8,253
23
0e80c9e7dca15d7cd5266e3c0a1290507d1a7a09
3,801
py
Python
scripts/fix_rttm.py
sehgal-simran/RPNSD
5ec70d11e3d177fb87a8499b63cd1c5ba60549b6
[ "MIT" ]
59
2020-02-19T11:23:14.000Z
2022-02-06T09:31:32.000Z
scripts/fix_rttm.py
yuzhms/RPNSD
031377388cb498c0dee080a76bd588a9ee8b39e0
[ "MIT" ]
11
2020-03-05T10:23:43.000Z
2021-10-11T02:15:28.000Z
scripts/fix_rttm.py
yuzhms/RPNSD
031377388cb498c0dee080a76bd588a9ee8b39e0
[ "MIT" ]
13
2020-02-19T02:30:43.000Z
2021-01-13T03:06:42.000Z
#!/usr/bin/env python3

# This script fixes some problems in the RTTM file,
# including invalid time boundaries and others

import os
import sys
import numpy as np
import argparse

if __name__ == "__main__":
    main()
36.548077
146
0.594843
#!/usr/bin/env python3

# This script fixes some problems in the RTTM file,
# including invalid time boundaries and others

import os
import sys
import numpy as np
import argparse


def get_args():
    parser = argparse.ArgumentParser(
        description="Fix RTTM file")
    parser.add_argument("rttm_file", type=str,
                        help="Input RTTM file")
    parser.add_argument("rttm_output_file", type=str,
                        help="Output RTTM file")
    parser.add_argument("--channel", type=int, default=1,
                        help="Channel information in the RTTM file")
    parser.add_argument("--add_uttname", type=int, default=0,
                        help="Whether to add uttname to spkname")
    args = parser.parse_args()
    return args


def load_rttm(filename):
    utt2seg = {}
    with open(filename, 'r') as fh:
        content = fh.readlines()
    for line in content:
        line = line.strip('\n')
        line_split = line.split()
        uttname, start_t, duration, spkname = line_split[1], float(line_split[3]), float(line_split[4]), line_split[7]
        if duration <= 0:
            print("Invalid line")
            print(line)
            continue
        end_t = start_t + duration
        if uttname not in utt2seg:
            utt2seg[uttname] = []
        utt2seg[uttname].append([start_t, end_t, spkname])
    return utt2seg


def merge_same_spk(seg_array):
    spk_list = list(set(seg_array[:, 2]))
    seg_array_list = []
    for spk in spk_list:
        seg_array_spk = seg_array[seg_array[:, 2] == spk]
        seg_list_spk = []
        for i in range(len(seg_array_spk)):
            if i == 0:
                seg_list_spk.append(seg_array_spk[i, :])
            else:
                if seg_array_spk[i, 0] > seg_list_spk[-1][1]:
                    seg_list_spk.append(seg_array_spk[i, :])
                else:
                    seg_list_spk[-1][1] = max(seg_list_spk[-1][1], seg_array_spk[i, 1])
        seg_array_spk_new = np.array(seg_list_spk)
        seg_array_list.append(seg_array_spk_new)
    seg_array_new = np.concatenate(seg_array_list)
    seg_array_new = seg_array_new[seg_array_new[:, 0].argsort(), :]
    return seg_array_new


def fix_rttm(utt2seg):
    uttlist = list(utt2seg.keys())
    uttlist.sort()
    utt2seg_new = {}
    for utt in uttlist:
        seg_list = utt2seg[utt]
        spk_list = list(set([seg[2] for seg in seg_list]))
        spk_list.sort()
        seg_array = np.array([[seg[0], seg[1], spk_list.index(seg[2])] for seg in seg_list])
        seg_array = seg_array[seg_array[:, 0].argsort(), :]
        seg_array_new = merge_same_spk(seg_array)
        seg_list = []
        for i in range(len(seg_array_new)):
            seg_list.append([seg_array_new[i, 0], seg_array_new[i, 1], spk_list[int(seg_array_new[i, 2])]])
        utt2seg_new[utt] = seg_list
    return utt2seg_new


def write_rttm(utt2seg, rttm_output_file, add_uttname, channel):
    uttlist = list(utt2seg.keys())
    uttlist.sort()
    with open(rttm_output_file, 'w') as fh:
        for utt in uttlist:
            seg_list = utt2seg[utt]
            for seg in seg_list:
                if add_uttname:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {}_{} <NA> <NA>\n".format(utt, channel, seg[0], seg[1] - seg[0], utt, seg[2]))
                else:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {} <NA> <NA>\n".format(utt, channel, seg[0], seg[1] - seg[0], seg[2]))
    return 0


def main():
    args = get_args()

    # load input RTTM
    utt2seg = load_rttm(args.rttm_file)

    # fix RTTM file
    utt2seg_new = fix_rttm(utt2seg)

    # write output RTTM
    write_rttm(utt2seg_new, args.rttm_output_file, args.add_uttname, args.channel)
    return 0


if __name__ == "__main__":
    main()
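As a quick illustration of the functions above, here is a hypothetical driver (the editor's sketch, not part of the repository) that merges two overlapping segments of the same speaker. It assumes the script is saved as fix_rttm.py and is importable; the utterance and speaker names are made up.

from fix_rttm import fix_rttm, write_rttm

# Two overlapping segments for spkA should be merged into one [0.0, 3.0] span.
utt2seg = {"utt1": [[0.0, 2.0, "spkA"], [1.5, 3.0, "spkA"], [4.0, 5.0, "spkB"]]}

utt2seg_fixed = fix_rttm(utt2seg)
print(utt2seg_fixed)  # expected: spkA merged to [0.0, 3.0], spkB unchanged

# Write the cleaned segments out in RTTM format.
write_rttm(utt2seg_fixed, "fixed.rttm", add_uttname=0, channel=1)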
3,448
0
139
78c5929686706d7b4c5c6bb30eecae092b7caa4b
997
py
Python
polymorphism/polymorphism_demos.py
Minkov/python-oop
db9651eef374c0e74c32cb6f2bf07c734cc1d051
[ "MIT" ]
3
2021-11-16T04:52:53.000Z
2022-02-07T20:28:41.000Z
polymorphism/polymorphism_demos.py
Minkov/python-oop
db9651eef374c0e74c32cb6f2bf07c734cc1d051
[ "MIT" ]
null
null
null
polymorphism/polymorphism_demos.py
Minkov/python-oop
db9651eef374c0e74c32cb6f2bf07c734cc1d051
[ "MIT" ]
1
2021-12-07T07:04:38.000Z
2021-12-07T07:04:38.000Z
import math

r = Rect(2, 5)
c = Circle(3)

shapes: list[Shape] = [
    r,
    c,
]

[print_area(s) for s in shapes]

print(isinstance(r, Rect))
print(isinstance(r, Circle))
print(isinstance(r, Shape))

# print_area(2)

print(Rect.mro())

Person().say_hello()
16.616667
50
0.608826
import math


class Shape:
    def area(self):
        pass


class Rect(Shape):
    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        return self.width * self.height


class Circle(Shape):
    def __init__(self, radius):
        self.radius = radius

    def area(self):
        return self.radius * self.radius * math.pi


def print_area(shape: Shape):
    # if isinstance(shape, Rect):
    #     print(shape.rect_area())
    # elif isinstance(shape, Circle):
    #     print(shape.circle_area())
    print(shape.area())
    # print(shape.width, shape.height)


r = Rect(2, 5)
c = Circle(3)

shapes: list[Shape] = [
    r,
    c,
]

[print_area(s) for s in shapes]

print(isinstance(r, Rect))
print(isinstance(r, Circle))
print(isinstance(r, Shape))

# print_area(2)

print(Rect.mro())


class Person:
    def say_hello(self):
        print("Hello! 1")

    def say_hello(self):
        print("Hello! 2")


Person().say_hello()
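To underline the point of the demo above, here is a hypothetical extension (not in the original lesson file): any new Shape subclass that provides its own area() works with print_area unchanged, which is exactly the polymorphism the file demonstrates.

# Assumes Shape and print_area from the lesson file above are in scope.
class Triangle(Shape):
    def __init__(self, base, height):
        self.base = base
        self.height = height

    def area(self):
        return self.base * self.height / 2


print_area(Triangle(4, 3))  # prints 6.0 without touching print_area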
455
-21
300