Dataset schema (per-column dtype and statistics). Each row below lists its values in this column order, separated by `|` lines:

| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 220 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 257 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
995f17f49f0cc20090ed4da3fc31fdabd4c2e5df
|
6a61ef12621c8a917d160db62415487fe2c469f7
|
/aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/DeleteJobGroupRequest.py
|
6edfb7caf350c296ba47360d1600bde52a8e0e09
|
[
"Apache-2.0"
] |
permissive
|
zhangwp-cn/aliyun-openapi-python-sdk
|
f0b15369665a956490534c942676ed15410196f7
|
a560e38f97351db05d13f0588f7bdfb4292ed3ae
|
refs/heads/master
| 2022-09-08T13:31:26.842867 | 2020-06-04T03:23:30 | 2020-06-04T03:23:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,607 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data


class DeleteJobGroupRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'DeleteJobGroup', 'outboundbot')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_InstanceId(self):
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId):
        self.add_query_param('InstanceId', InstanceId)

    def get_JobGroupId(self):
        return self.get_query_params().get('JobGroupId')

    def set_JobGroupId(self, JobGroupId):
        self.add_query_param('JobGroupId', JobGroupId)
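
# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# Assuming valid Aliyun credentials, a request class like the one above is
# normally sent through an AcsClient; the region and IDs below are
# placeholders:
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
    request = DeleteJobGroupRequest()
    request.set_InstanceId('<instance-id>')
    request.set_JobGroupId('<job-group-id>')
    # do_action_with_exception sends the request and raises on API errors
    print(client.do_action_with_exception(request))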
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
e5d45aba73ee5b13499f51e053f224648be79b3d
|
4905ea775218db882f23188df256ab036b182ea7
|
/fortytwo_test_task/migrations/auth/0001_data.py
|
3f22f4f6761315f074469f9ae28041965fd4b4d8
|
[] |
no_license
|
pmorhun/FortyTwoTestTask
|
b57f4597003f45e9a59f0a934d6648229c9cbf01
|
b27c70593a027e62e8ea32b0527caf2c7ff71926
|
refs/heads/master
| 2020-04-03T12:39:19.750194 | 2016-03-09T14:25:59 | 2016-03-09T14:25:59 | 51,739,394 | 0 | 0 | null | 2016-02-15T07:55:31 | 2016-02-15T07:55:30 | null |
UTF-8
|
Python
| false | false | 3,869 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models


class Migration(DataMigration):

    def forwards(self, orm):
        user = orm['auth.user'](
            username="admin",
            is_active=True,
            is_superuser=True,
            is_staff=True,
            password="pbkdf2_sha256$12000$zDS0slDEfmlZ$y+G2T/PRVXqGFUubLUTAfRPkAo1mB7bts12VNqadjLE=",
            email="admin@admin.com")
        user.save()

    def backwards(self, orm):
        raise RuntimeError("Cannot reverse this migration.")

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['auth']
    symmetrical = True
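
# --- Editor's note: a hedged sketch, not part of the original migration. ---
# The hard-coded password above is a Django PBKDF2 hash; an equivalent value
# can be produced with Django's stock hashers (requires configured settings):
#
#     from django.contrib.auth.hashers import make_password, check_password
#
#     hashed = make_password('plaintext-password')        # 'pbkdf2_sha256$...'
#     assert check_password('plaintext-password', hashed)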
|
[
"petermorhun@gmail.com"
] |
petermorhun@gmail.com
|
5f54f73fc5ddac26d7fa0c7409af8935051d4dd7
|
7c3c01c8d3a88890c17b4746279adc470dee325f
|
/ordersapp/models.py
|
225a2aa025ee0baca4f90756a80c2af82c6dd6f3
|
[] |
no_license
|
Raider992/django_opt
|
c729eb462902b7b49a79ea87f53df2a026ff7f60
|
69516b57875c2eec91ea99d2ab9fe1ca6d113c74
|
refs/heads/master
| 2023-02-28T18:44:22.411815 | 2021-02-09T07:00:53 | 2021-02-09T07:00:53 | 329,073,703 | 0 | 0 | null | 2021-02-09T07:00:54 | 2021-01-12T18:16:36 |
CSS
|
UTF-8
|
Python
| false | false | 2,288 |
py
|
from django.db import models
from django.conf import settings
from cartapp.models import Cart
from mainapp.models import Product


class Order(models.Model):
    FORMING = 'FM'
    SENT_TO_PROCESS = 'STP'
    PROCESSED = 'PRD'
    PAID = 'PD'
    READY = 'RDY'
    DONE = 'DN'
    CANCELLED = 'CNC'
    ORDER_STATUSES = (
        (FORMING, 'формируется'),
        (SENT_TO_PROCESS, 'отправлен на обработку'),
        (PROCESSED, 'обработан'),
        (PAID, 'оплачен'),
        (READY, 'готов к выдаче'),
        (DONE, 'выдан'),
        (CANCELLED, 'отменён'),
    )
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='создан')
    # auto_now (not auto_now_add), so the timestamp updates on every save
    updated_at = models.DateTimeField(auto_now=True, verbose_name='изменён')
    status = models.CharField(
        verbose_name='статус',
        choices=ORDER_STATUSES,
        max_length=4,
        default=FORMING
    )
    is_active = models.BooleanField(default=True, verbose_name='активен')

    class Meta:
        verbose_name = 'заказ'
        verbose_name_plural = 'заказы'
        ordering = ('-created_at',)

    def get_total_quantity(self):
        _items = self.orderitems.select_related()
        return sum(item.quantity for item in _items)

    def get_total_price(self):
        _items = self.orderitems.select_related()
        return sum(item.quantity * item.product.price for item in _items)

    def delete(self):
        # soft delete: return reserved stock to the products, then deactivate
        for item in self.orderitems.select_related():
            item.product.quantity += item.quantity
            item.product.save()
        self.is_active = False
        self.save()


class OrderItem(models.Model):
    order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='orderitems')
    product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name='продукт')
    quantity = models.PositiveIntegerField(default=0, verbose_name='количество')

    def get_product_cost(self):
        return self.product.price * self.quantity
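
# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# With related_name='orderitems' above, the totals aggregate over the items,
# e.g. (the user/product objects below are hypothetical):
#
#     order = Order.objects.create(user=some_user)
#     OrderItem.objects.create(order=order, product=tea, quantity=2)
#     OrderItem.objects.create(order=order, product=cup, quantity=1)
#     order.get_total_quantity()   # -> 3
#     order.get_total_price()      # -> 2 * tea.price + 1 * cup.price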
|
[
"19stargazer92@gmail.com"
] |
19stargazer92@gmail.com
|
8c7b6551a5d0d94ccfa765dd315c04805e05d3a0
|
f85834b22f47c5af0fea7ecd68beea7daaa45ff5
|
/scripts/machineLearning/hyperparametar_tuning.py
|
7df3837b34ce832384fb4c7faef95c2cac423300
|
[] |
no_license
|
cocacolabai/YogaPoseDetection
|
ab906525b2dfc8e83eca9d9e8abf5e10044a0cda
|
37588c211eb6b508a6a5f75c2917397acea81694
|
refs/heads/main
| 2023-04-13T13:12:55.826543 | 2021-04-21T08:36:58 | 2021-04-21T08:36:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,082 |
py
|
from sklearn import svm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import Matern, RBF, DotProduct, RationalQuadratic, WhiteKernel
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from scripts.helpers.data_manipulation_helpers import write_multiple_lines
from scripts.helpers.sklearn_helpers import best_hyperparameters
from scripts.machineLearning.ml_data_for_classification import MlDataForModelTraining
all_train_data = MlDataForModelTraining(
    '/Users/lucapomer/Documents/bachelor/YogaPoseDetection/old_split_run_through/csv_data_files/train_data_both_with_flipped.csv')

parameters_gaus = {'kernel': [1*RBF(), 1*DotProduct(), 1*Matern(length_scale=1, nu=1.5), 1*RationalQuadratic(), 1*WhiteKernel()]}
parameters_tree = {'max_depth': [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 30]}
parameters_svc = {'kernel': ('linear', 'rbf', 'poly', 'sigmoid'), 'C': [0.025, 0.1, 0.5, 1]}
parameter_mlp = {
    'hidden_layer_sizes': [(10, 30, 10), (20,), 100],
    'activation': ['tanh', 'relu', 'identity', 'logistic'],
    'solver': ['sgd', 'adam'],
    'alpha': [0.0001, 0.05],
    'learning_rate': ['constant', 'adaptive'],
    'max_iter': [5000, 10000]
}
gaus = GaussianProcessClassifier()
svc = svm.SVC()
mlp = MLPClassifier(max_iter=20000)
tree = DecisionTreeClassifier()
optimal_gauss = best_hyperparameters(parameters_gaus, gaus, all_train_data)
optimal_mlp = best_hyperparameters(parameter_mlp, mlp, all_train_data)
optimal_tree = best_hyperparameters(parameters_tree, tree, all_train_data)
optimal_svc = best_hyperparameters(parameters_svc, svc, all_train_data)
save_info = []
save_info.append(['data', 'old split both'])
save_info.append(['optimal gauss', str(optimal_gauss)])
save_info.append(['optimal mlp', str(optimal_mlp)])
save_info.append(['optimal tree', str(optimal_tree)])
save_info.append(['optimal svc', str(optimal_svc)])
write_multiple_lines(save_info, '/Users/lucapomer/Documents/bachelor/YogaPoseDetection/csv_data_files/hyperparameter_tuning.csv')
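
# --- Editor's note: a hedged sketch, not part of the original file. ---
# best_hyperparameters is a project helper whose internals are not shown in
# this file; a search over grids like the ones above is conventionally done
# with scikit-learn's GridSearchCV, roughly as follows (the .X/.y attribute
# names on the data object are assumptions):
#
#     from sklearn.model_selection import GridSearchCV
#
#     def best_hyperparameters_sketch(param_grid, estimator, data):
#         search = GridSearchCV(estimator, param_grid, cv=5)
#         search.fit(data.X, data.y)  # hypothetical feature/label attributes
#         return search.best_params_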
|
[
"43848253+LucaPomer@users.noreply.github.com"
] |
43848253+LucaPomer@users.noreply.github.com
|
c79d246f78d0b2e6d4233b6f292eca9186fe7eb2
|
820325f03118f4d3c541c180e7564b578795cbed
|
/projects/compiler/compilation_engine/ExpressionFactory.py
|
fa0cff31519e0354a2c618fea274248a3f4605ff
|
[] |
no_license
|
brendonsoto/the-elements-of-computing-systems-projects
|
e8e33be50cdaab2382a43d16566863e886212a90
|
6cabc9ccbd34fcfc0a838d866b16b276c45fd10b
|
refs/heads/master
| 2021-04-12T03:42:05.114630 | 2018-07-20T16:32:15 | 2018-07-20T16:32:15 | 125,767,572 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,358 |
py
|
from TokenIterator import TokenIterator

unary_operators = '-', '~'
operators = unary_operators + ('+', '-', '*', '/', '&', '|', '<', '>', '=')
keywords = 'true', 'false', 'null', 'this'

'''
get_expression
input: a list of tokens
output: a dictionary representing an expression
'''
def get_expression(tokens):
    token_iterator = TokenIterator(tokens)
    # Let's assume the current token is at the term
    term = token_iterator.getCurrentToken()
    expression = {'node_type': 'expression'}

    # Integer constant
    if term.isdigit():
        expression['expression_type'] = 'integer'
        expression['value'] = term
    # Keyword constant
    elif term in keywords:
        expression['expression_type'] = 'keyword'
        expression['value'] = term
    # String constant -- (str) is used to determine string constants
    elif '(str)' in term:
        expression['expression_type'] = 'string'
        expression['value'] = term.replace('(str) ', '')
    # Unary
    elif term in unary_operators:
        expression = get_unary_expression(token_iterator)
    # Expression in parenthesis -- i.e. (5 - 4)
    # ('==' rather than 'is': identity comparison of strings is unreliable)
    elif term == '(':
        expression_tokens = get_inner_expression(token_iterator, ')')
        expression['expression_type'] = 'parenthetical'
        expression['expressions'] = get_expression(expression_tokens)

    # Otherwise we either have an array accessor or a subroutine call
    if token_iterator.isThereNextToken():
        # First get next token for reference
        next_token = token_iterator.getNextToken()
        if next_token == '[':
            expression_tokens = get_inner_expression(token_iterator, ']')
            expression['expression_type'] = 'array_accessor'
            expression['array'] = term
            expression['accessor'] = get_expression(expression_tokens)
        elif next_token == '(':
            expression_tokens = get_inner_expression(token_iterator, ')')
            expression['expression_type'] = 'subroutine_call'
            expression['name'] = term
            expression['expression_list'] = get_expression(expression_tokens)
        elif next_token == '.':
            expression_tokens = get_inner_expression(token_iterator, ')')
            expression['expression_type'] = 'subroutine_parent'
            expression['class'] = term
            expression['expression_list'] = get_expression(expression_tokens)
        # TODO Check this out -- see if it's viable for multiple operations
        elif next_token in operators:
            expression['operator'] = next_token
            expression_tokens = get_remainder_of_expression(token_iterator)
            expression['operated_value'] = get_expression(expression_tokens)
        else:
            raise ValueError("Expected a [/(/operator but received {0}".format(next_token))
    elif not token_iterator.isThereNextToken() and 'value' not in expression:
        expression['term_type'] = 'varName'
        expression['value'] = term

    return expression


def get_unary_expression(token_iterator):
    term = token_iterator.getCurrentToken()
    # A unary operation contains an expression, so we'll have to grab everything
    # after the unary operator to get the sub expression
    expression_tokens = []
    while token_iterator.currentIndex != len(token_iterator.tokens) - 1:
        expression_tokens.append(token_iterator.getNextToken())
    return {
        'node_type': 'expression',
        'expression_type': 'unary',
        'unary_operator': term,
        'value': get_expression(expression_tokens)
    }


def get_remainder_of_expression(token_iterator):
    expression_tokens = []
    while token_iterator.isThereNextToken():
        expression_tokens.append(token_iterator.getNextToken())
    return expression_tokens


def get_inner_expression(token_iterator, ending_char):
    starting_char = token_iterator.getCurrentToken()
    indentation_level = 0
    expression_tokens = []
    while token_iterator.isThereNextToken():
        token = token_iterator.getNextToken()
        if token == starting_char:
            indentation_level += 1
        if token == ending_char and indentation_level > 0:
            indentation_level -= 1
        elif token == ending_char:
            break
        expression_tokens.append(token)
    return expression_tokens
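
# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# Assuming TokenIterator behaves as its method names suggest, the integer
# branch above turns a bare digit token into an integer node:
#
#     expr = get_expression(['5'])
#     # -> {'node_type': 'expression', 'expression_type': 'integer', 'value': '5'}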
|
[
"bsoto@nhl.com"
] |
bsoto@nhl.com
|
d8c8d2c42a59c105d4f913aba3832a91e2e0b1dc
|
802521e9463d7a99e5a95210b666aea79b9856bc
|
/hello_world.py
|
90a9efa702ba0d0abc98db393068781b808de834
|
[] |
no_license
|
sasha-e-k/git_intro
|
009b00870b12c205be6ab247473769537fa6924c
|
67006b4d0e78a15d83e57c5fd03c41159c8fcaf1
|
refs/heads/master
| 2023-08-15T10:13:43.302372 | 2021-10-13T11:42:25 | 2021-10-13T11:42:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 56 |
py
|
# a useful script
print("hello world")
# made in branch
|
[
"qi20129@bristol.ac.uk"
] |
qi20129@bristol.ac.uk
|
2bbd5c8c3393d9a998515b3902ca992a554e3546
|
3ca7e3ec69f5b454a96fcd1b63aba601e829d4e2
|
/fractal_tree.py
|
22ee31ea21f767e8f76b9a10f546f5deb91c3944
|
[] |
no_license
|
pysprings/lsystems
|
2a6b5c6c45355c87ef4d0833b1d8bcd3cd932c72
|
28396c3cdf738216266f8806c5f5652f7f25b6e7
|
refs/heads/master
| 2020-03-17T16:39:37.521017 | 2018-05-17T04:12:17 | 2018-05-17T04:12:17 | 133,757,659 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,746 |
py
|
import numpy as np
from math import pi, sin, cos
import random as rand
import pyglet  # needed for pyglet.graphics / pyglet.window / pyglet.app below
from pyglet.gl import *
import sys

AMOUNT_TO_SHRINK = rand.uniform(0.50, 0.75)
# Be careful of setting this too high, as the higher it is, the longer the
# tree takes to create. At values above 15 (2^15 branches) you will notice
# this, and it will probably hang for quite some time.
TREE_DEPTH = rand.randint(10, 13)
SIN_MEMOIZED_VALUES = {}
COS_MEMOIZED_VALUES = {}
# Change these RGB colors to your liking to create BEAUTIFUL colored trees.
BRANCH_COLOUR = (101, 67, 33, 101, 67, 33)
BRANCH_LEAF_COLOUR = (0, 100, 0, 0, 100, 0)


def memoizedSin(degree):
    if degree not in SIN_MEMOIZED_VALUES:
        SIN_MEMOIZED_VALUES[degree] = sin(np.deg2rad(degree))
    return SIN_MEMOIZED_VALUES[degree]


def memoizedCos(degree):
    if degree not in COS_MEMOIZED_VALUES:
        COS_MEMOIZED_VALUES[degree] = cos(np.deg2rad(degree))
    return COS_MEMOIZED_VALUES[degree]


def rotateVector(vector, degree):
    cosAlpha = memoizedCos(degree)
    sinAlpha = memoizedSin(degree)
    # Rotation matrix applied to a row vector; with this operand order it
    # rotates clockwise for positive degrees (both signs are used below).
    return np.matmul(vector, [[cosAlpha, -sinAlpha], [sinAlpha, cosAlpha]])


class Branch:
    def __init__(self, begin, end, color):
        self.begin = np.array(begin)
        self.end = np.array(end)
        self.vertices = pyglet.graphics.vertex_list(
            2,
            ('v2f', (self.begin[0], self.begin[1], self.end[0], self.end[1])),
            ('c3B', color)
        )

    def branch(self, degree, color):
        dir = self.end - self.begin
        dir = rotateVector(dir, degree)
        dir = dir * AMOUNT_TO_SHRINK
        newEnd = self.end + dir
        branch = Branch(self.end, newEnd, color)
        return branch

    def displayBranch(self):
        glLineWidth(2.0)
        self.vertices.draw(GL_LINES)


class FractalTree:
    def __init__(self, height):
        self.branches = []
        self.branches.append(Branch([0, -(height / height)], [0, 0], BRANCH_COLOUR))

    def createTree(self):
        totalBranchesToVisit = int(pow(2, TREE_DEPTH - 1)) - 1
        currBranchIndex = 0
        while currBranchIndex < totalBranchesToVisit:
            degree = rand.randrange(30, 61)
            self.branches.append(self.branches[currBranchIndex].branch(-degree, BRANCH_COLOUR))
            self.branches.append(self.branches[currBranchIndex].branch(degree, BRANCH_COLOUR))
            currBranchIndex += 1
        totalBranches = len(self.branches)
        for branchIndex in range(currBranchIndex, totalBranches):
            self.branches[branchIndex].vertices.colors = BRANCH_LEAF_COLOUR

    def displayTree(self):
        for branch in self.branches:
            branch.displayBranch()


class Window(pyglet.window.Window):
    def __init__(self, *args, **kwargs):
        if sys.version_info > (3, 0):
            super().__init__(*args, **kwargs)
        else:
            super(Window, self).__init__(*args, **kwargs)
        self.set_minimum_size(640, 480)
        glClearColor(0.5, 0.5, 0.5, 1.0)
        glScalef(0.4, 0.4, 0.4)
        windowSize = self.get_size()
        self.tree = FractalTree(windowSize[1])  # We want the height of the window
        self.tree.createTree()

    def on_draw(self):
        self.clear()
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        self.tree.displayTree()

    def on_resize(self, width, height):
        glViewport(0, 0, width, height)


if __name__ == "__main__":
    window = Window(640, 480, "Fractal Trees Demonstration", resizable=True)
    pyglet.app.run()
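
# --- Editor's note, not part of the original file: createTree adds two new
# branches for each of the 2**(TREE_DEPTH - 1) - 1 visited branches, so a
# finished tree holds 1 + 2 * (2**(TREE_DEPTH - 1) - 1) = 2**TREE_DEPTH - 1
# branches in total; TREE_DEPTH = 13 already means 8191 vertex lists, which
# is why the comment near the top warns against large depths.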
|
[
"ryan.freckleton@gmail.com"
] |
ryan.freckleton@gmail.com
|
7a6ca8b3501bb379c131dccca76387873b61b714
|
3ed490a74e3952dbd5d611b1166ce6e227c9b6cf
|
/a1d05eba1/components/settings.py
|
f69d317a58dbb45737e183bc8b2013371e68a122
|
[
"ISC"
] |
permissive
|
dorey/a1d05eba1
|
771539b5c1e3c11a0d600326fe5d0241b2025f9d
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
refs/heads/main
| 2023-08-07T03:54:12.576554 | 2020-08-25T23:36:08 | 2020-08-25T23:36:08 | 273,079,242 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,872 |
py
|
from ..utils.kfrozendict import kfrozendict
from ..utils import kassertfrozen
from ..utils.kfrozendict import shallowfreeze
from ..utils.yparse import yload_file
from ..schema_properties import SETTINGS_PROPERTIES
from .base_component import SurveyComponentWithDict

_standardize_public_key = lambda pk: ''.join(pk.split('\n'))


class Settings(SurveyComponentWithDict):
    _pubkey = None
    settings_renames_from_1 = yload_file('renames/from1/settings', invert=True)

    def load(self):
        SKIP_SETTINGS = ['metas', 'default_language']
        save = {}
        for (key, val) in self.content._data_settings.items():
            if key in SKIP_SETTINGS:
                continue
            if self.content.perform_renames:
                key = self.settings_renames_from_1.get(key, key)
            if key == 'style' and isinstance(val, str):
                if val == '':
                    continue
                val = val.split(' ')
            keep_setting = True
            strip_uk_setts = self.content.strip_unknown_values
            if strip_uk_setts and key not in SETTINGS_PROPERTIES:
                keep_setting = False
            if keep_setting:
                save[key] = val
        self._pubkey = save.pop('public_key', None)
        if self._pubkey:
            self._pubkey = _standardize_public_key(self._pubkey)
        self._d = shallowfreeze(save)

    @kassertfrozen
    def to_frozen_dict(self, export_configs):
        out = self._d
        if self._pubkey:
            out = out.copy(public_key=self._pubkey)
        if export_configs.schema == '1' and self.content.initial_tx:
            txname = self.content.initial_tx.as_string_or_null()
            out = out.copy(default_language=txname)
        if export_configs.default_settings:
            out = out.copy(**export_configs.default_settings)
        return out
|
[
"dorey415@gmail.com"
] |
dorey415@gmail.com
|
b1c06b4f2309fc8aaaf3b6ce7edbcf6c31ace1aa
|
e456ec1f174091a1024dd0ebd7c8f011b3399367
|
/Test.py
|
0045530c15143de17da121041d45b4836a662166
|
[] |
no_license
|
Gummy27/Forritun
|
414e98c0020bdf71a8c2a9b3757ece19c0d01172
|
6a312db6e5a451fac1e6830d7e249663739f15f2
|
refs/heads/master
| 2023-02-15T03:01:00.307741 | 2021-01-07T11:01:37 | 2021-01-07T11:01:37 | 177,342,511 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 57 |
py
|
s1 = "1"
s2 = "2"
print(f'Hérna kemur númerið 1: {1}')
|
[
"brimir27@outlook.com"
] |
brimir27@outlook.com
|
0b4681cbbbd15b1ae82f979dfb0855a484f541fc
|
8e3b452b08139f25be824fae2b8b7aabb158d888
|
/6.00.1.x/Week3/Lecture5/lectureCode_Lec5-towers.py
|
13861370c38b6bf6a8bbf93b0af680633678f9d6
|
[] |
no_license
|
prasannabe2004/MITx
|
d38a11e38a0abb73ffa37dccb363f779011155ab
|
1954b5fc31004c94f46fc8194b7fa773108c4493
|
refs/heads/master
| 2020-05-16T19:14:00.963550 | 2015-08-07T18:50:12 | 2015-08-07T18:50:12 | 25,537,861 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 285 |
py
|
def printMove(fr, to):
    print('move from ' + str(fr) + ' to ' + str(to))


def Towers(n, fr, to, spare):
    if n == 1:
        printMove(fr, to)
    else:
        Towers(n-1, fr, spare, to)
        Towers(1, fr, to, spare)
        Towers(n-1, spare, to, fr)


Towers(5, 'f', 't', 's')
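
# --- Editor's note, not part of the original file: the recursion makes
# M(n) = 2*M(n-1) + 1 moves with M(1) = 1, i.e. M(n) = 2**n - 1, so the
# Towers(5, ...) call above prints 2**5 - 1 = 31 moves.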
|
[
"prasannabe2004@gmail.com"
] |
prasannabe2004@gmail.com
|
570a9b0f72a1466c0ddd8b2558a7b4b206941056
|
025107917dd1275fd7b5570b8184e6ef8db16b27
|
/mhkit/tests/dolfyn/test_time.py
|
960f7adcad49e399d985981eb15169f249c00417
|
[
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
MRE-Code-Hub/MHKiT-Python
|
20c61dd8662c045120a9db54f91f99a3dc5b3f42
|
e5916004ff0413f32f9559790582c95193acdd24
|
refs/heads/master
| 2023-08-17T02:56:29.182830 | 2023-02-10T17:56:27 | 2023-02-10T17:56:27 | 253,630,523 | 0 | 0 |
BSD-3-Clause
| 2023-08-31T19:38:20 | 2020-04-06T22:31:16 |
Python
|
UTF-8
|
Python
| false | false | 1,797 |
py
|
from . import test_read_adv as trv
from . import test_read_adp as trp
import mhkit.dolfyn.time as time
from numpy.testing import assert_equal, assert_allclose
import numpy as np
from datetime import datetime
import unittest


class time_testcase(unittest.TestCase):
    def test_time_conversion(self):
        td = trv.dat_imu.copy(deep=True)
        dat_sig = trp.dat_sig_i.copy(deep=True)
        dt = time.dt642date(td.time)
        dt1 = time.dt642date(td.time[0])
        dt_off = time.epoch2date(time.dt642epoch(td.time), offset_hr=-7)
        t_str = time.epoch2date(time.dt642epoch(td.time), to_str=True)

        assert_equal(dt[0], datetime(2012, 6, 12, 12, 0, 2, 687283))
        assert_equal(dt1, [datetime(2012, 6, 12, 12, 0, 2, 687283)])
        assert_equal(dt_off[0], datetime(2012, 6, 12, 5, 0, 2, 687283))
        assert_equal(t_str[0], '2012-06-12 12:00:02.687283')
        # Validated based on data in ad2cp.index file
        assert_equal(time.dt642date(dat_sig.time[0])[0],
                     datetime(2017, 7, 24, 17, 0, 0, 63499))
        # This should always be true
        assert_equal(time.epoch2date([0])[0], datetime(1970, 1, 1, 0, 0))

    def test_datetime(self):
        td = trv.dat_imu.copy(deep=True)
        dt = time.dt642date(td.time)
        epoch = np.array(time.date2epoch(dt))
        assert_allclose(time.dt642epoch(td.time.values), epoch, atol=1e-7)

    def test_datenum(self):
        td = trv.dat_imu.copy(deep=True)
        dt = time.dt642date(td.time)
        dn = time.date2matlab(dt)
        dt2 = time.matlab2date(dn)
        epoch = np.array(time.date2epoch(dt2))
        assert_allclose(time.dt642epoch(td.time.values), epoch, atol=1e-6)
        assert_equal(dn[0], 735032.5000311028)


if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
MRE-Code-Hub.noreply@github.com
|
118d4a7d7f9daf9efaf09bc6317c9ba339801087
|
dd90dceb15ff3552667da8b91210f0fbcc3ee658
|
/src/client.py
|
c5dab9294d51b9937f04a1eea3e07fbb6d1da1d6
|
[] |
no_license
|
faizmaricar/ibtrader
|
10e4e32c762cb988384682b7ac60bb9ad5c854eb
|
8fe3374dbc3077bb38e91fc592e590a196a8c4cd
|
refs/heads/main
| 2023-08-28T23:59:08.550329 | 2021-10-26T08:59:09 | 2021-10-26T08:59:09 | 417,083,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
from ibapi.client import EClient


class Client(EClient):
    def __init__(self, wrapper):
        EClient.__init__(self, wrapper)
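
# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# EClient expects an EWrapper instance for callbacks; minimal wiring against a
# running TWS/Gateway would look roughly like this (127.0.0.1:7497 is the
# conventional paper-trading default, adjust as needed):
#
#     from ibapi.wrapper import EWrapper
#
#     class Wrapper(EWrapper):
#         pass
#
#     client = Client(Wrapper())
#     client.connect('127.0.0.1', 7497, clientId=0)
#     client.run()  # enters the message loop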
|
[
"faizmaricar@sistic.com.sg"
] |
faizmaricar@sistic.com.sg
|
e8a046b3192b247fef53416f9af9d159ee31704b
|
95b2c204293341a1033284fbf376a53cf8a7a54e
|
/single/interpret_tree.py
|
74d0217b8933d7fbdb16786017f27e5eb623f18a
|
[] |
no_license
|
amaswea/code
|
17f3ae337c18f6a73b2466d8f6b1638fa1945726
|
c5a0d49be7187a047847f0b69a6ed9c4c18a16fb
|
refs/heads/master
| 2021-09-14T12:34:41.657660 | 2018-05-14T01:13:58 | 2018-05-14T01:13:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,758 |
py
|
'''
This is the file where you will implement
your logic for interpreting a Prefab
tree.

The methods here work like event handlers:
each method is executed by Prefab when certain
events occur.

interpret is called when:
Prefab is given a new screenshot to interpret

process_annotations is called when:
(1) An annotation is added/removed
(2) This layer is loaded and it has been
edited after it was previously loaded
'''
from Prefab import *
from PrefabSingle import *

MAX_SPACE_DIST = 10
MAX_ROW_DIST = 10


def interpret(interpret_data):
    '''
    Implement this method to interpret a
    tree that results from the execution
    of Prefab.

    interpret_data.tree:
        This is a mutable object that represents
        the current interpretation of a screenshot.
        Each node has a dictionary of tags:
            tagvalue = tree['tagname']
            tree['is_text'] = True
        A bounding rectangle:
            width = tree.width
            height = tree.height
            top = tree.top
            left = tree.left
        A list of children that are contained
        spatially within the node's boundary:
            children = tree.get_children()
            children.add( node )

    interpret_data.runtime_storage:
        This is a dict where you can access your annotations.
        Keys to the dict are path descriptors and values are
        dicts that contain metadata about the corresponding
        tree node.
            from path_utils import get_path
            path = get_path( node )
            if path in runtime_storage:
                node['is_text'] = runtime_storage[path]['is_text']
    '''
    find_leaves_tag_as_text(interpret_data.tree)
    apply_text_corrections(interpret_data, interpret_data.tree)
    from tag_widget_type import recursively_tag_group_next, recursively_apply_groups
    recursively_tag_group_next(interpret_data.tree)
    recursively_apply_groups(interpret_data.tree)


def find_leaves_tag_as_text(currnode):
    '''
    Recursively walk through tree and tag leaves as text.
    '''
    if len(currnode.get_children()) == 0:
        currnode['is_text'] = True
    for child in currnode.get_children():
        find_leaves_tag_as_text(child)


def apply_text_corrections(interpret_data, currnode):
    '''
    Walk through tree and use path descriptors to
    overwrite erroneous tags.
    '''
    from path_utils import get_path  # the method to compute path descriptors

    # compute the path descriptor for the current node
    path = get_path(currnode, interpret_data.tree)
    # if there is an annotation for this node and it has 'is_text' metadata,
    # then tag the node with that data
    if path in interpret_data.runtime_storage:
        if 'is_text' in interpret_data.runtime_storage[path]:
            correction = interpret_data.runtime_storage[path]['is_text']
            currnode['is_text'] = correction
    # recurse on the children of currnode
    for child in currnode.get_children():
        apply_text_corrections(interpret_data, child)
|
[
"mdixon.github@gmail.com"
] |
mdixon.github@gmail.com
|
3eca144dbdf237601709ad5381d9aff1751bd7d8
|
6730145fe637cc91c3bfd85f1551d6fa942de370
|
/Content Sharing App/node_server.py
|
e0c2a35e43ef217146f2569a86b814a995e8b416
|
[] |
no_license
|
yasasmahima/Blockchain-Learning
|
c31522d5eb5b6061eb83bd596ceee1ac36d08d06
|
cafa2b2d58c3f22bff0b06a81e0450da5aa11cda
|
refs/heads/master
| 2021-05-22T22:47:23.089031 | 2020-10-02T02:03:07 | 2020-10-02T02:03:07 | 253,130,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,058 |
py
|
from hashlib import sha256
import json
import time

from flask import Flask, request
import requests


class Block:
    def __init__(self, index, transactions, timestamp, previous_hash, nonce=0):
        self.index = index
        self.transactions = transactions
        self.timestamp = timestamp
        self.previous_hash = previous_hash
        self.nonce = nonce

    def compute_hash(self):
        """
        A function that returns the hash of the block contents.
        """
        block_string = json.dumps(self.__dict__, sort_keys=True)
        return sha256(block_string.encode()).hexdigest()


class Blockchain:
    # difficulty of our PoW algorithm
    difficulty = 2

    def __init__(self):
        self.unconfirmed_transactions = []
        self.chain = []

    def create_genesis_block(self):
        """
        A function to generate the genesis block and append it to
        the chain. The block has index 0, previous_hash of 0, and
        a valid hash.
        """
        genesis_block = Block(0, [], 0, "0")
        genesis_block.hash = genesis_block.compute_hash()
        self.chain.append(genesis_block)

    @property
    def last_block(self):
        return self.chain[-1]

    def add_block(self, block, proof):
        """
        A function that adds the block to the chain after verification.
        Verification includes:
        * Checking if the proof is valid.
        * The previous_hash referred to in the block and the hash of the
          latest block in the chain match.
        """
        previous_hash = self.last_block.hash
        if previous_hash != block.previous_hash:
            return False
        if not Blockchain.is_valid_proof(block, proof):
            return False
        block.hash = proof
        self.chain.append(block)
        return True

    @staticmethod
    def proof_of_work(block):
        """
        Function that tries different values of nonce to get a hash
        that satisfies our difficulty criteria.
        """
        block.nonce = 0
        computed_hash = block.compute_hash()
        while not computed_hash.startswith('0' * Blockchain.difficulty):
            block.nonce += 1
            computed_hash = block.compute_hash()
        return computed_hash

    def add_new_transaction(self, transaction):
        self.unconfirmed_transactions.append(transaction)

    @classmethod
    def is_valid_proof(cls, block, block_hash):
        """
        Check if block_hash is a valid hash of block and satisfies
        the difficulty criteria.
        """
        return (block_hash.startswith('0' * Blockchain.difficulty) and
                block_hash == block.compute_hash())

    @classmethod
    def check_chain_validity(cls, chain):
        result = True
        previous_hash = "0"
        for block in chain:
            block_hash = block.hash
            # remove the hash field to recompute the hash again
            # using the `compute_hash` method.
            delattr(block, "hash")
            if not cls.is_valid_proof(block, block_hash) or \
                    previous_hash != block.previous_hash:
                result = False
                break
            block.hash, previous_hash = block_hash, block_hash
        return result

    def mine(self):
        """
        This function serves as an interface to add the pending
        transactions to the blockchain by adding them to the block
        and figuring out Proof Of Work.
        """
        if not self.unconfirmed_transactions:
            return False
        last_block = self.last_block
        new_block = Block(index=last_block.index + 1,
                          transactions=self.unconfirmed_transactions,
                          timestamp=time.time(),
                          previous_hash=last_block.hash)
        proof = self.proof_of_work(new_block)
        self.add_block(new_block, proof)
        self.unconfirmed_transactions = []
        return True
app = Flask(__name__)

# the node's copy of the blockchain
blockchain = Blockchain()
blockchain.create_genesis_block()

# the addresses of other participating members of the network
peers = set()


# endpoint to submit a new transaction. This will be used by
# our application to add new data (posts) to the blockchain
@app.route('/new_transaction', methods=['POST'])
def new_transaction():
    tx_data = request.get_json()
    required_fields = ["author", "content"]
    for field in required_fields:
        if not tx_data.get(field):
            return "Invalid transaction data", 404
    tx_data["timestamp"] = time.time()
    blockchain.add_new_transaction(tx_data)
    return "Success", 201


# endpoint to return the node's copy of the chain.
# Our application will be using this endpoint to query
# all the posts to display.
@app.route('/chain', methods=['GET'])
def get_chain():
    chain_data = []
    for block in blockchain.chain:
        chain_data.append(block.__dict__)
    return json.dumps({"length": len(chain_data),
                       "chain": chain_data,
                       "peers": list(peers)})


# endpoint to request the node to mine the unconfirmed
# transactions (if any). We'll be using it to initiate
# a command to mine from our application itself.
@app.route('/mine', methods=['GET'])
def mine_unconfirmed_transactions():
    result = blockchain.mine()
    if not result:
        return "No transactions to mine"
    else:
        # Making sure we have the longest chain before announcing to the network
        chain_length = len(blockchain.chain)
        consensus()
        if chain_length == len(blockchain.chain):
            # announce the recently mined block to the network
            announce_new_block(blockchain.last_block)
        return "Block #{} is mined.".format(blockchain.last_block.index)


# endpoint to add new peers to the network.
@app.route('/register_node', methods=['POST'])
def register_new_peers():
    node_address = request.get_json()["node_address"]
    if not node_address:
        return "Invalid data", 400
    # Add the node to the peer list
    peers.add(node_address)
    # Return the consensus blockchain to the newly registered node
    # so that it can sync
    return get_chain()


@app.route('/register_with', methods=['POST'])
def register_with_existing_node():
    """
    Internally calls the `register_node` endpoint to
    register the current node with the node specified in the
    request, and sync the blockchain as well as peer data.
    """
    node_address = request.get_json()["node_address"]
    if not node_address:
        return "Invalid data", 400
    data = {"node_address": request.host_url}
    headers = {'Content-Type': "application/json"}
    # Make a request to register with the remote node and obtain information
    response = requests.post(node_address + "/register_node",
                             data=json.dumps(data), headers=headers)
    if response.status_code == 200:
        global blockchain
        global peers
        # update the chain and the peers
        chain_dump = response.json()['chain']
        blockchain = create_chain_from_dump(chain_dump)
        peers.update(response.json()['peers'])
        return "Registration successful", 200
    else:
        # if something goes wrong, pass it on to the API response
        return response.content, response.status_code


def create_chain_from_dump(chain_dump):
    generated_blockchain = Blockchain()
    generated_blockchain.create_genesis_block()
    for idx, block_data in enumerate(chain_dump):
        if idx == 0:
            continue  # skip genesis block
        block = Block(block_data["index"],
                      block_data["transactions"],
                      block_data["timestamp"],
                      block_data["previous_hash"],
                      block_data["nonce"])
        proof = block_data['hash']
        added = generated_blockchain.add_block(block, proof)
        if not added:
            raise Exception("The chain dump is tampered!!")
    return generated_blockchain


# endpoint to add a block mined by someone else to
# the node's chain. The block is first verified by the node
# and then added to the chain.
@app.route('/add_block', methods=['POST'])
def verify_and_add_block():
    block_data = request.get_json()
    block = Block(block_data["index"],
                  block_data["transactions"],
                  block_data["timestamp"],
                  block_data["previous_hash"],
                  block_data["nonce"])
    proof = block_data['hash']
    added = blockchain.add_block(block, proof)
    if not added:
        return "The block was discarded by the node", 400
    return "Block added to the chain", 201


# endpoint to query unconfirmed transactions
@app.route('/pending_tx')
def get_pending_tx():
    return json.dumps(blockchain.unconfirmed_transactions)
def consensus():
    """
    Our naive consensus algorithm. If a longer valid chain is
    found, our chain is replaced with it.
    """
    global blockchain

    longest_chain = None
    current_len = len(blockchain.chain)
    for node in peers:
        response = requests.get('{}chain'.format(node))
        length = response.json()['length']
        chain = response.json()['chain']
        if length > current_len and blockchain.check_chain_validity(chain):
            current_len = length
            longest_chain = chain
    if longest_chain:
        blockchain = longest_chain
        return True
    return False


def announce_new_block(block):
    """
    A function to announce to the network once a block has been mined.
    Other nodes can simply verify the proof of work and add it to their
    respective chains.
    """
    for peer in peers:
        url = "{}add_block".format(peer)
        headers = {'Content-Type': "application/json"}
        requests.post(url,
                      data=json.dumps(block.__dict__, sort_keys=True),
                      headers=headers)


# Uncomment this line if you want to specify the port number in the code
#app.run(debug=True, port=8000)
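
# --- Editor's note: a hedged smoke test, not part of the original file. ---
# Run separately against a node started on localhost:8000; it exercises the
# endpoints defined above (the host/port are assumptions):
#
#     import requests
#
#     base = 'http://127.0.0.1:8000'
#     requests.post(base + '/new_transaction',
#                   json={'author': 'alice', 'content': 'hello chain'})
#     requests.get(base + '/mine')            # -> "Block #1 is mined."
#     print(requests.get(base + '/chain').json()['length'])  # -> 2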
|
[
"yasasymahima@gmail.com"
] |
yasasymahima@gmail.com
|
5bcf6011e0f7c74069e6d1d8c3ec7e61f97c9de6
|
eccb5ca401b3a737b7c88a417cd0e1fbd916b6db
|
/settings.py
|
2aa76360139244baf5934b12a0e8d562d1af3248
|
[] |
no_license
|
andreychyuko/guestbook
|
b07c5dc066ec232d70fb6826514479423a172f76
|
7b322fa21e7d5d6fbd70dae0c22653ece8dbcc16
|
refs/heads/master
| 2022-12-22T22:53:46.072879 | 2020-09-23T16:18:15 | 2020-09-23T16:18:15 | 272,920,818 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,478 |
py
|
import os
from dotenv import load_dotenv
load_dotenv()
# COMMON
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
DEBUG = os.getenv('DEBUG') == '1'
SECRET_KEY = 'td=222=rhm2r(!f#h+k6ocz&+@#o0tg1n$z$a08b#s!l3*p_89'
ALLOWED_HOSTS = ['andreychuyko.ru']
# POSTGRES
POSTGRES_DB = os.getenv('POSTGRES_DB')
POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_HOST = os.getenv('POSTGRES_HOST')
POSTGRES_PORT = os.getenv('POSTGRES_PORT')
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'news.apps.NewsConfig',
    'meal',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'guestbook.urls'
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.csrf",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.request",
            ],
            "loaders": [
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
            ],
        },
        "DIRS": [os.path.join(BASE_DIR, "guestbook", 'templates')],
    },
]
WSGI_APPLICATION = 'guestbook.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': POSTGRES_DB,
        'USER': POSTGRES_USER,
        'PASSWORD': POSTGRES_PASSWORD,
        'HOST': POSTGRES_HOST,
        'PORT': POSTGRES_PORT,
    }
}
print(DATABASES)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'guestbook/static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
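
# --- Editor's note: a hedged example, not part of the original file. ---
# load_dotenv() above reads a .env file; the variables this settings module
# consumes via os.getenv would look like this (all values are placeholders):
#
#     DEBUG=1
#     POSTGRES_DB=guestbook
#     POSTGRES_USER=guestbook
#     POSTGRES_PASSWORD=secret
#     POSTGRES_HOST=127.0.0.1
#     POSTGRES_PORT=5432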
|
[
"andreychyuko@gmail.com"
] |
andreychyuko@gmail.com
|
97154d734ad322d59ad5c14a9e6ac7dc4eee02e5
|
90eb57f3c0eb3a39144038d144d3fab43ea09299
|
/flaskr/base_xss/models.py
|
9bb065b0dab8db733471a642f3eac82286f52dc3
|
[] |
no_license
|
minight/comp6443-lecturectf-xss
|
2e096b190d3475d76ee1c47a4ee2aa8da0280c8c
|
23a30efc512baff1248353e73ed21d2674f7f5bf
|
refs/heads/master
| 2021-01-23T01:34:57.602922 | 2017-03-23T07:38:02 | 2017-03-23T07:38:02 | 85,920,408 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 207 |
py
|
from ..main import db


class Xss(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.UnicodeText(500), index=True)
    content = db.Column(db.UnicodeText(500), index=True)
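
# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# With a configured Flask-SQLAlchemy `db` (as imported above), rows are
# created and queried in the usual way, roughly:
#
#     post = Xss(title='hello', content='<script>alert(1)</script>')
#     db.session.add(post)
#     db.session.commit()
#     Xss.query.filter_by(title='hello').first()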
|
[
"sean.yeoh1@gmail.com"
] |
sean.yeoh1@gmail.com
|
a8f609b70b6b1dd02c881af65a5d5389e0591310
|
4faeb88e4451dc5f0e7e2314a6323beb630d99f0
|
/src/run.py
|
fdc2bfb2c8c4a7d2ec77cc9e7306f738a333f802
|
[
"MIT"
] |
permissive
|
Fitilbam/TC-Bot
|
17ee2d6ac01a29733c297a494e94e8110d4a8072
|
89deb4c0ba94e25bb44eff5c05eaab9125bf9254
|
refs/heads/master
| 2021-01-23T06:05:26.546654 | 2017-03-14T20:43:20 | 2017-03-14T20:43:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,414 |
py
|
"""
Created on May 22, 2016
This should be a simple minimalist run file. It's only responsibility should be to parse the arguments (which agent, user simulator to use) and launch a dialog simulation.
Rule-agent: python run.py --agt 6 --usr 1 --max_turn 40 --episodes 150 --movie_kb_path .\deep_dialog\data\movie_kb.1k.p --run_mode 2
movie_kb:
movie_kb.1k.p: 94% success rate
movie_kb.v2.p: 36% success rate
user goal files:
first turn: user_goals_first_turn_template.v2.p
all turns: user_goals_all_turns_template.p
user_goals_first_turn_template.part.movie.v1.p: a subset of user goal. [Please use this one, the upper bound success rate on movie_kb.1k.json is 0.9765.]
Commands:
Rule: python run.py --agt 5 --usr 1 --max_turn 40 --episodes 150 --movie_kb_path .\deep_dialog\data\movie_kb.1k.p --goal_file_path .\deep_dialog\data\user_goals_first_turn_template.part.movie.v1.p --intent_err_prob 0.00 --slot_err_prob 0.00 --episodes 500 --act_level 1 --run_mode 1
Training:
RL: python run.py --agt 9 --usr 1 --max_turn 40 --movie_kb_path .\deep_dialog\data\movie_kb.1k.p --dqn_hidden_size 80 --experience_replay_pool_size 1000 --episodes 500 --simulation_epoch_size 100 --write_model_dir .\deep_dialog\checkpoints\rl_agent\ --run_mode 3 --act_level 0 --slot_err_prob 0.05 --intent_err_prob 0.00 --batch_size 16 --goal_file_path .\deep_dialog\data\user_goals_first_turn_template.part.movie.v1.p --warm_start 1 --warm_start_epochs 120
Predict:
RL: python run.py --agt 9 --usr 1 --max_turn 40 --movie_kb_path .\deep_dialog\data\movie_kb.1k.p --dqn_hidden_size 80 --experience_replay_pool_size 1000 --episodes 300 --simulation_epoch_size 100 --write_model_dir .\deep_dialog\checkpoints\rl_agent\ --slot_err_prob 0.00 --intent_err_prob 0.00 --batch_size 16 --goal_file_path .\deep_dialog\data\user_goals_first_turn_template.part.movie.v1.p --episodes 200 --trained_model_path .\deep_dialog\checkpoints\rl_agent\agt_9_22_30_0.37000.p --run_mode 3
@author: xiul, t-zalipt
"""
import argparse, json, copy, os
import cPickle as pickle
from deep_dialog.dialog_system import DialogManager, text_to_dict
from deep_dialog.agents import AgentCmd, InformAgent, RequestAllAgent, RandomAgent, EchoAgent, RequestBasicsAgent, AgentDQN
from deep_dialog.usersims import RuleSimulator
from deep_dialog import dialog_config
from deep_dialog.dialog_config import *
from deep_dialog.nlu import nlu
from deep_dialog.nlg import nlg
"""
Launch a dialog simulation per the command line arguments
This function instantiates a user_simulator, an agent, and a dialog system.
Next, it triggers the simulator to run for the specified number of episodes.
"""
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dict_path', dest='dict_path', type=str, default='./deep_dialog/data/dicts.v3.p', help='path to the .json dictionary file')
    parser.add_argument('--movie_kb_path', dest='movie_kb_path', type=str, default='./deep_dialog/data/movie_kb.1k.p', help='path to the movie kb .json file')
    parser.add_argument('--act_set', dest='act_set', type=str, default='./deep_dialog/data/dia_acts.txt', help='path to dia act set; none for loading from labeled file')
    parser.add_argument('--slot_set', dest='slot_set', type=str, default='./deep_dialog/data/slot_set.txt', help='path to slot set; none for loading from labeled file')
    parser.add_argument('--goal_file_path', dest='goal_file_path', type=str, default='./deep_dialog/data/user_goals_first_turn_template.part.movie.v1.p', help='a list of user goals')
    parser.add_argument('--diaact_nl_pairs', dest='diaact_nl_pairs', type=str, default='./deep_dialog/data/dia_act_nl_pairs.v6.json', help='path to the pre-defined dia_act&NL pairs')
    parser.add_argument('--max_turn', dest='max_turn', default=20, type=int, help='maximum length of each dialog (default=20, 0=no maximum length)')
    parser.add_argument('--episodes', dest='episodes', default=1, type=int, help='Total number of episodes to run (default=1)')
    parser.add_argument('--slot_err_prob', dest='slot_err_prob', default=0.05, type=float, help='the slot err probability')
    parser.add_argument('--slot_err_mode', dest='slot_err_mode', default=0, type=int, help='slot_err_mode: 0 for slot_val only; 1 for three errs')
    parser.add_argument('--intent_err_prob', dest='intent_err_prob', default=0.05, type=float, help='the intent err probability')
    parser.add_argument('--agt', dest='agt', default=0, type=int, help='Select an agent: 0 for a command line input, 1-6 for rule based agents')
    parser.add_argument('--usr', dest='usr', default=0, type=int, help='Select a user simulator. 0 is a Frozen user simulator.')
    parser.add_argument('--epsilon', dest='epsilon', type=float, default=0, help='Epsilon to determine stochasticity of epsilon-greedy agent policies')

    # load NLG & NLU model
    parser.add_argument('--nlg_model_path', dest='nlg_model_path', type=str, default='./deep_dialog/models/nlg/lstm_tanh_relu_[1468202263.38]_2_0.610.p', help='path to model file')
    parser.add_argument('--nlu_model_path', dest='nlu_model_path', type=str, default='./deep_dialog/models/nlu/lstm_[1468447442.91]_39_80_0.921.p', help='path to the NLU model file')

    parser.add_argument('--act_level', dest='act_level', type=int, default=0, help='0 for dia_act level; 1 for NL level')
    parser.add_argument('--run_mode', dest='run_mode', type=int, default=0, help='run_mode: 0 for default NL; 1 for dia_act; 2 for both')
    parser.add_argument('--auto_suggest', dest='auto_suggest', type=int, default=0, help='0 for no auto_suggest; 1 for auto_suggest')
    parser.add_argument('--cmd_input_mode', dest='cmd_input_mode', type=int, default=0, help='run_mode: 0 for NL; 1 for dia_act')

    # RL agent parameters
    parser.add_argument('--experience_replay_pool_size', dest='experience_replay_pool_size', type=int, default=1000, help='the size for experience replay')
    parser.add_argument('--dqn_hidden_size', dest='dqn_hidden_size', type=int, default=60, help='the hidden size for DQN')
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='batch size')
    parser.add_argument('--gamma', dest='gamma', type=float, default=0.9, help='gamma for DQN')
    parser.add_argument('--predict_mode', dest='predict_mode', type=bool, default=False, help='predict model for DQN')
    parser.add_argument('--simulation_epoch_size', dest='simulation_epoch_size', type=int, default=50, help='the size of validation set')
    parser.add_argument('--warm_start', dest='warm_start', type=int, default=1, help='0: no warm start; 1: warm start for training')
    parser.add_argument('--warm_start_epochs', dest='warm_start_epochs', type=int, default=100, help='the number of epochs for warm start')
    parser.add_argument('--trained_model_path', dest='trained_model_path', type=str, default=None, help='the path for trained model')
    parser.add_argument('-o', '--write_model_dir', dest='write_model_dir', type=str, default='./deep_dialog/checkpoints/', help='write model to disk')
    parser.add_argument('--save_check_point', dest='save_check_point', type=int, default=10, help='number of epochs for saving model')
    parser.add_argument('--success_rate_threshold', dest='success_rate_threshold', type=float, default=0.3, help='the threshold for success rate')

    args = parser.parse_args()
    params = vars(args)
print 'Dialog Parameters: '
print json.dumps(params, indent=2)
max_turn = params['max_turn']
num_episodes = params['episodes']
agt = params['agt']
usr = params['usr']
dict_path = params['dict_path']
goal_file_path = params['goal_file_path']
# load the user goals from .p file
goal_set = pickle.load(open(goal_file_path, 'rb'))
movie_kb_path = params['movie_kb_path']
movie_kb = pickle.load(open(movie_kb_path, 'rb'))
act_set = text_to_dict(params['act_set'])
slot_set = text_to_dict(params['slot_set'])
################################################################################
# a movie dictionary for user simulator - slot:possible values
################################################################################
movie_dictionary = pickle.load(open(dict_path, 'rb'))
dialog_config.run_mode = params['run_mode']
dialog_config.auto_suggest = params['auto_suggest']
################################################################################
# Parameters for Agents
################################################################################
agent_params = {}
agent_params['max_turn'] = max_turn
agent_params['epsilon'] = params['epsilon']
agent_params['agent_run_mode'] = params['run_mode']
agent_params['agent_act_level'] = params['act_level']
agent_params['experience_replay_pool_size'] = params['experience_replay_pool_size']
agent_params['dqn_hidden_size'] = params['dqn_hidden_size']
agent_params['batch_size'] = params['batch_size']
agent_params['gamma'] = params['gamma']
agent_params['predict_mode'] = params['predict_mode']
agent_params['trained_model_path'] = params['trained_model_path']
agent_params['warm_start'] = params['warm_start']
agent_params['cmd_input_mode'] = params['cmd_input_mode']
if agt == 0:
    agent = AgentCmd(movie_kb, act_set, slot_set, agent_params)
elif agt == 1:
    agent = InformAgent(movie_kb, act_set, slot_set, agent_params)
elif agt == 2:
    agent = RequestAllAgent(movie_kb, act_set, slot_set, agent_params)
elif agt == 3:
    agent = RandomAgent(movie_kb, act_set, slot_set, agent_params)
elif agt == 4:
    agent = EchoAgent(movie_kb, act_set, slot_set, agent_params)
elif agt == 5:
    agent = RequestBasicsAgent(movie_kb, act_set, slot_set, agent_params)
elif agt == 9:
    agent = AgentDQN(movie_kb, act_set, slot_set, agent_params)
################################################################################
#    Add your agent here
################################################################################
else:
    pass
################################################################################
# Parameters for User Simulators
################################################################################
usersim_params = {}
usersim_params['max_turn'] = max_turn
usersim_params['slot_err_probability'] = params['slot_err_prob']
usersim_params['slot_err_mode'] = params['slot_err_mode']
usersim_params['intent_err_probability'] = params['intent_err_prob']
usersim_params['simulator_run_mode'] = params['run_mode']
usersim_params['simulator_act_level'] = params['act_level']
if usr == 0:  # real user
    # NOTE: RealUser is not imported in this file; usr == 0 raises a NameError as written.
    user_sim = RealUser(movie_dictionary, act_set, slot_set, goal_set, usersim_params)
elif usr == 1:
    user_sim = RuleSimulator(movie_dictionary, act_set, slot_set, goal_set, usersim_params)
################################################################################
#    Add your user simulator here
################################################################################
else:
    pass
################################################################################
# load trained NLG model
################################################################################
nlg_model_path = params['nlg_model_path']
diaact_nl_pairs = params['diaact_nl_pairs']
nlg_model = nlg()
nlg_model.load_nlg_model(nlg_model_path)
nlg_model.load_predefine_act_nl_pairs(diaact_nl_pairs)
agent.set_nlg_model(nlg_model)
user_sim.set_nlg_model(nlg_model)
################################################################################
# load trained NLU model
################################################################################
nlu_model_path = params['nlu_model_path']
nlu_model = nlu()
nlu_model.load_nlu_model(nlu_model_path)
agent.set_nlu_model(nlu_model)
user_sim.set_nlu_model(nlu_model)
################################################################################
# Dialog Manager
################################################################################
dialog_manager = DialogManager(agent, user_sim, act_set, slot_set, movie_kb)
################################################################################
# Run num_episodes Conversation Simulations
################################################################################
status = {'successes': 0, 'count': 0, 'cumulative_reward': 0}
simulation_epoch_size = params['simulation_epoch_size']
batch_size = params['batch_size'] # default = 16
warm_start = params['warm_start']
warm_start_epochs = params['warm_start_epochs']
success_rate_threshold = params['success_rate_threshold']
save_check_point = params['save_check_point']
""" Best Model and Performance Records """
best_model = {}
best_res = {'success_rate': 0, 'ave_reward':float('-inf'), 'ave_turns': float('inf'), 'epoch':0}
best_model['model'] = copy.deepcopy(agent)
best_res['success_rate'] = 0
performance_records = {}
performance_records['success_rate'] = {}
performance_records['ave_turns'] = {}
performance_records['ave_reward'] = {}
""" Save model """
def save_model(path, agt, success_rate, agent, best_epoch, cur_epoch):
    filename = 'agt_%s_%s_%s_%.5f.p' % (agt, best_epoch, cur_epoch, success_rate)
    filepath = os.path.join(path, filename)
    checkpoint = {}
    if agt == 9: checkpoint['model'] = copy.deepcopy(agent.dqn.model)
    checkpoint['params'] = params
    try:
        pickle.dump(checkpoint, open(filepath, "wb"))
        print 'saved model in %s' % (filepath, )
    except Exception, e:
        print 'Error: Writing model fails: %s' % (filepath, )
        print e
""" save performance numbers """
def save_performance_records(path, agt, records):
    filename = 'agt_%s_performance_records.json' % (agt)
    filepath = os.path.join(path, filename)
    try:
        json.dump(records, open(filepath, "wb"))
        print 'saved model in %s' % (filepath, )
    except Exception, e:
        print 'Error: Writing model fails: %s' % (filepath, )
        print e
""" Run N simulation Dialogues """
def simulation_epoch(simulation_epoch_size):
successes = 0
cumulative_reward = 0
cumulative_turns = 0
res = {}
for episode in xrange(simulation_epoch_size):
dialog_manager.initialize_episode()
episode_over = False
while(not episode_over):
episode_over, reward = dialog_manager.next_turn()
cumulative_reward += reward
if episode_over:
if reward > 0:
successes += 1
print ("simulation episode %s: Success" % (episode))
else: print ("simulation episode %s: Fail" % (episode))
cumulative_turns += dialog_manager.state_tracker.turn_count
res['success_rate'] = float(successes)/simulation_epoch_size
res['ave_reward'] = float(cumulative_reward)/simulation_epoch_size
res['ave_turns'] = float(cumulative_turns)/simulation_epoch_size
print ("simulation success rate %s, ave reward %s, ave turns %s" % (res['success_rate'], res['ave_reward'], res['ave_turns']))
return res
""" Warm_Start Simulation (by Rule Policy) """
def warm_start_simulation():
successes = 0
cumulative_reward = 0
cumulative_turns = 0
res = {}
for episode in xrange(warm_start_epochs):
dialog_manager.initialize_episode()
episode_over = False
while(not episode_over):
episode_over, reward = dialog_manager.next_turn()
cumulative_reward += reward
if episode_over:
if reward > 0:
successes += 1
print ("warm_start simulation episode %s: Success" % (episode))
else: print ("warm_start simulation episode %s: Fail" % (episode))
cumulative_turns += dialog_manager.state_tracker.turn_count
if len(agent.experience_replay_pool) >= agent.experience_replay_pool_size:
break
agent.warm_start = 2
num_warm_start_episodes = episode + 1  # the loop may break early once the replay pool fills
res['success_rate'] = float(successes)/num_warm_start_episodes
res['ave_reward'] = float(cumulative_reward)/num_warm_start_episodes
res['ave_turns'] = float(cumulative_turns)/num_warm_start_episodes
print ("Warm_Start %s epochs, success rate %s, ave reward %s, ave turns %s" % (episode+1, res['success_rate'], res['ave_reward'], res['ave_turns']))
print ("Current experience replay buffer size %s" % (len(agent.experience_replay_pool)))
def run_episodes(count, status):
successes = 0
cumulative_reward = 0
cumulative_turns = 0
if agt == 9 and params['trained_model_path'] == None and warm_start == 1:
print ('warm_start starting ...')
warm_start_simulation()
print ('warm_start finished, start RL training ...')
for episode in xrange(count):
print ("Episode: %s" % (episode))
dialog_manager.initialize_episode()
episode_over = False
while(not episode_over):
episode_over, reward = dialog_manager.next_turn()
cumulative_reward += reward
if episode_over:
if reward > 0:
print ("Successful Dialog!")
successes += 1
else: print ("Failed Dialog!")
cumulative_turns += dialog_manager.state_tracker.turn_count
# simulation
if agt == 9 and params['trained_model_path'] == None:
agent.predict_mode = True
simulation_res = simulation_epoch(simulation_epoch_size)
performance_records['success_rate'][episode] = simulation_res['success_rate']
performance_records['ave_turns'][episode] = simulation_res['ave_turns']
performance_records['ave_reward'][episode] = simulation_res['ave_reward']
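# If the simulated success rate ties or beats the best so far and clears the
# threshold, flush the experience replay pool and refill it with fresh
# on-policy experience before training on it.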
if simulation_res['success_rate'] >= best_res['success_rate']:
if simulation_res['success_rate'] >= success_rate_threshold: # threshold = 0.30
agent.experience_replay_pool = []
simulation_epoch(simulation_epoch_size)
if simulation_res['success_rate'] > best_res['success_rate']:
best_model['model'] = copy.deepcopy(agent)
best_res['success_rate'] = simulation_res['success_rate']
best_res['ave_reward'] = simulation_res['ave_reward']
best_res['ave_turns'] = simulation_res['ave_turns']
best_res['epoch'] = episode
agent.clone_dqn = copy.deepcopy(agent.dqn)
agent.train(batch_size, 1)
agent.predict_mode = False
print ("Simulation success rate %s, Ave reward %s, Ave turns %s, Best success rate %s" % (performance_records['success_rate'][episode], performance_records['ave_reward'][episode], performance_records['ave_turns'][episode], best_res['success_rate']))
if episode % save_check_point == 0 and params['trained_model_path'] == None: # save the model every 10 episodes
save_model(params['write_model_dir'], agt, best_res['success_rate'], best_model['model'], best_res['epoch'], episode)
save_performance_records(params['write_model_dir'], agt, performance_records)
print("Progress: %s / %s, Success rate: %s / %s Avg reward: %.2f Avg turns: %.2f" % (episode+1, count, successes, episode+1, float(cumulative_reward)/(episode+1), float(cumulative_turns)/(episode+1)))
print("Success rate: %s / %s Avg reward: %.2f Avg turns: %.2f" % (successes, count, float(cumulative_reward)/count, float(cumulative_turns)/count))
status['successes'] += successes
status['count'] += count
if agt == 9 and params['trained_model_path'] == None:
save_model(params['write_model_dir'], agt, float(successes)/count, best_model['model'], best_res['epoch'], count)
save_performance_records(params['write_model_dir'], agt, performance_records)
run_episodes(num_episodes, status)
|
[
"yvchen@linux7.csie.ntu.edu.tw"
] |
yvchen@linux7.csie.ntu.edu.tw
|
2e9e653a3ba5f6b2d39e8bc2a9b81531627f0d53
|
be5c86e8fe3f5836b7d2097dd5272c72b5b28f15
|
/binary-search/Python/0069-sqrtx(调试代码).py
|
34fb4dc1e8fd789b231dfc3dc042a189448bc516
|
[
"Apache-2.0"
] |
permissive
|
lemonnader/LeetCode-Solution-Well-Formed
|
d24674898ceb5441c036016dc30afc58e4a1247a
|
baabdb1990fd49ab82a712e121f49c4f68b29459
|
refs/heads/master
| 2021-04-23T18:49:40.337569 | 2020-03-24T04:50:27 | 2020-03-24T04:50:27 | 249,972,064 | 1 | 0 |
Apache-2.0
| 2020-03-25T12:26:25 | 2020-03-25T12:26:24 | null |
UTF-8
|
Python
| false | false | 1,303 |
py
|
class Solution:
def mySqrt(self, x: int) -> int:
if x == 0:
return 0
left = 1
right = x // 2
while left < right:
# Debug code start: to observe the interval endpoints closely, sleep 1 second on each pass through the loop
import time
time.sleep(1)
print('Debug: interval endpoints, midpoint, and branch taken: left = {} , right = {} , '.format(left, right), end='')
# Debug code end
# Buggy code: because the "left = mid" branch does not shrink the interval, the midpoint must be the right midpoint (see the corrected sketch after the script)
# mid = left + (right - left) // 2
mid = (left + right) >> 1
# Debug print
print('mid = {} ,'.format(mid), end=' ')
square = mid * mid
if square > x:
# Debug print
print('Taking the right = mid - 1 branch.')
right = mid - 1
else:
# Debug print
print('Taking the left = mid branch.')
left = mid
return left
if __name__ == '__main__':
# With x = 8 the code happens to return the right answer; with x = 9 it loops forever, exposing the bug
x = 9
solution = Solution()
res = solution.mySqrt(x)
print(res)
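# A corrected sketch (added for illustration, not part of the original demo):
# with the branch pair "right = mid - 1" / "left = mid", the midpoint must be
# the *right* midpoint, otherwise `left` can stop shrinking and the loop spins.
def my_sqrt_fixed(x: int) -> int:
    if x == 0:
        return 0
    left, right = 1, x // 2
    while left < right:
        mid = (left + right + 1) >> 1  # right midpoint guarantees progress
        if mid * mid > x:
            right = mid - 1
        else:
            left = mid
    return left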
|
[
"121088825@qq.com"
] |
121088825@qq.com
|
06c0dbfe0bbb879d1f90f6a8109b9356f8922abb
|
e9e4182ab5c6fc07b9090923b85b487a8d140be4
|
/ilp/database/__init__.py
|
165a886de5ca9776041c41459074ee17ed2b732a
|
[] |
no_license
|
slowpoketail/ilp
|
2c68cd48c560e6e8e24fa99fb125519f3d297ed8
|
db1681aede0c507003453c055a0427b9b9659082
|
refs/heads/master
| 2020-04-17T17:13:22.839052 | 2015-07-06T16:20:21 | 2015-07-06T16:20:21 | 38,626,930 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Kirschwasser - a tag based file indexer
#
# Author: slowpoke <mail+git@slowpoke.io>
#
# This program is Free Software under the non-terms
# of the Anti-License. Do whatever the fuck you want.
from ._api import DatabaseAPI as API
from ._shelvedb import ShelveDB
|
[
"mail+git@slowpoke.io"
] |
mail+git@slowpoke.io
|
21e37b4f7a6e38423629ff7f88949c775997a74a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02536/s375378291.py
|
93ecd40e03e6ad422973faf79ca95508b26c6569
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,207 |
py
|
import sys
sys.setrecursionlimit(10 ** 9)
class UnionFind():
def __init__(self, n):
self.n = n
self.root = [-1]*(n+1)
self.rank = [0]*(n+1)
def find(self, x):  # find the root of x's set
if self.root[x] < 0:
return x
else:
self.root[x] = self.find(self.root[x])  # recurse (path compression)
return self.root[x]
def unite(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
elif self.rank[x] > self.rank[y]:  # link under the deeper tree
self.root[x] += self.root[y]
self.root[y] = x  # make x the parent of y
else:
self.root[y] += self.root[x]
self.root[x] = y
if self.rank[x] == self.rank[y]:
self.rank[y] += 1
def issame(self, x, y):  # test whether x and y are in the same set
return self.find(x) == self.find(y)
def count(self, x):  # number of elements in the set containing x
return (-1)*self.root[self.find(x)]
n, m = map(int, input().split())
uf = UnionFind(n)
for i in range(m):
a, b = map(int, input().split())
uf.unite(a-1, b-1)
ans = set()
for i in range(n):
ans.add(uf.find(i))
print(len(ans)-1)
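# Usage sketch for the helpers the driver above does not exercise (illustrative):
#   uf2 = UnionFind(5)
#   uf2.unite(0, 1)
#   uf2.issame(0, 1)  # -> True
#   uf2.count(0)      # -> 2, the size of the set containing element 0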
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c1f51bda93c447b50b5b61dece51c0c3bc2b7faf
|
044bb913a4d1e1056b1e5f7a4c127b630de51af7
|
/AlexReceitas/asgi.py
|
d6f8ec566796de044326333fc24f33b3791e7bb2
|
[] |
no_license
|
alexpb1/AlexReceitas
|
a5c77a52af7434afc6db5845c1715da0e9da730f
|
e4bf6931f5ccf75bfe8309b04c26f3fbf8497b57
|
refs/heads/main
| 2023-08-02T08:24:15.189132 | 2021-09-19T23:25:01 | 2021-09-19T23:25:01 | 387,615,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
"""
ASGI config for AlexReceitas project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AlexReceitas.settings')
application = get_asgi_application()
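# Deployment sketch (illustrative; assumes an ASGI server such as uvicorn is installed):
#   uvicorn AlexReceitas.asgi:application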
|
[
"alexsandropb3@gmail.com"
] |
alexsandropb3@gmail.com
|
e978d5eca5c736f3b8f826507cd71ef790771729
|
f6ed068e6035987d71c17d27e630af0f4b92370e
|
/Validacion_Surface/valSurface.py
|
c8f76a3671883e4bd0782c19e322c1ad889ed053
|
[] |
no_license
|
LuisFelipeArteaga/Reconstruccion
|
b61e50cf0f8b27989f2d898967a5537409d63a5c
|
3ae222e5cc2f9ada476892f933c273faa3885708
|
refs/heads/master
| 2023-01-24T12:09:19.770548 | 2020-11-15T00:24:29 | 2020-11-15T00:24:29 | 114,788,374 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,681 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 09:33:33 2019
@author: felipe
"""
import numpy as np
#import matplotlib.pyplot as plt
#import time
#import nibabel as nib
#import cv2
import open3d as opd
import os
import itertools
from functools import partial
from multiprocessing import Pool
from sklearn.model_selection import KFold
#from scipy import stats
from scipy import spatial
from sklearn.svm import SVR
#from mpl_toolkits.mplot3d import Axes3D
#from sklearn.preprocessing import StandardScaler
#from sklearn.model_selection import GridSearchCV
from numpy import linalg as LA
from joblib import dump
# from joblib import load
##############################################################################
def crossval_bal(X,t,nPart):
kf = KFold(n_splits=nPart,shuffle=True,random_state=123)
kf.get_n_splits(X)
Train_Index = []
Test_Index = []
for train_index, test_index in kf.split(t):
Train_Index.append(train_index)
Test_Index.append(test_index)
return Train_Index,Test_Index
###############################################################################
def Feature_Scaling(X,a,b):
return a + ((X-np.min(X))*(b-a))/(np.max(X)-np.min(X))
##############################################################################
def svrTrain(X,t,XTest,Kernel,sig,c,e):
svr = SVR(kernel = 'rbf',gamma=sig, C=c, epsilon = e)
svr.fit(X,t)
return svr, svr.predict(XTest)
##############################################################################
def svrTrainP(X,t,XTest,tTest,param):
sig = param[0]
c = param[1]
e = param[2]
svr = SVR(kernel = 'rbf',gamma=sig, C=c, epsilon = e)
svr.fit(X,t)
t_est = svr.predict(XTest)
Error = LA.norm((t_est - tTest))/LA.norm(tTest)
outPutParam = [Error,sig,c,e]
return outPutParam
##############################################################################
def FindParamSVR(X,t,Kernel,t_data,ii):
#
num_Workers = 4
nP = 10
Train_Index2,Test_Index2 = crossval_bal(X,t,nP)
#-------------------------------------------------
#exp1 = [-9, -7, -5, -3, -1, 0, 1, 3, 5, 7, 9]
exp1 = [-9, -3, -1, 0, 1, 3, 9]
c = np.exp2(exp1)
epsilon = [0.0,0.0005,0.01,0.05,0.1,0.2]
if Kernel == 'rbf':
Nsig = 10
s0 = np.median(spatial.distance.pdist(X,metric='euclidean'))
sig = np.linspace(0.1*s0,s0,Nsig)
else:
sig = 1
Nsig = 1
#----------------------------------------------
## Partition
cList = []
gammaList = []
epsilonList = []
errorList = []
for ff in range(nP):
Xtrain = X[Train_Index2[ff]]
Xtest = X[Test_Index2[ff]]
tTrain = t[Train_Index2[ff]]
tTest = t_data[Test_Index2[ff]]
#------------------------------------------------------------
param= list(itertools.product(sig,c,epsilon))
#-------------------------------------------------------------
with Pool(num_Workers) as p :
funcSVR = partial(svrTrainP,Xtrain,tTrain,Xtest,tTest)
Output = p.map(funcSVR,param)
#---------------------------------------------------------
Output = np.asarray(Output)
minArg = np.argmin(Output[:,0])
print(Output[minArg,1],Output[minArg,2],Output[minArg,3])
gammaList.append(Output[minArg,1])
cList.append(Output[minArg,2])
epsilonList.append(Output[minArg,3])
errorList.append(Output[minArg,0])
filename2 = os.path.join('data_save','Error','error'+str(ii)+'_'+str(ff)+'.npy')
np.save(filename2, Output[minArg])  # best [error, gamma, C, epsilon] row for this fold
return cList, gammaList, epsilonList, errorList
##############################################################################
def get_data_myc(filename):
#filename = os.path.join('Points_Clouds',name)
pcd1 = opd.read_point_cloud(filename)
auxPointsS = Feature_Scaling(np.asarray(pcd1.points),-1,1)
for pp,ii in enumerate(auxPointsS):
pcd1.points[pp] = ii
pcd = opd.voxel_down_sample(pcd1, voxel_size = 0.06)
#pcd=pcd1
print('Point Cloud size = ', np.asarray(pcd.points).shape[0])
print('compute normals ...')
opd.estimate_normals(pcd, search_param = opd.KDTreeSearchParamHybrid(
radius = .5, max_nn = 30))
print('end')
mycCenter = np.mean(np.asarray(pcd.points[:]),axis=0)
for ii,qq in enumerate(pcd.points):
p1 = mycCenter - qq;
p2 = np.asarray(pcd.normals)[ii,:]
angle = np.arctan2(np.linalg.norm(np.cross(p1,p2)),np.dot(p1,p2))
if angle < np.pi/2:  # angle from arctan2(norm(cross), dot) lies in [0, pi]
pcd.normals[ii] = -pcd.normals[ii]
delta = 0.01;
pointsOut = np.asarray(pcd.points) + delta*np.asarray(pcd.normals)
pointsIn = np.asarray(pcd.points) - delta*np.asarray(pcd.normals)
data = np.concatenate((pointsOut,np.asarray(pcd.points),pointsIn),axis = 0)
labels = np.hstack((np.ones(pointsIn.shape[0],),
np.zeros(pointsIn.shape[0],),
-np.ones(pointsIn.shape[0],)))
opd.draw_geometries([pcd])
return data,labels
###############################################################################
###############################################################################
if __name__ == '__main__':
#path = '/home/felipe/Desktop/Data_Heart/MM-WHS_2/Validacion_Surface/Point_cloud'
nPart = 10
filename = os.path.join('Point_Cloud','bunny.ply')
X,t = get_data_myc(filename)
#---------------------------------------------------------
#Validation
Train_Index,Test_Index = crossval_bal(X,t,nPart)
print(X.shape[0]/3)
#---------------------------------------------------------
#Classification initialization
#--------------------------------------------------------
#main loop
Kernel ='rbf'
cDic = {}
gammaDic = {}
epsilonDic = {}
errorDic = {}
errorList2 = []
for ii in range(1):
print('Fold ',ii+1,' of ' ,nPart)
#-----------------------------------------------------
#Partition
Xtrain = X[Train_Index[ii]]
Xtest = X[Test_Index[ii]]
#Xtest = np.reshape(Xtest,(Xtest.shape[0],1))
tTrain = t[Train_Index[ii]]
tTest = t[Test_Index[ii]]
#
y_dataR = t[Train_Index[ii]]
#
#
cList, gammaList, epsilonList, errorList = FindParamSVR(
Xtrain,tTrain,Kernel,y_dataR,ii)
# #--------------------------------------------
# #Best Parametros
indx = np.argmin(errorList)
#
cbest = cList[indx]
gammabest = gammaList[indx]
epsilonbest = epsilonList[indx]
svr,t_est2 = svrTrain(Xtrain,tTrain,Xtest,Kernel,gammabest,cbest,epsilonbest)
filename2 = os.path.join('Model','svrBunny'+str(ii+1)+'.joblib')
print('SAVE ',filename2,'\n')
dump(svr, filename2)
Error2 = LA.norm((t_est2 - tTest))/LA.norm(tTest)
errorList2.append(Error2)
#
# #-------------------------------------------
# #save
cDic[str(ii)] = cList
gammaDic[str(ii)] = gammaList
epsilonDic[str(ii)] = epsilonList
errorDic[str(ii)] = errorList
#
np.save('data_save/cDicBunny.npy',cDic)
np.save('data_save/gammaDicBunny.npy',gammaDic)
np.save('data_save/epsilonDicBunny.npy',epsilonDic)
np.save('data_save/errorDicBunny.npy',errorDic)
np.save('data_save/error2DicBunny.npy',errorList2)
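# Loading sketch (illustrative): a model dumped above can be restored with
# joblib and used for prediction on new points, e.g.
#   from joblib import load
#   svr = load(os.path.join('Model', 'svrBunny1.joblib'))
#   t_hat = svr.predict(Xtest)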
|
[
"noreply@github.com"
] |
LuisFelipeArteaga.noreply@github.com
|
d34d5c5575b66e420737645c5c908346ca17c1d5
|
8b20ede3175df4fe3329ce6f5bc6c608d4066bd9
|
/2016-05-11-LDW-Selenium-facebook-select.py
|
a60a386019e78d8fa599851d4c071a200a5ca8c5
|
[
"MIT"
] |
permissive
|
lukexyz/Selenium-Examples
|
487b048a41732eaf22b9f42397bcc325321b151f
|
70ec3f85a84e4657b5c275a74270d22aefe47e72
|
refs/heads/master
| 2021-01-01T05:11:06.483365 | 2016-05-17T10:59:07 | 2016-05-17T10:59:07 | 58,638,323 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,043 |
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
import unittest
import time
class LoginTest(unittest.TestCase):
def setUp(self):
# Define driver
self.driver = webdriver.Chrome()
self.driver.get("https://www.facebook.com")
def test_Login(self):
driver = self.driver
# Web element variables
emailFieldID = "email"
monthDropDownID = "month"
# Create elements (allowing 10s to load)
WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(emailFieldID))
monthDropDownElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(monthDropDownID))
# Action: select month from drop down
Select(monthDropDownElement).select_by_visible_text("May")
# Verify the actions visually
time.sleep(3)
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
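# Other Select helpers (illustrative; the option value/index below are assumptions
# about the page, not verified against it):
#   Select(monthDropDownElement).select_by_value("5")
#   Select(monthDropDownElement).select_by_index(5)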
|
[
"lukedwoods@gmail.com"
] |
lukedwoods@gmail.com
|
e1508f8201b4113f896bf0ace8208bf541a2431b
|
de4d88db6ea32d20020c169f734edd4b95c3092d
|
/aiotdlib/api/types/sponsored_message.py
|
d0baa01e34edffdfdd0b1242e871c3ddd8921c86
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
thiagosm/aiotdlib
|
5cc790a5645f7e4cc61bbd0791433ed182d69062
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
refs/heads/main
| 2023-08-15T05:16:28.436803 | 2021-10-18T20:41:27 | 2021-10-18T20:41:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,493 |
py
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
import typing
from pydantic import Field
from .internal_link_type import InternalLinkType
from .message_content import MessageContent
from ..base_object import BaseObject
class SponsoredMessage(BaseObject):
"""
Describes a sponsored message
:param id: Unique sponsored message identifier
:type id: :class:`int`
:param sponsor_chat_id: Chat identifier
:type sponsor_chat_id: :class:`int`
:param link: An internal link to be opened when the sponsored message is clicked; may be null. If null, the sponsor chat needs to be opened instead, defaults to None
:type link: :class:`InternalLinkType`, optional
:param content: Content of the message
:type content: :class:`MessageContent`
"""
ID: str = Field("sponsoredMessage", alias="@type")
id: int
sponsor_chat_id: int
link: typing.Optional[InternalLinkType] = None
content: MessageContent
@staticmethod
def read(q: dict) -> SponsoredMessage:
return SponsoredMessage.construct(**q)
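# Construction sketch (illustrative; field values are made up and `content`
# stands for a MessageContent payload):
#   msg = SponsoredMessage.read({
#       "id": 1,
#       "sponsor_chat_id": 123456,
#       "link": None,
#       "content": content,
#   })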
|
[
"pylakey@protonmail.com"
] |
pylakey@protonmail.com
|
1cfb6f95e2751534279008265249d4fcd99135a7
|
477b9d178fa0a5b60a234e845692c1b3ac42f20c
|
/python-basics/input-output/files-directories.py
|
3e6a3790d242079545837131a37e1518fd78757b
|
[] |
no_license
|
DMS-online/python3-for-beginners
|
95c4a6036bf82f4f2991ca76ad3cdd448c9c8b6f
|
7721a8173187ae38792743d22c4e8565a60cb152
|
refs/heads/master
| 2023-03-16T02:02:01.378995 | 2020-04-17T03:20:46 | 2020-04-17T03:20:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
#Working with files and directories
import os
#Create a directory
if (not os.path.isdir(os.getcwd() + "/files")) :
os.mkdir("files")
#Place a file in the directory
with open ("files/test.txt", "w") as data:
data.write("This is a test file")
#Rename file
os.rename("files/test.txt", "files/test2.txt")
#Delete a file
os.remove("files/test2.txt")
#Delete a directory
os.rmdir("files")
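# Equivalent pathlib sketch (illustrative):
#   from pathlib import Path
#   d = Path("files")
#   d.mkdir(exist_ok=True)
#   (d / "test.txt").write_text("This is a test file")
#   (d / "test.txt").rename(d / "test2.txt")
#   (d / "test2.txt").unlink()
#   d.rmdir()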
|
[
"leeassam@gmail.com"
] |
leeassam@gmail.com
|
58735fe65a67b7f724a8be2f26ad1e17b44edd41
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03285/s132035467.py
|
cb2b23dc5e61a4328b3cff2153a67f2c568cb830
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 118 |
py
|
n=int(input())
for i in range(n):
for j in range(n):
if i*4+j*7==n:
print('Yes')
exit()
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
46653a9c57737b49f529a02867c747ffd19fc874
|
9b1323e7a3b14159a7de17cb76b5650c00ea3957
|
/train_data_resize.py
|
c2433805d7aade128cfa16cfbb8322315b7cc4f9
|
[] |
no_license
|
amit-iiitm/ANPR
|
96f089c0088a07f84de4a8482e8d5753cedeefdc
|
3ae6bd3d313777688ee872d2603719d665106e20
|
refs/heads/master
| 2021-01-10T13:44:59.424946 | 2015-10-27T22:34:27 | 2015-10-27T22:34:27 | 45,073,727 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,570 |
py
|
from multiprocessing import Pool
from PIL import Image
import os
import os.path
#p=Pool(5)
def process_fpath(path):
#listing=os.listdir(path)
#im=Image.open(path1+path)
inc=1
im=Image.open(path)
im=im.convert('L')
imw=im.size[0]
imh=im.size[1]
pix=im.load()
start=0
flag=0
end=0
flag1=0
flag2=0
flag3=0
left=128
right=0
for i in range(1,imh):
for j in range(1,imw):
if (pix[j,i]!=255):  # PIL pixel access from load() is indexed [x, y]
flag=1
if (flag==1):
start=i
break
for i in range(imh-1,-1,-1):
for j in range(1,imw):
if (pix[j,i]!=255):
flag1=1
if (flag1==1):
end=i
break
for i in range(1,imh):
flag2=0
for j in range(1,imw):
if (pix[j,i]!=255):
flag2=1
if (flag2==1):
left=min(left,j)
break
for i in range(imh-1,-1,-1):
flag3=0
for j in range(1,imw):
if (pix[j,i]!=255):
flag3=1
if (flag3==1):
right=max(right,j)
break
box=(left,start-10,right+5,end+10)
area=im.crop(box)
area.show()
im = im.resize((28,28),Image.ANTIALIAS)  # resize returns a new image; keep it
#im1=im.copy()
#loc=os.path.join('/home/nikhil/n/imp/btp/testimages/newimages/',inc+".jpg")
#im1.save(loc)
#inc=inc+1
im.show()
#print start
#print inc
#im.save(os.path.join(path2,path), "JPEG")
process_fpath('img.bmp')
|
[
"akumar4850@gmail.com"
] |
akumar4850@gmail.com
|
24d5f241df7f107b548f10c64a50f3693ad6b648
|
bad9e83194cf042eb06e7c13f64520bbaeafa0a2
|
/lib/rosgraph/impl/__init__.py
|
6e2ea2e90c5c4f25417096a0f7076e21a3b6be99
|
[] |
no_license
|
moocoder/MsgRouter
|
e0cddc0fde1a36184d6b4e46174e59d647a186ac
|
0c9305208696e2fb62fc7f4b67b6e6e4d2b3c1b4
|
refs/heads/master
| 2021-01-10T06:09:50.121985 | 2016-03-21T14:42:40 | 2016-03-21T14:42:40 | 53,139,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,668 |
py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: __init__.py 5735 2009-08-20 21:31:27Z sfkwc $
|
[
"clouderow@gmail.com"
] |
clouderow@gmail.com
|
ed63d950e87ed10e96578f8dbf94561087eac0f7
|
cc7349f85a1eac68729acfc0118d59a3fac56181
|
/applications/TransactiveEnergy-eioc/ControllerAgent/controller/controllerAgent.py
|
6bc8c9287b413dd545faef71eabc1bc5958fddd5
|
[] |
no_license
|
YingyingTang111/GOSS-GridAPPS-D
|
cb5fd9e6dab39263fe1eb30c8b55bb8be1b26e6c
|
5a08a5592c6fc1fe83ba3e630957578c77d38aac
|
refs/heads/master
| 2021-09-10T14:19:29.766291 | 2018-03-27T17:01:45 | 2018-03-27T17:01:45 | 105,942,437 | 0 | 0 | null | 2017-10-05T21:29:34 | 2017-10-05T21:29:33 | null |
UTF-8
|
Python
| false | false | 71,996 |
py
|
import datetime
import logging
import sys
import uuid
import math
import warnings
import json
from volttron.platform.vip.agent import Agent, Core, PubSub, compat
from volttron.platform.agent import utils
from volttron.platform.messaging import topics, headers as headers_mod
from matplotlib import cm
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '0.1'
def controller_agent(config_path, **kwargs):
# Obtain the agent subscription and initial values from config file
config = utils.load_config(config_path)
houses = config['houses']
aggregatorDetail = config['aggregator']
aggregatorInfo = config['aggregator_information']
fncs_bridgeInfo = config['fncs_bridge']
# agentSubscription = config['subscriptions']
# agentInitialVal = config['initial_value']
class controllerAgent(Agent):
'''This agent is the bottom-level controller that bids price and quantity to the upper-level agent,
receives the clearing price, and adjusts the house set point accordingly
'''
def __init__(self, **kwargs):
super(controllerAgent, self).__init__(**kwargs)
self.startTime = datetime.datetime.now()
_log.info('Simulation starts from: {0} in controller agent {1}.'.format(str(self.startTime), config['agentid']))
houseGroupId = config['agentid'].replace('controller', '')
# Initialize the instance variables
self.aggregator = {'name': 'none', 'market_id': 0, 'average_price': -1, 'std_dev': -1, 'clear_price': -1, \
'initial_price': -1, 'price_cap':9999.0, 'period': -1}
# market information for aggregator agent - Market registration information
# self.aggregator['name'] = self.controller[houseName]['aggregatorName']
self.aggregator['market_id'] = aggregatorInfo['market_id']
self.aggregator['market_unit'] = aggregatorInfo['aggregator_unit']
self.aggregator['initial_price'] = aggregatorInfo['initial_price']
self.aggregator['average_price'] = aggregatorInfo['average_price']
self.aggregator['std_dev'] = aggregatorInfo['std_dev']
self.aggregator['clear_price'] = aggregatorInfo['clear_price']
self.aggregator['price_cap'] = aggregatorInfo['price_cap']
self.aggregator['period'] = aggregatorInfo['period']
#
self.house = {}
self.controller = {}
self.controller_bid = {}
# self.bidded = {}
self.subscriptions = {}
self.allHouseBids = {}
# Initialize the variables - loop through each house controlled by this controller
for oneHouse in houses:
houseName = oneHouse.keys()
if len(houseName) != 1:
raise ValueError('For each house, more than one house keys are given')
else:
houseName = houseName[0]
houseInfo = oneHouse[houseName]
agentInitialVal = houseInfo['initial_value']
agentSubscription = houseInfo['subscriptions']
self.house[houseName] = {'target': 'air_temperature', 'setpoint0':-1, 'lastsetpoint0': 0, 'hvac_load': 1, 'controlled_load_curr': 1, \
'uncontrolled_load': 1, 'deadband': 0, 'air_temperature': -1, 'power_state': 'UNKNOWN', 'last_pState': 'UNKNOWN',
'heating_demand': 0, 'cooling_demand': 0, 'aux_state': 0, 'heat_state': 0, 'cool_state': 0,
'thermostat_state': 'UNKNOWN',
'heating_setpoint0': -1, 'cooling_setpoint0': -1,
're_override': 'NORMAL',
'UA': 0.0, 'mass_heat_coeff': 0.0, 'air_heat_capacity_cd': 0.0, 'mass_heat_capacity': 0.0, 'solar_gain': 0.0, 'heat_cool_gain': 0.0,
'outdoor_temperature': 0.0, 'mass_temperature': 0.0, 'design_cooling_capacity':0.0, 'cooling_COP': 0.0
}
self.controller[houseName] = {'name': 'none','marketName': 'none', 'houseName': 'none', 'simple_mode': 'none', 'setpoint': 'none', 'lastbid_id': -1, 'lastmkt_id': -1, 'bid_id': 'none', \
'slider_setting': -0.001, 'period': -1, 'ramp_low': 0, 'ramp_high': 0, 'range_low': 0, \
'range_high': 0, 'dir': 0, 'direction': 0, 'use_predictive_bidding': 0, 'deadband': 0, 'last_p': 0, \
'last_q': 0, 'setpoint0': -1, 'minT': 0, 'maxT': 0, 'bid_delay': 60, 'next_run': 0, 't1': 0, 't2': 0,
'use_override': 'OFF', 'control_mode': 'CN_RAMP', 'resolve_mode': 'DEADBAND',
'slider_setting': -0.001, 'slider_setting_heat': -0.001, 'slider_setting_cool': -0.001, 'sliding_time_delay': -1,
'heat_range_high': 3, 'heat_range_low': -5, 'heat_ramp_high': 0, 'heating_ramp_low': 0,
'cool_range_high': 5, 'cool_range_low': -3, 'cooling_ramp_high': 0, 'cooling_ramp_low': 0,
'heating_setpoint0': -1, 'cooling_setpoint0': -1, 'heating_demand': 0, 'cooling_demand': 0,
'sliding_time_delay': -1,
'thermostat_mode': 'INVALID', 'last_mode': 'INVALID', 'previous_mode': 'INVALID',
'time_off': sys.maxsize
}
self.controller_bid[houseName] = {'market_id': -1, 'bid_id': 'none', 'bid_price': 0.0, 'bid_quantity': 0, 'bid_accepted': 1, \
'state': 'UNKNOWN', 'rebid': 0}
# Read and assign initial values from agentInitialVal
# controller information
self.controller[houseName]['name'] = houseName #config['agentid']
self.controller[houseName]['ctrl_cap'] = agentInitialVal['controller_information']['ctrl_cap']
self.controller[houseName]['control_mode'] = agentInitialVal['controller_information']['control_mode']
self.controller[houseName]['aggregatorName'] = agentInitialVal['controller_information']['aggregatorName']
self.controller[houseName]['houseName'] = agentInitialVal['controller_information']['houseName']
self.controller[houseName]['bid_id'] = agentInitialVal['controller_information']['bid_id']
self.controller[houseName]['period'] = agentInitialVal['controller_information']['period']
self.controller[houseName]['ramp_low'] = agentInitialVal['controller_information']['ramp_low']
self.controller[houseName]['ramp_high'] = agentInitialVal['controller_information']['ramp_high']
self.controller[houseName]['range_low'] = agentInitialVal['controller_information']['range_low']
self.controller[houseName]['range_high'] = agentInitialVal['controller_information']['range_high']
self.controller[houseName]['setpoint0'] = agentInitialVal['controller_information']['base_setpoint']
# self.controller[houseName]['bid_delay'] = agentInitialVal['controller_information']['bid_delay']
self.controller[houseName]['use_predictive_bidding'] = agentInitialVal['controller_information']['use_predictive_bidding']
self.controller[houseName]['use_override'] = agentInitialVal['controller_information']['use_override']
self.controller[houseName]['last_setpoint'] = self.controller[houseName]['setpoint0']
# Read in pre-run hvac and Qh data
self.house[houseName]['hvac_load'] = agentInitialVal['controller_information']['hvac_load']
self.house[houseName]['heat_cool_gain'] = agentInitialVal['controller_information']['heat_cool_gain']
# house information - values arrive after the first time step, so zero defaults are set here
self.house[houseName]['air_temperature'] = 0
self.house[houseName]['power_state'] = "ON"
# self.house[houseName]['hvac_load'] = 0
self.house[houseName]['target'] = "air_temperature"
self.house[houseName]['deadband'] = 2
self.house[houseName]['MassInternalGainFraction'] = 0.5
self.house[houseName]['MassSolarGainFraction'] = 0.5
self.house[houseName]['Qi'] = 6819.0
self.house[houseName]['cooling_COP'] = 4.07
self.controller[houseName]['deadband'] = self.house[houseName]['deadband']
# self.house['setpoint0'] = 0
# self.house['lastsetpoint0'] = self.house['setpoint0']
## Rearrange object based on given initial value
# Assign default values if it is simple mode:
if self.controller[houseName]['simple_mode'] == 'house_heat':
self.controller[houseName]['setpoint'] = 'heating_setpoint'
self.controller[houseName]['ramp_low'] = self.controller[houseName]['ramp_high'] = -2
self.controller[houseName]['range_low'] = -5
self.controller[houseName]['range_high'] = 0
self.controller[houseName]['dir'] = -1
elif self.controller[houseName]['simple_mode'] == 'house_cool':
self.controller[houseName]['setpoint'] = 'cooling_setpoint'
self.controller[houseName]['ramp_low'] = self.controller[houseName]['ramp_high'] = 2
self.controller[houseName]['range_low'] = 0
self.controller[houseName]['range_high'] = 5
self.controller[houseName]['dir'] = 1
elif self.controller[houseName]['simple_mode'] == 'house_preheat':
self.controller[houseName]['setpoint'] = 'heating_setpoint'
self.controller[houseName]['ramp_low'] = self.controller[houseName]['ramp_high'] = -2
self.controller[houseName]['range_low'] = -5
self.controller[houseName]['range_high'] = 3
self.controller[houseName]['dir'] = -1
elif self.controller[houseName]['simple_mode'] == 'house_precool':
self.controller[houseName]['setpoint'] = 'cooling_setpoint'
self.controller[houseName]['ramp_low'] = self.controller[houseName]['ramp_high'] = 2
self.controller[houseName]['range_low'] = -3
self.controller[houseName]['range_high'] = 5
self.controller[houseName]['dir'] = 1
elif self.controller[houseName]['simple_mode'] == 'waterheater':
self.controller[houseName]['setpoint'] = 'tank_setpoint'
self.controller[houseName]['ramp_low'] = self.controller[houseName]['ramp_high'] = -2
self.controller[houseName]['range_low'] = 0
self.controller[houseName]['range_high'] = 10
elif self.controller[houseName]['simple_mode'] == 'double_ramp':
self.controller[houseName]['heating_setpoint'] = 'heating_setpoint'
self.controller[houseName]['cooling_setpoint'] = 'cooling_setpoint'
self.controller[houseName]['heat_ramp_low'] = self.controller[houseName]['heat_ramp_high'] = -2
self.controller[houseName]['heat_range_low'] = -1
self.controller[houseName]['heat_range_high'] = 5
self.controller[houseName]['cool_ramp_low'] = self.controller[houseName]['cool_ramp_high'] = 2
self.controller[houseName]['cool_range_low'] = 5
self.controller[houseName]['cool_range_high'] = 5
# Update controller bidding period:
if self.controller[houseName]['period'] == 0.0:
self.controller[houseName]['period'] = 60
# If the controller time interval is smaller than the aggregator time interval
if self.aggregator['period'] > self.controller[houseName]['period']:
if self.aggregator['period'] % self.controller[houseName]['period'] != 0:
warnings.warn('The supply bid and demand bids do not coincide, with the given aggregator time interval\
%d s and controller time interval %d s' % (self.aggregator['period'], self.controller[houseName]['period']))
elif self.aggregator['period'] < self.controller[houseName]['period']:
# It is not allowed to have larger controller time interval than the market time interval
warnings.warn('The controller time interval %d s is larger than the aggregator time interval %d s' \
% (self.controller[houseName]['period'], self.aggregator['period']))
# Update bid delay:
if self.controller[houseName]['bid_delay'] < 0:
self.controller[houseName]['bid_delay'] = -self.controller[houseName]['bid_delay']
if self.controller[houseName]['bid_delay'] > self.controller[houseName]['period']:
warnings.warn('Bid delay is greater than the controller period. Resetting bid delay to 0.')
self.controller[houseName]['bid_delay'] = 0
# Check for abnormal input given
if self.controller[houseName]['use_predictive_bidding'] == 1 and self.controller[houseName]['deadband'] == 0:
warnings.warn('Controller deadband property not specified')
# Calculate dir:
if self.controller[houseName]['dir'] == 0:
high_val = self.controller[houseName]['ramp_high'] * self.controller[houseName]['range_high']
low_val = self.controller[houseName]['ramp_low'] * self.controller[houseName]['range_low']
if high_val > low_val:
self.controller[houseName]['dir'] = 1
elif high_val < low_val:
self.controller[houseName]['dir'] = -1
elif high_val == low_val and (abs(self.controller[houseName]['ramp_high']) > 0.001 or abs(self.controller[houseName]['ramp_low']) > 0.001):
self.controller[houseName]['dir'] = 0
if abs(self.controller[houseName]['ramp_high']) > 0:
self.controller[houseName]['direction'] = 1
else:
self.controller[houseName]['direction'] = -1
if self.controller[houseName]['ramp_low'] * self.controller[houseName]['ramp_high'] < 0:
warnings.warn('controller price curve is not injective and may behave strangely')
# Check double_ramp controller mode:
if self.controller[houseName]['sliding_time_delay'] < 0:
self.controller[houseName]['sliding_time_delay'] = 21600 # default sliding_time_delay of 6 hours
else:
self.controller[houseName]['sliding_time_delay'] = int(self.controller[houseName]['sliding_time_delay'])
# use_override
if self.controller[houseName]['use_override'] == 'ON' and self.controller[houseName]['bid_delay'] <= 0:
self.controller[houseName]['bid_delay'] = 1
# Check slider_setting values
if self.controller[houseName]['control_mode'] == 'CN_RAMP' or self.controller[houseName]['control_mode'] == 'CN_DOUBLE_PRICE':
if self.controller[houseName]['slider_setting'] < -0.001:
warnings.warn('slider_setting is negative, resetting to 0.0')
self.controller[houseName]['slider_setting'] = 0.0
elif self.controller[houseName]['slider_setting'] > 1.0:
warnings.warn('slider_setting is greater than 1.0, resetting to 1.0')
self.controller[houseName]['slider_setting'] = 1.0
# Obtain min and max values - presync part in GLD
if self.controller[houseName]['slider_setting'] == -0.001:
minT = self.controller[houseName]['setpoint0'] + self.controller[houseName]['range_low']
maxT = self.controller[houseName]['setpoint0'] + self.controller[houseName]['range_high']
elif self.controller[houseName]['slider_setting'] > 0:
minT = self.controller[houseName]['setpoint0'] + self.controller[houseName]['range_low'] * self.controller[houseName]['slider_setting']
maxT = self.controller[houseName]['setpoint0'] + self.controller[houseName]['range_high'] * self.controller[houseName]['slider_setting']
if self.controller[houseName]['range_low'] != 0:
self.controller[houseName]['ramp_low'] = 2 + (1 - self.controller[houseName]['slider_setting'])
else:
self.controller[houseName]['ramp_low'] = 0
if self.controller[houseName]['range_high'] != 0:
self.controller[houseName]['ramp_high'] = 2 + (1 - self.controller[houseName]['slider_setting'])
else:
self.controller[houseName]['ramp_high'] = 0
else:
minT = maxT = self.controller[houseName]['setpoint0']
# Update controller parameters
self.controller[houseName]['minT'] = minT
self.controller[houseName]['maxT'] = maxT
else:
raise ValueError('Currently only the ramp mode controller is defined')
# Initialize the controller's own parameters (the same for all houses)
self.controller['next_run'] = self.startTime
self.controller['lastmkt_id'] = -1
self.controller['bid_delay'] = agentInitialVal['controller_information']['bid_delay']
self.controller['period'] = agentInitialVal['controller_information']['period']
self.bid = True
# Flag indicating whether the controller has submitted its bid in this market period, without changes of house setpoints or power state
# self.bidded[houseName] = False
# Initialize the controller's last price and quantity
self.controller[houseName]['last_p'] = self.aggregator['initial_price']
self.controller[houseName]['last_q'] = 0
## Read and define subscription topics from agentSubscription
self.subscriptions[houseName] = []
# Check agentSubscription
house = agentSubscription['house']
if len(house) != 1:
raise ValueError('The controller must be configured with exactly one house')
# subscription from house
for key, val in house[0].items():
if self.controller[houseName]['houseName'] != key:
raise ValueError('The house name written into subscriptions is not the same as in initial_value')
self.subscriptions[houseName] = []
for key2, val2 in val.items():
# topic = 'fncs/output/devices/fncs_Test/' + key + '/' + key2
self.subscriptions[houseName].append(key2) # Put house property into subscriptions, rather than topic
# subscription from aggregator agent
aggregator = aggregatorDetail
if len(aggregator) != 1:
raise ValueError('The controller must be configured with exactly one aggregator agent')
for key, val in aggregator[0].items():
if self.controller[houseName]['aggregatorName'] != key:
raise ValueError('The aggregator name written into subscriptions is not the same as in initial_value')
self.subscriptions['aggregator'] = ''
topic = 'aggregator/' + key + '/all'
self.subscriptions['aggregator'] = topic
# subscription from fncs_bridge
self.subscriptions['fncs_bridge'] = []
fncs_bridge = fncs_bridgeInfo[0]
for key, val in fncs_bridge.items():
topic = key + '/simulation_end'
self.subscriptions['fncs_bridge'].append(topic)
@Core.receiver('onsetup')
def setup(self, sender, **kwargs):
self._agent_id = config['agentid']
@Core.receiver('onstart')
def startup(self, sender, **kwargs):
# Initialize subscription function to change setpoints
# Subscription to houses in GridLAB-D needs to post-process JSON format messages of all GLD objects together
subscription_topic = 'fncs/output/devices/fncs_Test/fncs_output'
self.vip.pubsub.subscribe(peer='pubsub',
prefix=subscription_topic,
callback=self.on_receive_house_message_fncs)
# for oneHouse in houses:
# houseName = oneHouse.keys()
# if len(houseName) != 1:
# raise ValueError('For each house, more than one house keys are given')
# else:
# houseName = houseName[0]
# # Assign to subscription topics
# for subscription_topic in self.subscriptions[houseName]:
# _log.info('Subscribing to ' + subscription_topic)
# self.vip.pubsub.subscribe(peer='pubsub',
# prefix=subscription_topic,
# callback=self.on_receive_house_message_fncs)
# Initialize subscription function for aggregator
subscription_topic = self.subscriptions['aggregator']
_log.info('Subscribing to ' + subscription_topic)
self.vip.pubsub.subscribe(peer='pubsub',
prefix=subscription_topic,
callback=self.on_receive_aggregator_message)
# Initialize subscription function to fncs_bridge:
for topic in self.subscriptions['fncs_bridge']:
_log.info('Subscribing to ' + topic)
self.vip.pubsub.subscribe(peer='pubsub',
prefix=topic,
callback=self.on_receive_fncs_bridge_message_fncs)
# ====================extract float from string ===============================
def get_num(self,fncs_string):
return float(''.join(ele for ele in fncs_string if ele.isdigit() or ele == '.'))
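# Note (illustrative caveat): this keeps only digits and '.', so signs,
# exponents, and multiple values are lost; a regex sketch such as
#   re.search(r'-?\d+(?:\.\d+)?', fncs_string)
# would be more robust (not used below).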
# ====================Obtain values from house ===========================
def on_receive_house_message_fncs(self, peer, sender, bus, topic, headers, message):
"""Subscribe to house publications and change the data accordingly
"""
# _log.info("Whole message", topic, message)
# #The time stamp is in the headers
# _log.info('Date', headers['Date'])
# Receive from GLD the property values of all configured objects; extract the house objects and the corresponding properties
# Extract the message
message = json.loads(message[0])
val = message['fncs_Test']
for oneHouse in houses:
houseName = list(oneHouse.keys())
if len(houseName) != 1:
raise ValueError('Each house entry must contain exactly one house key')
else:
houseName = houseName[0]
# Assign to subscription topics
for subscription_property in self.subscriptions[houseName]:
valTemp = val[houseName][subscription_property]
if valTemp != self.house[houseName][subscription_property]:
if subscription_property != 'power_state':
valTemp = float(valTemp)
# Record hvac load value only when non-zero
if (subscription_property == 'hvac_load'):
if (valTemp > 0.0):
self.house[houseName][subscription_property] = valTemp
else:
self.house[houseName][subscription_property] = valTemp
else:
self.house[houseName][subscription_property] = valTemp
# _log.info('Controller {0:s} recieves from house {2:s} the property {1:s} value {3:s}.'.format(config['agentid'], subscription_property, houseName, str(valTemp)))
# # Find the object name who sends the message
# device = topic.split("/")[-2]
# # Find the property sent
# topicProperty = topic.split("/")[-1]
# # Update controller data for house
# val = message[0]
# if "air_temperature" == topicProperty:
# self.house[device]['air_temperature'] = val
# # _log.info('Controller {0:s} recieves from house {2:s} the current temperature {1:f}.'.format(config['agentid'], val, device))
# if "power_state" == topicProperty:
# self.house[device]['power_state'] = val
# # _log.info('Controller {0:s} recieves from house {2:s} the power state {1:s}.'.format(config['agentid'], val, device))
# if "hvac_load" == topicProperty:
# self.house[device]['hvac_load'] = val
# # _log.info('Controller {0:s} recieves from house {2:s} the hvac load amount {1:f}.'.format(config['agentid'], val, device))
# ====================Obtain values from aggregator ===========================
def on_receive_aggregator_message(self, peer, sender, bus, topic, headers, message):
"""Subscribe to aggregator publications and change the data accordingly
"""
# Find the aggregator name who sends the message
aggregatorName = topic.split("/")[-2]
# Update controller data
val = message[0]
# _log.info('At time {1:s}, controller {0:s} recieves from aggregator the cleared data.'.format(self.controller['name']), self.timeSim.strftime("%Y-%m-%d %H:%M:%S"))
self.aggregator['market_id'] = val['market_id']
self.aggregator['std_dev'] = val['std_dev']
self.aggregator['average_price'] = val['average_price']
self.aggregator['clear_price']= val['clear_price']
self.aggregator['price_cap'] = val['price_cap']
self.aggregator['initial_price'] = val['initial_price']
# ====================Obtain values from fncs_bridge ===========================
def on_receive_fncs_bridge_message_fncs(self, peer, sender, bus, topic, headers, message):
"""Subscribe to fncs_bridge publications and change the data accordingly
"""
val = message[0] # value True
# _log.info('Aggregator {0:s} recieves from fncs_bridge the simulation ends message {1:s}'.format(self.market['name'], val))
if (val == 'True'):
# Dump to JSON fies and close the files
# print (json.dumps(self.controller_bids_metrics), file=controller_op)
# print (json.dumps(self.aggregator_cleared_metrics), file=aggregator_op)
# aggregator_op.close()
# controller_op.close()
# End the agent
self.core.stop()
# ==================== Obtain Tmin value =======================================
def calcTemp1(self, monitor, deadband, powerstate):
if powerstate == 'OFF':
Tmin = monitor - deadband *0.5
else:
Tmin = monitor + deadband *0.5
return Tmin
# =================== Obtain predicted average of Tmin/Tmax (ETP model) ========================================
def calcTemp2(self, Ua, Hm, Ca, Cm, MassInternalGainFraction, MassSolarGainFraction, Qi, Qs, Qh, Tout, monitor, Tmass, deadband, powerstate):
# Qh = -3.802783436188669e+04
# Initialization
Qh_estimate = 0.0
Qh_average = 0.0
Qh_count = 0.0
if Qh < 0.0:
Qh_estimate = Qh
if Qh_count > 0.0:
Qh_average = (Qh_average * Qh_count + Qh) / (Qh_count + 1.0)
Qh_count = Qh_count + 1.0
else:
Qh_average = Qh
Qh_count = 1.0
else:
Qh_estimate = Qh_average
Qa_OFF = ((1 - MassInternalGainFraction)*Qi) + ((1 - MassSolarGainFraction)*Qs)
Qa_ON = Qh + ((1 - MassInternalGainFraction)*Qi) + ((1 - MassSolarGainFraction)*Qs)
Qm = (MassInternalGainFraction*Qi) + (MassSolarGainFraction*Qs)
A_ETP = [[0.0, 0.0],[0.0, 0.0]]
B_ETP_ON = [0.0, 0.0]
B_ETP_OFF = [0.0, 0.0]
x = [monitor, Tmass]
L = [1.0, 0.0]
T = (self.controller['bid_delay'] + self.controller['period']) / 3600.0
AEI = [[0.0, 0.0], [0.0, 0.0]]
LAEI = [0.0, 0.0]
AET = [[0.0, 0.0], [0.0, 0.0]]
eAET = [[0.0, 0.0], [0.0, 0.0]]
LT = [0.0, 0.0]
AEx = [0.0, 0.0]
AxB_ON = [0.0, 0.0]
AxB_OFF = [0.0, 0.0]
LAxB = 0.0
LAIB = 0.0
Tmax = 0.0
if Ca != 0.0:
A_ETP[0][0] = -1.0 * (Ua + Hm) / Ca
A_ETP[0][1] = Hm / Ca
B_ETP_ON[0] = (Ua * Tout / Ca) + (Qa_ON / Ca)
B_ETP_OFF[0] = (Ua * Tout / Ca) + (Qa_OFF / Ca);
if Cm != 0.0:
A_ETP[1][0] = Hm / Cm
A_ETP[1][1] = -1.0 * Hm / Cm
B_ETP_ON[1] = Qm / Cm
B_ETP_OFF[1] = Qm / Cm
# Calculate inverse of A_ETP
detA = 0.0
if(((A_ETP[0][0]*A_ETP[1][1]) - (A_ETP[0][1]*A_ETP[1][0])) != 0.0):
detA = ((A_ETP[0][0]*A_ETP[1][1]) - (A_ETP[0][1]*A_ETP[1][0]))
AEI[0][0] = A_ETP[1][1]/detA
AEI[0][1] = -1*A_ETP[0][1]/detA
AEI[1][1] = A_ETP[0][0]/detA
AEI[1][0] = -1*A_ETP[1][0]/detA
else:
if powerstate == 'OFF':
return monitor - deadband / 2.0
else:
return monitor + deadband / 2.0
# Calculate exp(A_ETP*T)
AET[0][0] = A_ETP[0][0]*T
AET[0][1] = A_ETP[0][1]*T
AET[1][0] = A_ETP[1][0]*T
AET[1][1] = A_ETP[1][1]*T
if (AET[0][1] == 0.0 and AET[1][0] == 0.0): #diagonal matrix
eAET[0][0] = math.exp(AET[0][0])
eAET[0][1] = 0.0
eAET[1][0] = 0.0
eAET[1][1] = math.exp(AET[1][1])
elif AET[1][0] == 0.0: # upper triangular matrix
if(math.fabs(AET[0][0] - AET[1][1]) <= 1e-37): #nilpotent
eAET[0][0] = math.exp(AET[0][0])
eAET[0][1] = math.exp(AET[0][0]) * AET[0][1]
eAET[1][0] = 0.0
eAET[1][1] = math.exp(AET[0][0])
else:
eAET[0][0] = math.exp(AET[0][0])
eAET[0][1] = (AET[0][1]*(math.exp(AET[0][0]) - math.exp(AET[1][1])))/(AET[0][0] - AET[1][1])
eAET[1][0] = 0.0
eAET[1][1] = math.exp(AET[1][1])
else:
discr = (AET[0][0] - AET[1][1])*(AET[0][0] - AET[1][1]) + (4.0*AET[0][1]*AET[1][0])
pre = math.exp((AET[0][0] + AET[1][1])/2.0)
g = 0.0
if(math.fabs(discr) <= 1e-37):
eAET[0][0] = pre*(1.0 + ((AET[0][0] - AET[1][1])/2.0))
eAET[0][1] = pre*AET[0][1]
eAET[1][0] = pre*AET[1][0]
eAET[1][1] = pre*(1.0 - ((AET[0][0] - AET[1][1])/2.0))
elif (discr > 1e-37):
g = 0.5*math.sqrt(discr)
eAET[0][0] = pre*(math.cosh(g) + ((AET[0][0] - AET[1][1])*math.sinh(g)/(2.0*g)))
eAET[0][1] = pre*AET[0][1]*math.sinh(g)/g
eAET[1][0] = pre*AET[1][0]*math.sinh(g)/g
eAET[1][1] = pre*(math.cosh(g) - ((AET[0][0] - AET[1][1])*math.sinh(g)/(2.0*g)))
else:
g = 0.5*math.sqrt(math.fabs(discr));
eAET[0][0] = pre*(math.cos(g) + ((AET[0][0] - AET[1][1])*math.sin(g)/(2.0*g)))
eAET[0][1] = pre*AET[0][1]*math.sin(g)/g
eAET[1][0] = pre*AET[1][0]*math.sin(g)/g
eAET[1][1] = pre*(math.cos(g) - ((AET[0][0] - AET[1][1])*math.sin(g)/(2.0*g)))
# Calculate L*inv(A_ETP)
LAEI[0] = (L[0]*AEI[0][0]) + (L[1]*AEI[1][0])
LAEI[1] = (L[0]*AEI[0][1]) + (L[1]*AEI[1][1])
# Calculate L*inv(A_ETP)expm(A_ETP*T)
LT[0] = (LAEI[0]*eAET[0][0]) + (LAEI[1]*eAET[1][0])
LT[1] = (LAEI[0]*eAET[0][1]) + (LAEI[1]*eAET[1][1])
# Calculate A_ETP*x
AEx[0] = (A_ETP[0][0]*x[0]) + (A_ETP[0][1]*x[1])
AEx[1] = (A_ETP[1][0]*x[0]) + (A_ETP[1][1]*x[1])
# Calculate A_ETP*x + B_ETP_ON/OFF
AxB_OFF[0] = AEx[0] + B_ETP_OFF[0]
AxB_OFF[1] = AEx[1] + B_ETP_OFF[1]
AxB_ON[0] = AEx[0] + B_ETP_ON[0]
AxB_ON[1] = AEx[1] + B_ETP_ON[1]
# Calculate L*inv(A_ETP)expm(A_ETP*T)(A_ETP*x + B_ETP_ON/OFF)
LAxB_ON = (LT[0]*AxB_ON[0]) + (LT[1]*AxB_ON[1])
LAxB_OFF = (LT[0]*AxB_OFF[0]) + (LT[1]*AxB_OFF[1])
# Calculate L*inv(A_ETP)*B_ETP_ON/OFF
# if powerstate == 'OFF':
LAIB_OFF = (LAEI[0]*B_ETP_OFF[0]) + (LAEI[1]*B_ETP_OFF[1])
# else:
LAIB_ON = (LAEI[0]*B_ETP_ON[0]) + (LAEI[1]*B_ETP_ON[1])
# Calculate L*inv(A_ETP)expm(A_ETP*T)(A_ETP*x + B_ETP_ON/OFF) - L*inv(A_ETP)*B_ETP_ON/OFF +/- halfband
tmpOFF = LAxB_OFF - LAIB_OFF
tmpON = LAxB_ON - LAIB_ON
# Calculate Tmin and Tmax separately
if powerstate == 'OFF':
Tmin = min(monitor - (deadband/2.0), tmpON + (deadband/2.0))# LAxB - LAIB - (deadband/2.0)
Tmax = tmpOFF - (deadband/2.0)
else:
Tmin = tmpON + (deadband/2.0)
Tmax = max(monitor + (deadband/2.0), tmpOFF - (deadband/2.0))
Tavg = (Tmin + Tmax) / 2.0
return Tavg
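# Sanity-check sketch for the closed-form 2x2 matrix exponential used above
# (illustrative; assumes numpy/scipy are available; discr > 0 branch):
#   import numpy as np
#   from scipy.linalg import expm
#   M = np.array([[-2.0, 0.5], [0.3, -0.4]])
#   a, b, c, d = M[0, 0], M[0, 1], M[1, 0], M[1, 1]
#   pre = np.exp((a + d) / 2.0)
#   g = 0.5 * np.sqrt((a - d) ** 2 + 4.0 * b * c)
#   closed = pre * np.array([
#       [np.cosh(g) + (a - d) * np.sinh(g) / (2 * g), b * np.sinh(g) / g],
#       [c * np.sinh(g) / g, np.cosh(g) - (a - d) * np.sinh(g) / (2 * g)]])
#   assert np.allclose(closed, expm(M))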
@Core.periodic(1)
def controller_implementation(self):
''' This method comes from the sync and postsync parts of the controller source code in GLD
'''
self.controller_sync()
self.controller_postsync()
# ====================Sync content ===========================
def controller_sync(self):
''' This method comes from the sync and postsync parts of the controller source code in GLD
'''
# Create a dict to store all bids to markets
# allHouseBids = {}
# Inputs from market object:
marketId = self.aggregator['market_id']
clear_price = self.aggregator['clear_price']
avgP = self.aggregator['average_price']
stdP = self.aggregator['std_dev']
bid_delay = self.controller['bid_delay']
# Update controller t1 information
self.controller['t1'] = datetime.datetime.now()
# determine what we have to do in this sync step
update_setpoints = False
update_bid = False
# ========================================= Test ==========================================
# update_bid = True
# ========================================= Test ==========================================
if marketId != self.controller['lastmkt_id']:
# print ('sync: market changed, need to update the setpoints', t1, next_run, marketId, lastmkt_id)
update_setpoints = True
self.controller['lastmkt_id'] = marketId
elif self.controller['t1'] >= self.controller['next_run'] - datetime.timedelta(0,bid_delay) and self.bid == True: # only allow one bid per market cycle
# print ('sync: t1 within bidding window, need to publish bid and state', t1, next_run - bid_delay)
update_bid = True
else:
# print (' returning', next_run)
return
for oneHouse in houses:
houseName = list(oneHouse.keys())
if len(houseName) != 1:
raise ValueError('Each house entry must contain exactly one house key')
else:
houseName = houseName[0]
# # Update controller t1 information
# self.controller[houseName]['t1'] = datetime.datetime.now()
# Inputs from controller:
ramp_low = self.controller[houseName]['ramp_low']
ramp_high = self.controller[houseName]['ramp_high']
range_low = self.controller[houseName]['range_low']
range_high = self.controller[houseName]['range_high']
deadband = self.controller[houseName]['deadband']
setpoint0 = self.controller[houseName]['setpoint0']
last_setpoint = self.controller[houseName]['last_setpoint']
minT = self.controller[houseName]['minT']
maxT = self.controller[houseName]['maxT']
# bid_delay = self.controller[houseName]['bid_delay']
direction = self.controller[houseName]['direction']
ctrl_cap = self.controller[houseName]['ctrl_cap']
# Inputs from house object:
demand = self.house[houseName]['hvac_load']
monitor = self.house[houseName]['air_temperature']
powerstate = self.house[houseName]['power_state']
# variables needed for double_price bid mode
Ua = self.house[houseName]['UA']
Hm = self.house[houseName]['mass_heat_coeff']
Ca = self.house[houseName]['air_heat_capacity_cd']
Cm = self.house[houseName]['mass_heat_capacity']
MassInternalGainFraction = self.house[houseName]['MassInternalGainFraction']
MassSolarGainFraction = self.house[houseName]['MassSolarGainFraction']
Qi = self.house[houseName]['Qi']
Qs = self.house[houseName]['solar_gain']
Qh = self.house[houseName]['heat_cool_gain']
Tout = self.house[houseName]['outdoor_temperature']
Tmass = self.house[houseName]['mass_temperature']
# print (" sync:", demand, power_state, monitor, last_setpoint, deadband, direction, clear_price, avgP, stdP)
# # Check t1 to determine if the sync part is needed to be processed or not
# if self.controller[houseName]['t1'] == self.controller[houseName]['next_run'] and marketId == lastmkt_id :
# return
#
# if self.controller[houseName]['t1'] < self.controller[houseName]['next_run'] and marketId == lastmkt_id :
# if self.controller[houseName]['t1'] <= self.controller[houseName]['next_run'] - datetime.timedelta(0,bid_delay):
# if self.controller[houseName]['use_predictive_bidding'] == 1 and ((self.controller[houseName]['control_mode'] == 'CN_RAMP' and setpoint0 != last_setpoint)):
# # Base set point setpoint0 is changed, and therefore sync is needed:
# pass
# elif self.house[houseName]['last_pState'] != powerstate:
# # house power state is changed, therefore sync is needed
# pass
# elif self.controller[houseName]['use_override'] == 'ON' and self.controller[houseName]['t1'] >= self.controller[houseName]['next_run']- datetime.timedelta(0,bid_delay) and self.bidded[houseName] == False :
# # At the exact time that controller is operating, therefore sync is needed:
# self.bidded[houseName] = True # set it as true so that in the same controller period, it will not bid without the changes of house setpoint/power state
# pass
# else:
# return
# else:
# return
# If the market got updated, then update the set point
deadband_shift = 0
# Set deadband shift if user predictive bidding is true
if self.controller[houseName]['use_predictive_bidding'] == 1:
deadband_shift = 0.5 * deadband
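# Example (hypothetical numbers): with a 2 F deadband and predictive bidding
# enabled, deadband_shift = 1 F, so bids are shifted by +/-1 F around the
# comfort band depending on the current power state.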
# controller update house setpoint if market clears
if self.controller[houseName]['control_mode'] == 'CN_RAMP' or self.controller[houseName]['control_mode'] == 'CN_DOUBLE_PRICE':
# If market clears, update the setpoints based on cleared market price;
# Or, at the beginning of the simulation, update house setpoint based on controller settings (lastmkt_id == -1 at the beginning, therefore will go through here)
# if marketId != lastmkt_id:
if update_setpoints == True:
# Update controller last market id and bid id
# self.controller[houseName]['lastmkt_id'] = marketId
self.controller[houseName]['lastbid_id'] = -1
self.controller_bid[houseName]['rebid'] = 0
# Calculate shift direction
shift_direction = 0
if self.controller[houseName]['control_mode'] == 'CN_RAMP' and self.controller[houseName]['use_predictive_bidding'] == 1:
if (self.controller[houseName]['dir'] > 0 and clear_price < self.controller[houseName]['last_p']) or (self.controller[houseName]['dir'] < 0 and clear_price > self.controller[houseName]['last_p']):
shift_direction = -1
elif (self.controller[houseName]['dir'] > 0 and clear_price >= self.controller[houseName]['last_p']) or (self.controller[houseName]['dir'] < 0 and clear_price <= self.controller[houseName]['last_p']):
shift_direction = 1
else:
shift_direction = 0
# Calculate updated set_temp
if self.controller[houseName]['control_mode'] == 'CN_RAMP':
if abs(stdP) < 0.0001:
set_temp = setpoint0
elif clear_price < avgP and range_low != 0:
set_temp = setpoint0 + (clear_price - avgP) * abs(range_low) / (ramp_low * stdP) + deadband_shift*shift_direction
elif clear_price > avgP and range_high != 0:
set_temp = setpoint0 + (clear_price - avgP) * abs(range_high) / (ramp_high * stdP) + deadband_shift*shift_direction
else:
set_temp = last_setpoint # setpoint0 + deadband_shift*shift_direction
else:
if abs(stdP) < 0.0001:
set_temp = setpoint0
elif clear_price < avgP and range_low != 0:
set_temp = setpoint0 + (clear_price - avgP) * abs(range_low) / (ramp_low * stdP)
elif clear_price > avgP and range_high != 0:
set_temp = setpoint0 + (clear_price - avgP) * abs(range_high) / (ramp_high * stdP)
else:
set_temp = last_setpoint #setpoint0
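# Worked example (hypothetical numbers, CN_DOUBLE_PRICE branch): with
# setpoint0 = 72, avgP = 65, stdP = 16, ramp_high = 2 and range_high = 5,
# a clearing price of 81 gives set_temp = 72 + (81 - 65) * 5 / (2 * 16) = 74.5.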
# override
# if self.controller[houseName]['use_override'] == 'ON' and self.house[houseName]['re_override'] != 'none':
# if clear_price <= self.controller[houseName]['last_p']:
# self.fncs_publish['controller'][self.controller[houseName]['name']]['override_prop'] = 'ON'
# else:
# self.fncs_publish['controller'][self.controller[houseName]['name']]['override_prop'] = 'OFF'
# Check if set_temp is out of limit
if set_temp > maxT:
set_temp = maxT
elif set_temp < minT:
set_temp = minT
# Update last_setpoint if changed
if last_setpoint != set_temp:
self.controller[houseName]['last_setpoint'] = set_temp
# Update house set point
# if self.controller[houseName]['next_run'] != self.startTime: # At starting time of the simulation, setpoints also need to be updated
# Publish the changed setpoints:
pub_topic = 'fncs/input/' + houseName + '/cooling_setpoint'
# pub_topic = 'fncs/input' + houseGroupId + '/controller_' + houseName + '/cooling_setpoint'
_log.info('controller agent {0} publishes updated setpoints {1} to house controlled with topic: {2}'.format(config['agentid'], set_temp, pub_topic))
#Create timestamp
now = datetime.datetime.utcnow().isoformat(' ') + 'Z'
headers = {
headers_mod.DATE: now
}
self.vip.pubsub.publish('pubsub', pub_topic, headers, set_temp)
# Calculate bidding price
## ------------- ramp controller bid calculation -----------------------------------------
if self.controller[houseName]['control_mode'] == 'CN_RAMP':
# Bidding price when monitored load temperature is at the min and max limit of the controller
bid_price = -1
no_bid = 0
T_avg = 0.0
if self.controller[houseName]['dir'] > 0:
if self.controller[houseName]['use_predictive_bidding'] == 1:
if powerstate == 'OFF' and monitor > (maxT - deadband_shift):
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate != 'OFF' and monitor < (minT + deadband_shift):
bid_price = 0
no_bid = 1
elif powerstate != 'OFF' and monitor > maxT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate == 'OFF' and monitor < minT:
bid_price = 0
no_bid = 1
else:
if monitor > maxT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif monitor < minT:
bid_price = 0
no_bid = 1
elif self.controller[houseName]['dir'] < 0:
if self.controller[houseName]['use_predictive_bidding'] == 1:
if powerstate == 'OFF' and monitor < (minT + deadband_shift):
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate != 'OFF' and monitor > (maxT - deadband_shift):
bid_price = 0
no_bid = 1
elif powerstate != 'OFF' and monitor < minT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate == 'OFF' and monitor > maxT:
bid_price = 0
no_bid = 1
else:
if monitor < minT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif monitor > maxT:
bid_price = 0
no_bid = 1
elif self.controller[houseName]['dir'] == 0:
if self.controller[houseName]['use_predictive_bidding'] == 1:
if not(direction):
warnings.warn('the variable direction did not get set correctly')
elif ((monitor > maxT + deadband_shift) or (powerstate != 'OFF' and monitor > minT - deadband_shift)) and direction > 0:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif ((monitor < minT - deadband_shift) or (powerstate != 'OFF' and monitor < maxT + deadband_shift)) and direction < 0:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate == 'OFF' and monitor > maxT:
bid_price = 0
no_bid = 1
else:
if monitor < minT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif monitor > maxT:
bid_price = 0
no_bid = 1
else:
bid_price = avgP
# Bidding price when the monitored load temperature is within the controller temp limit
if monitor > setpoint0:
k_T = ramp_high
T_lim = range_high
elif monitor < setpoint0:
k_T = ramp_low
T_lim = range_low
else:
k_T = 0
T_lim = 0
bid_offset = 0.0001
if bid_price < 0 and monitor != setpoint0:
if abs(stdP) < bid_offset:
bid_price = avgP
else:
bid_price = avgP + (monitor - setpoint0)*(k_T * stdP) / abs(T_lim)
elif monitor == setpoint0:
bid_price = avgP
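# Worked example (hypothetical numbers): with avgP = 65, stdP = 16,
# setpoint0 = 72, monitor = 74, ramp_high = 2 and range_high = 5,
# bid_price = 65 + (74 - 72) * (2 * 16) / 5 = 77.8.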
# Update the outputs
if demand > 0 and no_bid != 1:
# Update bid price and quantity
self.controller[houseName]['last_p'] = bid_price
self.controller[houseName]['last_q'] = demand
# Check market unit with controller default unit kW
if (self.aggregator['market_unit']).lower() != "kw": # compare against a lowercase literal; "kW" can never match the .lower() output
if (self.aggregator['market_unit']).lower() == "w":
self.controller[houseName]['last_q'] = self.controller[houseName]['last_q']*1000
elif (self.aggregator['market_unit']).lower() == "mw":
self.controller[houseName]['last_q'] = self.controller[houseName]['last_q']/1000
# Update parameters
self.controller_bid[houseName]['market_id'] = self.controller['lastmkt_id']
self.controller_bid[houseName]['bid_price'] = self.controller[houseName]['last_p']
self.controller_bid[houseName]['bid_quantity'] = self.controller[houseName]['last_q']
# Set controller_bid state
self.controller_bid[houseName]['state'] = powerstate
else:
# Update bid price and quantity
self.controller[houseName]['last_p'] = 0
self.controller[houseName]['last_q'] = 0
# Update controller_bid parameters
self.controller_bid[houseName]['market_id'] = 0
self.controller_bid[houseName]['bid_price'] = 0
self.controller_bid[houseName]['bid_quantity'] = 0
## ------------- double_price controller bid calculation -----------------------------------------
else:
# Calculate the temperature used for bid
# ================================================== TEST =========================================================
# monitor = 72.94548906584963
# Ua = 441.77610735085636
# Hm = 4173.792160282198
# Ca = 322.4454408
# Cm = 3042.1163728000006
# MassInternalGainFraction = 0.5
# MassSolarGainFraction = 0.5
# Qi = 6819.0
# Qs = 0.0
# Tout = 76.204
# Tmass = 72.72451992089181
# powerstate = 'OFF'
#
# setpoint0 = 72.04
# avgP = 65
# stdP = 16
# maxT = 77.69579380701853
# minT = 66.31803185749966
# ramp_high = 1.7248877792031796
# range_high = 5.6557938070185285
# ramp_low = 1.7232495193001907
# range_low = -5.721968142500344
# ==================================================TEST =========================================================
# ================================================== PRINT =========================================================
# if (houseName == 'house_3_7B_tm_3_B_7_Meter_95_1104' or houseName == 'house_3_15B_tm_3_B_15_Meter_87_1037'):
# print('House name:', houseName)
# print('House with monitor,Ua, Hm, Ca, Cm, MassInternalGainFraction, MassSolarGainFraction, Qi, Qs, Qh, Tout, monitor, Tmass, deadband, powerstate =', monitor,Ua, Hm, Ca, Cm, MassInternalGainFraction, MassSolarGainFraction, Qi, Qs, Qh, Tout, monitor, Tmass, deadband, powerstate)
# print('House setpoint0, avgP, stdP, k_T, T_lim, maxT, minT, ramp_high, range_high, ramp_low, range_low:', setpoint0, avgP, stdP, k_T, T_lim, maxT, minT, ramp_high, range_high, ramp_low, range_low)
# ================================================== PRINT =========================================================
# T_min = self.calcTemp1(monitor, deadband, powerstate)
T_avg = self.calcTemp2(Ua, Hm, Ca, Cm, MassInternalGainFraction, MassSolarGainFraction, Qi, Qs, Qh, Tout, monitor, Tmass, deadband, powerstate)
# T_avg = (T_min + T_max) / 2.0
# Based on temperature, get the bid price
bid_price = -1
no_bid = 0
if self.controller[houseName]['dir'] > 0:
if self.controller[houseName]['use_predictive_bidding'] == 1:
if powerstate == 'OFF' and T_avg > (maxT - deadband_shift):
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate != 'OFF' and T_avg < (minT + deadband_shift):
bid_price = 0
no_bid = 1
elif powerstate != 'OFF' and T_avg > maxT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate == 'OFF' and T_avg < minT:
bid_price = 0
no_bid = 1
else:
if T_avg > maxT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif T_avg < minT:
bid_price = 0
no_bid = 1
elif self.controller[houseName]['dir'] < 0:
if self.controller[houseName]['use_predictive_bidding'] == 1:
if powerstate == 'OFF' and T_avg < (minT + deadband_shift):
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate != 'OFF' and T_avg > (maxT - deadband_shift):
bid_price = 0
no_bid = 1
elif powerstate != 'OFF' and T_avg < minT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate == 'OFF' and T_avg > maxT:
bid_price = 0
no_bid = 1
else:
if T_avg < minT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif T_avg > maxT:
bid_price = 0
no_bid = 1
elif self.controller[houseName]['dir'] == 0:
if self.controller[houseName]['use_predictive_bidding'] == 1:
if not(direction):
warnings.warn('the variable direction did not get set correctly')
elif ((T_avg > maxT + deadband_shift) or (powerstate != 'OFF' and T_avg > minT - deadband_shift)) and direction > 0:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif ((T_avg < minT - deadband_shift) or (powerstate != 'OFF' and T_avg < maxT + deadband_shift)) and direction < 0:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif powerstate == 'OFF' and T_avg > maxT:
bid_price = 0
no_bid = 1
else:
if T_avg < minT:
bid_price = self.aggregator['price_cap']-1 #ctrl_cap
elif T_avg > maxT:
bid_price = 0
no_bid = 1
else:
bid_price = avgP
# Bidding price when T_avg temperature is within the controller temp limit
if T_avg > setpoint0:
k_T = ramp_high
T_lim = range_high
elif T_avg < setpoint0:
k_T = ramp_low
T_lim = range_low
else:
k_T = 0
T_lim = 0
bid_offset = 0.0001
if bid_price < 0 and T_avg != setpoint0:
if abs(stdP) < bid_offset:
bid_price = avgP
else:
bid_price = avgP + (T_avg - setpoint0)*(k_T * stdP) / abs(T_lim)
elif T_avg == setpoint0:
bid_price = avgP
# Update the outputs (no_bid is not used)
# Update bid price and quantity
# Check the bid quantity for each house. If the unit has never turned on since the start of the simulation, derive a bid quantity from its design cooling capacity
if demand <= 0:
demand = self.house[houseName]['design_cooling_capacity']*0.001/(3.4120*4.07)
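# Interpreting the constants (editorial assumption): *0.001 converts Btu/h to
# kBtu/h, /3.412 converts kBtu/h to thermal kW, and /4.07 looks like a nominal
# COP, yielding an approximate electrical kW draw.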
#
self.controller[houseName]['last_p'] = bid_price
self.controller[houseName]['last_q'] = demand
# Check market unit with controller default unit kW
if (self.aggregator['market_unit']).lower() != "kw": # compare against a lowercase literal; "kW" can never match the .lower() output
if (self.aggregator['market_unit']).lower() == "w":
self.controller[houseName]['last_q'] = self.controller[houseName]['last_q']*1000
elif (self.aggregator['market_unit']).lower() == "mw":
self.controller[houseName]['last_q'] = self.controller[houseName]['last_q']/1000
# Update parameters
self.controller_bid[houseName]['market_id'] = self.controller['lastmkt_id']
self.controller_bid[houseName]['bid_price'] = self.controller[houseName]['last_p']
self.controller_bid[houseName]['bid_quantity'] = self.controller[houseName]['last_q']
# Set controller_bid state
self.controller_bid[houseName]['state'] = powerstate
else:
raise ValueError('Only the CN_RAMP and CN_DOUBLE_PRICE controller modes are currently supported')
# Update house last power state
self.house[houseName]['last_pState'] = powerstate
# Display the bid only when the bidding quantity is not 0
if self.controller_bid[houseName]['bid_quantity'] > 0 and self.controller_bid[houseName]['bid_price'] > 0.0:
_log.info('At time {0:s}, house {5:s} bids information stored with market_id {1:d}, base setpoint is {9}, monitored temperature is {7} F, average temperature for bidding is {8} F, bidding price is {2:f} $, bidding quantity is {3:f} kW, rebid is {4:d}, state is {6}'.format(self.controller['t1'].strftime("%Y-%m-%d %H:%M:%S"), self.controller_bid[houseName]['market_id'], self.controller_bid[houseName]['bid_price'], self.controller_bid[houseName]['bid_quantity'], self.controller_bid[houseName]['rebid'], self.controller[houseName]['name'], self.house[houseName]['last_pState'], monitor, T_avg, setpoint0))
# Issue a bid, if appropriate
if self.controller_bid[houseName]['bid_quantity'] > 0.0 and self.controller_bid[houseName]['bid_price'] > 0.0:
# Publish the changed setpoints:
# Create a message for all points.
all_message = {'market_id': self.controller_bid[houseName]['market_id'],
'bid_id': self.controller[houseName]['name'],
'price': self.controller_bid[houseName]['bid_price'],
'quantity': self.controller_bid[houseName]['bid_quantity'],
'bid_accepted': no_bid == 0,
'state': self.controller_bid[houseName]['state'],
'rebid': self.controller_bid[houseName]['rebid'],
'bid_name': self.controller[houseName]['name']
}
# pub_topic = 'controller/controller_' + houseName + '/all'
# _log.info('controller agent {0} publishes bids to aggregator with topic: {1}'.format(config['agentid'], pub_topic))
#Create timestamp
# now = datetime.datetime.utcnow().isoformat(' ') + 'Z'
# headers = {
# headers_mod.DATE: now
# }
# self.vip.pubsub.publish('pubsub', pub_topic, headers, all_message)
# Store the publish data temporarily here
self.allHouseBids[houseName] = all_message
# print(' (temp,state,load,avg,std,clear,cap,init)',self.house[houseName]['air_temperature'],self.house[houseName]['power_state'],self.house[houseName]['hvac_load'],self.market['average_price'],self.market['std_dev'],self.market['clear_price'],self.market['price_cap'],self.market['initial_price'])
# print (timeSim, 'Bidding PQSrebid',self.controller_bid[houseName]['bid_price'],self.controller_bid[houseName]['bid_quantity'],self.controller_bid[houseName]['state'],self.controller_bid[houseName]['rebid'])
# Set controller_bid rebid value to true after publishing
self.controller_bid[houseName]['rebid'] = 1
# Publish all houses bids together
if update_bid == True and len(self.allHouseBids) != 0 and self.bid == True:
pub_topic = 'controller/' + config['agentid'] + '/all'
# _log.info('controller agent {0} publishes bids to aggregator with message: {1}'.format(config['agentid'], str(allHouseBids)))
_log.info('controller agent {0} publishes bids to aggregator'.format(config['agentid']))
#Create timestamp
now = datetime.datetime.utcnow().isoformat(' ') + 'Z'
headers = {
headers_mod.DATE: now
}
self.vip.pubsub.publish('pubsub', pub_topic, headers, self.allHouseBids)
self.bid = False # Set it as false until the next market cycle
self.allHouseBids = {}
# ==================== Postsync content ====================
def controller_postsync(self):
''' This method comes from the postsync part of the controller source code in GLD
'''
if self.controller['t1'] >= self.controller['next_run']:
self.controller['next_run'] += datetime.timedelta(0,self.controller['period'])
self.bid = True
for oneHouse in houses:
houseName = list(oneHouse.keys())
if len(houseName) != 1:
    raise ValueError('Each house entry must contain exactly one house key')
else:
    houseName = houseName[0]
# Update last setpoint if setpoint0 changed
# if (self.controller[houseName]['control_mode'] == 'CN_RAMP' or self.controller[houseName]['control_mode'] == 'CN_DOUBLE_PRICE') and self.controller[houseName]['last_setpoint'] != self.controller[houseName]['setpoint0']:
# self.controller[houseName]['last_setpoint'] = self.controller[houseName]['setpoint0']
# Compare t1 with next_run
# if self.controller[houseName]['t1'] < self.controller[houseName]['next_run'] - self.controller[houseName]['bid_delay']:
# postsyncReturn = self.controller[houseName]['next_run'] - self.controller[houseName]['bid_delay']
# return postsyncReturns
#
# if self.controller[houseName]['t1'] - self.controller[houseName]['next_run'] < self.controller[houseName]['bid_delay']:
# postsyncReturn = self.controller[houseName]['next_run']
Agent.__name__ = config['agentid']
return controllerAgent(**kwargs)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
try:
utils.vip_main(controller_agent)
except Exception as e:
print(e)
_log.exception('unhandled exception')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
[
"Thomas.McDermott@pnnl.gov"
] |
Thomas.McDermott@pnnl.gov
|
cf25d98d80195e4a85904cdae8c075d9d448de2e
|
e6cf76a21b532a28d5093abe0638e730453b900a
|
/hirednot/hired.py
|
ebf7a21b25e1c6110948c5f2915150e6e7af3612
|
[] |
no_license
|
chinmay002/HiredorNot
|
0b7e326e6cbbbb53ef28c9756c674aaba73a325b
|
18653ec74e29084dc01f43d121f503c4dcdfdacb
|
refs/heads/master
| 2023-03-23T23:47:53.588580 | 2020-08-14T09:31:12 | 2020-08-14T09:31:12 | 284,063,487 | 0 | 0 | null | 2021-03-20T04:59:36 | 2020-07-31T15:04:32 |
HTML
|
UTF-8
|
Python
| false | false | 2,058 |
py
|
import pickle
from flask import Flask,render_template,request
import sklearn
app=Flask(__name__)
model = pickle.load(open('model_upd.pkl','rb'))
@app.route("/")
def home():
return render_template("basic.html")
@app.route("/predict", methods = ["GET", "POST"])
def predict():
if request.method == "POST":
#gender
sex=request.form['gn']
if sex=='M':
sex=1
else:
sex=0
#sslc percentage
sslc=float(request.form['sslcp'])
#PUC percentage
puc=float(request.form['pup'])
#degree percentage
degree=float(request.form['degp'])
#enterance exam
ent=float(request.form['etest'])
#mba percentage
mba=float(request.form['mbap'])
#pucstream
pucstream=request.form['pu_s']
if pucstream=='Commerce':
pucstream=2
elif pucstream=='Science':
pucstream=1
else:
pucstream=0
#Degree Stream
degstream=request.form['deg_s']
if degstream=='Comm&Mgmt':
degstream=2
elif degstream=='Sci&Tech':
degstream=1
else:
degstream=0
#Work Exp
work_exp=request.form['w_exp']
if work_exp=='Yes':
work_exp=1
else:
work_exp=0
#MBA specialisation
mbas=request.form['mba_s']
if mbas =='Mkt&HR':
mbas=1
else:
mbas=0
prediction=model.predict([[sslc,puc,degree,ent,mba,pucstream,degstream,sex,work_exp,mbas]])
output=prediction[0]
#print(output)
if output=='Placed':
return render_template("basic.html",text="Congrats You are {}!!!!!".format(output))
else:
return render_template("basic.html",text="Sorry!! You are {}. Better luck next time.".format(output))
#return render_template("basic.html")
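# Assumed entry point (editorial addition, not in the original) so the app can
# also be launched with `python hired.py`; `flask run` works as well.
if __name__ == "__main__":
    app.run(debug=True)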
|
[
"noreply@github.com"
] |
chinmay002.noreply@github.com
|
5d7a0f8526dc63f238a3f6f2206db5b31e8b70c5
|
e999e5618ca462ddcbc41baae4edec91fc92ab87
|
/HNews.py
|
f370b9a173bdca77b5ade0636235f545812172ff
|
[] |
no_license
|
maadpeal/DC_Hnews
|
0b8758168b3103724cb4620439d29bc14930b577
|
48cb5d5d5b7ac2140e679ed21258ab982d32336a
|
refs/heads/master
| 2020-07-29T23:49:35.664557 | 2019-09-21T15:15:19 | 2019-09-21T15:15:19 | 210,006,453 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,042 |
py
|
from csv import reader
import datetime as dt
opened_file = open('hacker_news.csv')
read_file = reader(opened_file)
hn = list(read_file)
ask_posts = []
show_posts = []
other_posts = []
#In this step we're going to separate the publications
for row in hn:
title = row[1].lower()
if title.startswith('ask hn'):
ask_posts.append(row)
elif title.startswith('show hn'):
show_posts.append(row)
else:
other_posts.append(row)
# here we sum the comments for each type of publication
def totalComments(lista, indice):
total_comments = 0
for row in lista:
num_comments = int(row[indice])
total_comments += num_comments
return total_comments
total = totalComments(ask_posts, 4)
avg_ask_comments = total / len(ask_posts)
print(avg_ask_comments)
total_show = totalComments(show_posts, 4)
avg_show_comments = total_show / len(show_posts)
print(avg_show_comments)
result_list = []
for row in ask_posts:
created_at = row[6]
num_comments = int(row[4])
result_list.append([created_at, num_comments])
counts_by_hour = {}
comments_by_hour = {}
for row in result_list:
date, time = row[0].split()
#print(time) #here is the hour
#print(date) #8/28/2016
date = dt.datetime.strptime(date, '%m/%d/%Y')
if time not in counts_by_hour:
counts_by_hour[time] = 1
comments_by_hour[time] = row[1]
else:
counts_by_hour[time] += 1
comments_by_hour[time] += row[1]
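# e.g. two posts both created at '11:52', with 5 and 3 comments, leave
# counts_by_hour['11:52'] == 2 and comments_by_hour['11:52'] == 8.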
avg_by_hour = []
for row in counts_by_hour:
avg_by_hour.append([row, comments_by_hour[row] / counts_by_hour[row]])
swap_avg_by_hour = []
for element in avg_by_hour:
swap_avg_by_hour.append([element[1], element[0]])
swap_avg_by_hour.sort(reverse = True)  # list.sort() works in place and returns None
#print(swap_avg_by_hour)
sorted_swap = swap_avg_by_hour
print(sorted_swap[:3])
for lista in sorted_swap:
date = dt.datetime.strptime(lista[1], '%H:%M')
date = dt.datetime.strftime(date, '%H:%M')
string = "{} {:.2f} average comments per post".format(date, lista[0])
print(string)
|
[
"matuteaa@globalhitss.com"
] |
matuteaa@globalhitss.com
|
7c58225e0298f0eb97d46880cdce6e151cba4249
|
3fe710bc9480a938255520fc565a2b0364064df7
|
/test/client.py
|
08fc96c58bc29fbe41f34b4034658886048ae154
|
[] |
no_license
|
jakearmendariz/loadbalancer
|
7c4b6ad4cdcd995a5ca7b42c9ceeb99df0b485a8
|
5cc4e62a2afb5c14459df3482aad0e1af90fb473
|
refs/heads/master
| 2022-12-01T05:51:47.836249 | 2020-08-07T22:30:11 | 2020-08-07T22:30:11 | 271,505,536 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,036 |
py
|
#Load Balancer timer
import time, os, threading, sys
def single_run():
tm = time.time()
command = "curl localhost:1234/httpserver > out"
result = os.system(command)
if result == 0:
return time.time() - tm
else:
return -1
file = open("results."+sys.argv[1], "w+")
def closed_loop():
count = 0
while count < 400/int(sys.argv[1]):
tm = single_run()
count += 1
if tm != -1:
file.write(str(tm) + "\n")
else:
file.write(str(-1)+"\n")
time.sleep(.25)
time.sleep(.2)
concurrency = int(sys.argv[1])
tm = time.time()
thread_array = []
for i in range(concurrency):
thread_array.append(threading.Thread(target=closed_loop))
thread_array[i].start()
for i in range(concurrency):
thread_array[i].join()
print("TOTAL TIME", round(time.time() - tm, 3))
file.close()
#return round(time.time() - tm, 3)
# 2 2.71s user 4.36s system 14% cpu 47.554 total
# 10 3.13s user 5.63s system 48% cpu 17.905 total
|
[
"jakearmendariz99@gmail.com"
] |
jakearmendariz99@gmail.com
|
cc22eb65548658c38744428507dca2a687dfba3f
|
c8dd7d899d0fe203b9bf29ccc3e979b853285fc0
|
/myGUI.py
|
77a7ee1c40c1913b619a5714d6bd566569ebd5f3
|
[] |
no_license
|
HankSeametrics/AutoDocumentation
|
988e79df2d9ca923d4196d88e0889eb882b4dbd1
|
b51803391c1a44f9ca4c98aca57d77f869c16510
|
refs/heads/master
| 2020-06-20T09:36:14.359120 | 2019-07-17T01:49:11 | 2019-07-17T01:49:11 | 197,079,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,863 |
py
|
#----------------------------------------------------------------------
# Bench Issue Documentation Generator GUI
''' author: Henry Beck '''
# date: 07/16/2019
# email: hbeck@seametrics.com
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Revision Log
#
# Rev Date Author Description
#----------------------------------------------------------------------
'''
1 2019/07/16 HTB (1) Initial Release
'''
#----------------------------------------------------------------------
# MODULES
#----------------------------------------------------------------------
import wx
class FrontPanel(wx.Frame):
def __init__(self, parent, title):
super(FrontPanel, self).__init__(parent, title = title,size = (350,250))
panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
l1 = wx.StaticText(panel, -1, "Text Field")
hbox1.Add(l1, 1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
self.t1 = wx.TextCtrl(panel)
hbox1.Add(self.t1,1,wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
self.t1.Bind(wx.EVT_TEXT,self.OnKeyTyped)
vbox.Add(hbox1)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
l2 = wx.StaticText(panel, -1, "password field")
hbox2.Add(l2, 1, wx.ALIGN_LEFT|wx.ALL,5)
self.t2 = wx.TextCtrl(panel,style = wx.TE_PASSWORD)
self.t2.SetMaxLength(5)
hbox2.Add(self.t2,1,wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
vbox.Add(hbox2)
self.t2.Bind(wx.EVT_TEXT_MAXLEN,self.OnMaxLen)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
l3 = wx.StaticText(panel, -1, "Multiline Text")
hbox3.Add(l3,1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
self.t3 = wx.TextCtrl(panel,size = (200,100),style = wx.TE_MULTILINE)
hbox3.Add(self.t3,1,wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
vbox.Add(hbox3)
self.t3.Bind(wx.EVT_TEXT_ENTER,self.OnEnterPressed)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
l4 = wx.StaticText(panel, -1, "Read only text")
hbox4.Add(l4, 1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
self.t4 = wx.TextCtrl(panel, value = "ReadOnlyText",style = wx.TE_READONLY|wx.TE_CENTER)
hbox4.Add(self.t4,1,wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
vbox.Add(hbox4)
panel.SetSizer(vbox)
self.Centre()
self.Show()
self.Fit()
def OnKeyTyped(self, event):
    print(event.GetString())
def OnEnterPressed(self,event):
    print("Enter pressed")
def OnMaxLen(self,event):
    print("Maximum length reached")
if __name__ == '__main__':
app = wx.App()
frm = FrontPanel(None, 'TextCtrl demo')
frm.Show()
app.MainLoop()
|
[
"hbeck@seametrics.com"
] |
hbeck@seametrics.com
|
4ad8afc10d1dee60125ddd7a1ab867ca80fbc3ec
|
e9d37df3750fac7a39bcb4a5ce6e5e22f9487998
|
/1295FindNumbersEven.py
|
01b1f1838923587e4fd5f2bc76e80d14fa6c2f76
|
[
"MIT"
] |
permissive
|
vkaushik189/ltcode_solutions
|
cf985d2b2f47406f2a0cc108f9e127dba2ce9da7
|
035a74ee51d636989aa183e16c6ebb81dfccf74c
|
refs/heads/master
| 2021-07-03T12:50:32.470448 | 2020-08-05T02:44:22 | 2020-08-05T02:44:22 | 132,412,974 | 0 | 0 |
MIT
| 2020-07-23T03:02:15 | 2018-05-07T05:48:32 |
Python
|
UTF-8
|
Python
| false | false | 196 |
py
|
class Solution:
def findNumbers(self, nums: List[int]) -> int:
count = 0
for num in nums:
if len(str(num)) % 2 == 0:
count += 1
return count
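# Example: Solution().findNumbers([12, 345, 2, 6, 7896]) returns 2,
# since only 12 and 7896 contain an even number of digits.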
|
[
"noreply@github.com"
] |
vkaushik189.noreply@github.com
|
ac26164832c46775f5a84e39ca503cf215374a68
|
98e4f7d2d7e7813d4029acdab76d08161eeac240
|
/tests/utils/test_requirements_utils.py
|
f7162a97b20d3185af1c91ee93a27ee014a8082c
|
[
"Apache-2.0"
] |
permissive
|
dmatrix/mlflow
|
dd013c1bb9dd8cacd163024e42cec37b821ca3ae
|
c2aeec21298fa05006a7a0030651d27ad578fae7
|
refs/heads/master
| 2021-08-29T09:41:36.776475 | 2021-08-17T02:06:47 | 2021-08-17T02:06:47 | 144,213,366 | 2 | 1 |
Apache-2.0
| 2018-08-09T23:30:59 | 2018-08-09T23:30:59 | null |
UTF-8
|
Python
| false | false | 9,122 |
py
|
import os
import sys
import importlib
from unittest import mock
import importlib_metadata
import pytest
import mlflow
from mlflow.utils.requirements_utils import (
_is_comment,
_is_empty,
_is_requirements_file,
_strip_inline_comment,
_join_continued_lines,
_parse_requirements,
_prune_packages,
_strip_local_version_label,
_get_installed_version,
_get_pinned_requirement,
_module_to_packages,
_infer_requirements,
)
def test_is_comment():
assert _is_comment("# comment")
assert _is_comment("#")
assert _is_comment("### comment ###")
assert not _is_comment("comment")
assert not _is_comment("")
def test_is_empty():
assert _is_empty("")
assert not _is_empty(" ")
assert not _is_empty("a")
def test_is_requirements_file():
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("--requirement req.txt")
assert _is_requirements_file("--requirement req.txt")
assert not _is_requirements_file("req")
def test_strip_inline_comment():
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # com1 # com2") == "aaa"
# Ensure a URI fragment is not stripped
assert (
_strip_inline_comment("git+https://git/repo.git#subdirectory=subdir")
== "git+https://git/repo.git#subdirectory=subdir"
)
def test_join_continued_lines():
assert list(_join_continued_lines(["a"])) == ["a"]
assert list(_join_continued_lines(["a\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b\\", "c"])) == ["abc"]
assert list(_join_continued_lines(["a\\", " b"])) == ["a b"]
assert list(_join_continued_lines(["a\\", " b\\", " c"])) == ["a b c"]
assert list(_join_continued_lines(["a\\", "\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b", "c\\", "d"])) == ["ab", "cd"]
assert list(_join_continued_lines(["a\\", "", "b"])) == ["a", "b"]
assert list(_join_continued_lines(["a\\"])) == ["a"]
assert list(_join_continued_lines(["\\", "a"])) == ["a"]
def test_parse_requirements(request, tmpdir):
"""
Ensures `_parse_requirements` returns the same result as `pip._internal.req.parse_requirements`
"""
from pip._internal.req import parse_requirements as pip_parse_requirements
from pip._internal.network.session import PipSession
root_req_src = """
# No version specifier
noverspec
no-ver-spec
# Version specifiers
verspec<1.0
ver-spec == 2.0
# Environment marker
env-marker; python_version < "3.8"
inline-comm # Inline comment
inlinecomm # Inline comment
# Git URIs
git+https://github.com/git/uri
git+https://github.com/sub/dir#subdirectory=subdir
# Requirements files
-r {relative_req}
--requirement {absolute_req}
# Constraints files
-c {relative_con}
--constraint {absolute_con}
# Line continuation
line-cont\
==\
1.0
# Line continuation with spaces
line-cont-space \
== \
1.0
# Line continuation with a blank line
line-cont-blank\
# Line continuation at EOF
line-cont-eof\
""".strip()
try:
os.chdir(tmpdir)
root_req = tmpdir.join("requirements.txt")
# Requirements files
rel_req = tmpdir.join("relative_req.txt")
abs_req = tmpdir.join("absolute_req.txt")
# Constraints files
rel_con = tmpdir.join("relative_con.txt")
abs_con = tmpdir.join("absolute_con.txt")
# pip's requirements parser collapses an absolute requirements file path:
# https://github.com/pypa/pip/issues/10121
# As a workaround, use a relative path on Windows.
absolute_req = abs_req.basename if os.name == "nt" else abs_req.strpath
absolute_con = abs_con.basename if os.name == "nt" else abs_con.strpath
root_req.write(
root_req_src.format(
relative_req=rel_req.basename,
absolute_req=absolute_req,
relative_con=rel_con.basename,
absolute_con=absolute_con,
)
)
rel_req.write("rel-req-xxx\nrel-req-yyy")
abs_req.write("abs-req-zzz")
rel_con.write("rel-con-xxx\nrel-con-yyy")
abs_con.write("abs-con-zzz")
expected_cons = [
"rel-con-xxx",
"rel-con-yyy",
"abs-con-zzz",
]
expected_reqs = [
"noverspec",
"no-ver-spec",
"verspec<1.0",
"ver-spec == 2.0",
'env-marker; python_version < "3.8"',
"inline-comm",
"inlinecomm",
"git+https://github.com/git/uri",
"git+https://github.com/sub/dir#subdirectory=subdir",
"rel-req-xxx",
"rel-req-yyy",
"abs-req-zzz",
"line-cont==1.0",
"line-cont-space == 1.0",
"line-cont-blank",
"line-cont-eof",
]
parsed_reqs = list(_parse_requirements(root_req.basename, is_constraint=False))
pip_reqs = list(pip_parse_requirements(root_req.basename, session=PipSession()))
# Requirements
assert [r.req_str for r in parsed_reqs if not r.is_constraint] == expected_reqs
assert [r.requirement for r in pip_reqs if not r.constraint] == expected_reqs
# Constraints
assert [r.req_str for r in parsed_reqs if r.is_constraint] == expected_cons
assert [r.requirement for r in pip_reqs if r.constraint] == expected_cons
finally:
os.chdir(request.config.invocation_dir)
def test_prune_packages():
assert _prune_packages(["mlflow"]) == {"mlflow"}
assert _prune_packages(["mlflow", "packaging"]) == {"mlflow"}
assert _prune_packages(["mlflow", "scikit-learn"]) == {"mlflow", "scikit-learn"}
def test_capture_imported_modules():
from mlflow.utils._capture_modules import _CaptureImportedModules
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import,unused-variable
import math
__import__("pandas")
importlib.import_module("numpy")
assert "math" in cap.imported_modules
assert "pandas" in cap.imported_modules
assert "numpy" in cap.imported_modules
def test_strip_local_version_label():
assert _strip_local_version_label("1.2.3") == "1.2.3"
assert _strip_local_version_label("1.2.3+ab") == "1.2.3"
assert _strip_local_version_label("1.2.3rc0+ab") == "1.2.3rc0"
assert _strip_local_version_label("1.2.3.dev0+ab") == "1.2.3.dev0"
assert _strip_local_version_label("1.2.3.post0+ab") == "1.2.3.post0"
assert _strip_local_version_label("invalid") == "invalid"
def test_get_installed_version(tmpdir):
import numpy as np
import pandas as pd
import sklearn
assert _get_installed_version("mlflow") == mlflow.__version__
assert _get_installed_version("numpy") == np.__version__
assert _get_installed_version("pandas") == pd.__version__
assert _get_installed_version("scikit-learn", module="sklearn") == sklearn.__version__
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_installed_version("not_found") == "1.2.3"
def test_get_pinned_requirement(tmpdir):
assert _get_pinned_requirement("mlflow") == f"mlflow=={mlflow.__version__}"
assert _get_pinned_requirement("mlflow", version="1.2.3") == "mlflow==1.2.3"
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_pinned_requirement("not_found") == "not_found==1.2.3"
def test_get_pinned_requirement_local_version_label(tmpdir):
package = tmpdir.join("my_package.py")
lvl = "abc.def.ghi" # Local version label
package.write(f"__version__ = '1.2.3+{lvl}'")
sys.path.insert(0, tmpdir.strpath)
with mock.patch("mlflow.utils.requirements_utils._logger.warning") as mock_warning:
req = _get_pinned_requirement("my_package")
mock_warning.assert_called_once()
(first_pos_arg,) = mock_warning.call_args[0]
assert first_pos_arg.startswith(
f"Found my_package version (1.2.3+{lvl}) contains a local version label (+{lvl})."
)
assert req == "my_package==1.2.3"
def test_infer_requirements_excludes_mlflow():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["mlflow", "pytest"],
):
mlflow_package = "mlflow-skinny" if "MLFLOW_SKINNY" in os.environ else "mlflow"
assert mlflow_package in _module_to_packages("mlflow")
assert _infer_requirements("path/to/model", "sklearn") == [f"pytest=={pytest.__version__}"]
|
[
"noreply@github.com"
] |
dmatrix.noreply@github.com
|
4e2d2adc4f5b779288b87fd7bdab986597cd7602
|
f4a9d5937d2dde264a61a53e6b5c79f25bd12c28
|
/a1.py
|
ed7ea5edb284de92a14914a12a6e2422fe517b34
|
[] |
no_license
|
iloveinter/test
|
a48b2e88454aa541968303fac86be43eefc21e5c
|
3fda1f53112b5acd21c9226e13e0a65e826b4af3
|
refs/heads/master
| 2020-06-25T11:47:12.339544 | 2019-07-29T03:28:40 | 2019-07-29T03:28:40 | 199,299,980 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 42 |
py
|
x = 4
y = 7
for i in [5, 6]:
    z = x + i
|
[
"zhanghouyu@vip.126.com"
] |
zhanghouyu@vip.126.com
|
0ef410590b36a0a2e7c27fb05736d468f2304c6f
|
fe90fafd4068434e02fe7914c9c075d15bfacab3
|
/app/adminServer/adminServer/wsgi.py
|
3d19d4b4c350d91ca76da8e93ad020c9e945bbe1
|
[] |
no_license
|
3d1th3/SSMainServer
|
a79d248cc3d6646fdc0d16a46e41681366e6327f
|
a4d73448a9f08f3a93897eac8547ef887d157319
|
refs/heads/master
| 2023-06-04T11:15:27.403988 | 2021-06-03T16:42:04 | 2021-06-03T16:42:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
"""
WSGI config for adminServer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'adminServer.settings')
application = get_wsgi_application()
|
[
"cachecuas1@gmail.com"
] |
cachecuas1@gmail.com
|
8cb517e17f1399beeb39c3f9372acbde15424e42
|
51c8fd9ad4549bab9af4b607cb876310ee6050f0
|
/hidden_node.py
|
96a5bcd43c5737fa8c7f5519266898ceee80384b
|
[] |
no_license
|
JacobPimental/trolleybot
|
81669dba4e0fbad002e490e1734b069d5036ff8f
|
c77e3365073bd83ea48d02d685f2ccb4fdeea5c6
|
refs/heads/master
| 2021-01-23T01:01:26.066505 | 2017-04-19T18:16:22 | 2017-04-19T18:16:22 | 85,862,383 | 1 | 0 | null | 2017-03-31T21:34:30 | 2017-03-22T18:27:02 |
Python
|
UTF-8
|
Python
| false | false | 672 |
py
|
class HiddenNode:
synapses = []
act_func = None
data = 0.0
act_data = 0.0
def __init__(self, synapses, act_func):
self.act_func = act_func
self.synapses = synapses
def retrieve_data(self, data):
self.data += data
self.act_data = self.act_func( self.data )
def pass_through(self):
for s in self.synapses:
s.pass_data( self.act_data )
def __repr__(self):
    return "Hidden Node:\n\tData: " + str(self.act_data) + "\n\tSynapses: " + str(self.synapses)
def updateSynapses(self, delta):
for s in range( len( delta ) ):
self.synapses[s].weight += delta[s]
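# Hypothetical usage sketch (assumes a Synapse object exposing a `weight`
# attribute and a `pass_data(value)` method, neither defined in this file):
# node = HiddenNode([synapse_a, synapse_b], act_func=math.tanh)
# node.retrieve_data(0.7)
# node.pass_through()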
|
[
"jacob16682@gmail.com"
] |
jacob16682@gmail.com
|
4a8fb408dfd1ca0f114e51d0ba49e0064d154c51
|
3e17a2cb4757143806efc2bbcfb66a7221a103a6
|
/algoritmo.py
|
26e1fdf78df4f82cb289bd981ff988c48a5de519
|
[] |
no_license
|
ibioesc/Algoritmos
|
3387aab670a8fb243e2b78ebbcfa2be152079363
|
5b3de0b7fd4c63ca2bf9043bd012a767eeb33a04
|
refs/heads/master
| 2023-06-07T02:49:36.909005 | 2021-06-26T16:26:34 | 2021-06-26T16:26:34 | 380,533,843 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
def canBeSplited(vec):
try:
lis = vec
left = sum(lis[:len(lis)//2])
right = sum(lis[len(lis)//2:])
if len(lis) == 0 and left == right:
return print(0)
elif left == right:
return print(1)
else:
return print(-1)
except (TypeError, NameError):
print('Not a valid data type!')
vect = [1,3,3,8,4,3,2,3,3]
canBeSplited(vect)
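# For vect above (length 9, so len//2 == 4): the left half [1, 3, 3, 8] sums
# to 15 and the right half [4, 3, 2, 3, 3] also sums to 15, so this prints 1.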
|
[
"ibiotec30@gmail.com"
] |
ibiotec30@gmail.com
|
c0f6fee505ed37fe55dbc1ab1c29ce732cbbde75
|
8596de9a476244adde04e775096835b1b24c15a4
|
/rrc/migrations/0002_rrc_user.py
|
7553cbb24477481bc4a810e057322d112e988510
|
[] |
no_license
|
freeznet2012/mini
|
10cc3e4ccec08835aaebf15788332e571f9b7683
|
ee89b9e05d5d14d8661be40c8624964e907e85cb
|
refs/heads/master
| 2021-01-23T06:01:13.196614 | 2017-04-06T15:27:25 | 2017-04-06T15:27:25 | 86,328,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-01 20:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rrc', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='rrc',
name='user',
field=models.OneToOneField(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"praveenbenny007@gmail.com"
] |
praveenbenny007@gmail.com
|
2ee682a7aeb5da3c8da1a515f601d0cfcd6425e1
|
85d3c54d7de44e06ffc00c3c891e4651765316e4
|
/DataInfo.py
|
3f857ae8e36f7171d0b0e350d9c58c903716b48a
|
[] |
no_license
|
fernando801/DataScienceCourse
|
c7bfd00250c4da5532d9c120f680cd9920a679b3
|
fe8eabe7351b94e40a4653db758f07276f5f4be6
|
refs/heads/main
| 2023-05-15T09:40:19.826443 | 2021-06-02T02:08:43 | 2021-06-02T02:08:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,024 |
py
|
def getstatfunction(name,df):
if name == 'Name':
return lambda column:column
elif name == 'Mean':
return lambda column:df[column].mean()
elif name == 'Median':
return lambda column:df[column].median()
elif name == 'Range':
return lambda column:df[column].max()-df[column].min()
elif name == 'Variance':
return lambda column:df[column].var()
elif name == 'Std. Dev':
return lambda column:df[column].std()
def printRow(name,columnformat,df,columns):
stat = getstatfunction(name,df)
print("{:20s} ".format(name), end="")
for i in columns:
print(columnformat.format(stat(i)), end="")
print('')
def getDataInfo(df,columns):
printRow('Name',"{:>20s} ",df,columns)
printRow('Mean',"{:20.4f} ",df,columns)
printRow('Median',"{:20.4f} ",df,columns)
printRow('Range',"{:20.4f} ",df,columns)
printRow('Variance',"{:20.4f} ",df,columns)
printRow('Std. Dev',"{:20.4f} ",df,columns)
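# Hypothetical usage (the functions above assume a pandas DataFrame):
# import pandas as pd
# df = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 9.0]})
# getDataInfo(df, ['a', 'b'])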
|
[
"resendizbautista801@gmail.com"
] |
resendizbautista801@gmail.com
|
7c6459642e6c2d0c2421f04964632102c8d273d6
|
bf3b3bcf36b13ea0b2ebc3f8dfd5ceaa1fbd6ad4
|
/Split_rename-employee-pdfs/split_rename-employee-pdfs.py
|
4c583f300737c3cca922ca611e895a0d01610d1b
|
[] |
no_license
|
dankaygit/random-stuff
|
2fb9ead376e9ea61abd015c2cb57053260ff39ca
|
f24c8bd86a560f49e9f1a325610131a6ab860e6c
|
refs/heads/main
| 2023-06-02T16:11:07.120908 | 2021-06-24T12:19:55 | 2021-06-24T12:19:55 | 379,861,117 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,736 |
py
|
#!/usr/bin/env python
import os
from PyPDF2 import PdfFileWriter, PdfFileReader
# Does this script run with user input or not?
# If yes, set to True.
interactive = False
#Info about the PDF we need to edit
## Get all pdfs from the "source" directory and put the file names in a list called input_files
input_files = [file for file in os.listdir("source") if file.endswith(".pdf")]
## Delete the ".pdf" extension for every file name so we can now extract the available info from each file name
files = [os.path.splitext(file)[0] for file in input_files] # str.strip(".pdf") removes characters, not the suffix, so use os.path.splitext
## Split up the name by separating everything before and after each "_". This returns a list of split up strings for each initial string.
## Here it's important that we already have an idea of what our input will look like. In our sample files we have the format FileName_Year_Month_IDN.pdf. Assuming this to be our input, we can extract year and month from our files.
files = [file.split("_") for file in files]
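# e.g. a hypothetical 'Payroll_2021_06_42.pdf' becomes 'Payroll_2021_06_42'
# after dropping the extension, and then ['Payroll', '2021', '06', '42'].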
# Either get the employee names interactively (through user input) or extract them from a saved file called employees.txt (since this is quick and dirty, we assume a txt file with one employee name per line, no tabs or whitespace). It's important that the employee names come up in the same order as they are in the input pdf files.
employees = []
sep = "_"
## Extract all names from the file and clean up line breaks
if not interactive:
with open("source/employees.txt", "r") as file:
lines = file.readlines()
lines = [line.strip("\n") for line in lines]
employees = lines
## Clean up empty lines, so we don't have "employees" which have no name due to empty lines in the file.
employees = [employee for employee in employees if employee not in ["\n", "", " "]]
else:
print("Please input in one line all employees in the order as they appear in the the input document, separated by a " + sep + " \nFor example: Bob" + sep + "Alice" + sep + "Brian" + sep + "Dora" + sep + "Charlie \n")
print("Please input all employees as described above: ")
employees = input()
employees = employees.split(sep)
print(employees)
## Here we start the splitting process.
## We take each input file from input_files and create subdirectories in a new "output" based on the file name
for input_file in input_files:
fileName = os.path.splitext(input_file)[0]
output_dir = os.getcwd() + "/output/" + fileName + "/"
if fileName in os.listdir("output"):
raise Exception ("It seems like " + fileName + " has already been split. To avoid inadvertent data loss, please either remove the input pdf file from the source folder or the " + fileName + " subfolder from the output folder and run the script again.")
os.makedirs(output_dir)
fileName = fileName.split("_")
#Date (we know from the FileName_Year_Month_IDN format parsed above where to find year and month in the fileName list of strings)
year = fileName[1]
month = fileName[2]
# Now stitch the name of the new (split) pdf files together, with the employee names extracted from the employee.txt file
output_names = [year + sep + month + sep + employee for employee in employees]
#Splitting the PDF and writing new files.
inputPdf = PdfFileReader("source/" + input_file, "rb")
if inputPdf.getNumPages() != len(employees):
raise Exception("Check that number of pages in input pdf equals number of employees")
else:
for i in range(len(output_names)):
output = PdfFileWriter()
output.addPage(inputPdf.getPage(i))
with open(output_dir + "/" + output_names[i] + ".pdf", "wb") as outputStream:
output.write(outputStream)
outputStream.close()
|
[
"dankaygit@gmail.com"
] |
dankaygit@gmail.com
|
af66f3e9667cc2d7a9aca8543be26bbdbeffb849
|
af9c0aafa10b7901533de0b32177ab80b4782d3f
|
/notes/code/youtube/comments_one_video.py
|
0ae8f2715bd2cd2765d7e2162e6561247db18f41
|
[
"MIT"
] |
permissive
|
Akramz/msds692
|
d1d33298b7599950e95838c0fc9ddbd47a98ed5b
|
42f4c2a0dc7569152bac2439e9b6385f2f101f7b
|
refs/heads/master
| 2023-01-25T00:44:11.197544 | 2020-12-05T22:05:14 | 2020-12-05T22:05:14 | 319,362,758 | 1 | 0 |
MIT
| 2020-12-07T15:31:12 | 2020-12-07T15:31:11 | null |
UTF-8
|
Python
| false | false | 708 |
py
|
import sys
from googleapiclient.discovery import build
DEVELOPER_KEY = sys.argv[1]
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
video_id = "gU_gYzwTbYQ" # bonkers the cat
# code from https://developers.google.com/youtube/v3/docs/comments/list
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)
results = youtube.commentThreads().list(
part="snippet",
videoId=video_id,
textFormat="plainText"
).execute()
for item in results["items"]:
comment = item["snippet"]["topLevelComment"]
author = comment["snippet"]["authorDisplayName"]
text = comment["snippet"]["textDisplay"]
print("Comment by %s: %s" % (author, text))
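# Note (editorial): commentThreads().list returns only the first page of
# results here; to fetch every comment, re-issue the request with the
# pageToken parameter set to results.get("nextPageToken") until it is absent.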
|
[
"parrt@cs.usfca.edu"
] |
parrt@cs.usfca.edu
|
5efe2bfb891bf6d1fce5182ffa9a94977080f203
|
16241fa0befde2fb5baf09b81399a070df4024d2
|
/GridWorld/trainer.py
|
dae4adf1f1b29de21a6910c8690cd636aa7c2466
|
[] |
no_license
|
LorenzoPinto04/RL-Transport
|
e5365143186e0197ddba6f5551ea4f9fa69d8d54
|
465a230b74eaacb5967829bbd6f5b7cbf0b135e0
|
refs/heads/master
| 2021-04-23T04:16:51.512977 | 2020-04-20T08:55:12 | 2020-04-20T08:55:12 | 249,896,887 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,083 |
py
|
from functions import *
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
os.environ["CUDA_VISIBLE_DEVICES"]="3"
import random
import gym
import numpy as np
from collections import deque
from keras.models import Model, load_model
from keras.layers import Input, Dense, Lambda, Add, Conv2D, Flatten
from keras.optimizers import Adam, RMSprop
from keras import backend as K
import cv2
from matplotlib import pyplot as plt
import pylab
from pylab import rcParams
rcParams['figure.figsize'] = 20, 10
from environment import GridWorld
def OurModel(input_shape, action_space, dueling):
X_input = Input(input_shape)
X = X_input
#X = Conv2D(64, 5, strides=(3, 3),padding="valid", input_shape=input_shape, activation="relu", data_format="channels_first")(X)
X = Conv2D(32, 8, strides=(4, 4),padding="valid", input_shape=input_shape, activation="relu", data_format="channels_first")(X)
X = Conv2D(64, 4, strides=(2, 2),padding="valid", activation="relu", data_format="channels_first")(X)
X = Conv2D(64, 3, strides=(1, 1),padding="valid", activation="relu", data_format="channels_first")(X)
X = Flatten()(X)
# 'Dense' is the basic form of a neural network layer
X = Dense(512, activation="relu", kernel_initializer='he_uniform')(X)
X = Dense(256, activation="relu", kernel_initializer='he_uniform')(X)
X = Dense(64, activation="relu", kernel_initializer='he_uniform')(X)
if dueling:
state_value = Dense(1, kernel_initializer='he_uniform')(X)
state_value = Lambda(lambda s: K.expand_dims(s[:, 0], -1), output_shape=(action_space,))(state_value)
action_advantage = Dense(action_space, kernel_initializer='he_uniform')(X)
action_advantage = Lambda(lambda a: a[:, :] - K.mean(a[:, :], keepdims=True), output_shape=(action_space,))(action_advantage)
X = Add()([state_value, action_advantage])
else:
# Output Layer with # of actions: 2 nodes (left, right)
X = Dense(action_space, activation="linear", kernel_initializer='he_uniform')(X)
model = Model(inputs = X_input, outputs = X)
#model.compile(loss="mean_squared_error", optimizer=RMSprop(lr=0.00025, rho=0.95, epsilon=0.01), metrics=["accuracy"])
model.compile(optimizer=Adam(lr=0.00025), loss='mean_squared_error')
#model.compile(optimizer=Adam(lr=0.00005), loss='mean_squared_error')
model.summary()
return model
class DQNAgent:
def __init__(self, env_name, env):
self.env_name = env_name
self.env = env
self.action_size = 8
self.EPISODES = 1000000
# Instantiate memory
memory_size = 25000
self.MEMORY = Memory(memory_size)
self.memory = deque(maxlen=memory_size)
self.gamma = 0.99 # discount rate
# EXPLORATION HYPERPARAMETERS for epsilon and epsilon greedy strategy
self.epsilon = 1.0 # exploration probability at start
self.epsilon_min = 0.02 # minimum exploration probability
self.epsilon_decay = 0.00002 # exponential decay rate for exploration prob
self.batch_size = 32
# defining model parameters
self.ddqn = False # use double deep Q network
self.dueling = False # use dueling network
self.epsilon_greedy = False # use epsilon greedy strategy
self.USE_PER = False # use priority experienced replay
self.Save_Path = 'models'
if not os.path.exists(self.Save_Path): os.makedirs(self.Save_Path)
self.scores, self.episodes, self.average = [], [], []
self.Model_name = os.path.join(self.Save_Path, self.env_name+"_CNN.h5")
self.ROWS = 50
self.COLS = 80
self.REM_STEP = 4
self.update_model_steps = 1000
self.state_size = (self.REM_STEP, self.ROWS, self.COLS)
self.image_memory = np.zeros(self.state_size)
# create main model and target model
self.model = OurModel(input_shape=self.state_size, action_space = self.action_size, dueling = self.dueling)
self.target_model = OurModel(input_shape=self.state_size, action_space = self.action_size, dueling = self.dueling)
# after some time interval update the target model to be same with model
def update_target_model(self, game_steps):
if game_steps % self.update_model_steps == 0:
self.target_model.set_weights(self.model.get_weights())
return
def remember(self, state, action, reward, next_state, done):
experience = state, action, reward, next_state, done
if self.USE_PER:
self.MEMORY.store(experience)
else:
self.memory.append((experience))
def act(self, state, decay_step):
# EPSILON GREEDY STRATEGY
if self.epsilon_greedy:
# Here we'll use an improved version of our epsilon greedy strategy for Q-learning
explore_probability = self.epsilon_min + (self.epsilon - self.epsilon_min) * np.exp(-self.epsilon_decay * decay_step)
# OLD EPSILON STRATEGY
else:
if self.epsilon > self.epsilon_min:
self.epsilon *= (1-self.epsilon_decay)
explore_probability = self.epsilon
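# e.g. starting from epsilon = 1.0 with epsilon_decay = 0.00002, after about
# 100,000 action selections epsilon ~= 0.99998**100000 ~= 0.135.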
if explore_probability > np.random.rand():
# Make a random action (exploration)
return random.randrange(self.action_size), explore_probability
else:
# Get action from Q-network (exploitation)
# Estimate the Qs values state
# Take the biggest Q value (= the best action)
return np.argmax(self.model.predict(state)), explore_probability
def replay(self):
if self.USE_PER:
# Sample minibatch from the PER memory
tree_idx, minibatch = self.MEMORY.sample(self.batch_size)
else:
if len(self.memory) > self.batch_size:
# Randomly sample minibatch from the deque memory
minibatch = random.sample(self.memory, self.batch_size)
else:
return
state = np.zeros((self.batch_size, *self.state_size), dtype=np.float32)
action = np.zeros(self.batch_size, dtype=np.int32)
reward = np.zeros(self.batch_size, dtype=np.float32)
next_state = np.zeros((self.batch_size, *self.state_size), dtype=np.float32)
done = np.zeros(self.batch_size, dtype=np.uint8)
# do this before prediction
# for speedup, this could be done on the tensor level
# but easier to understand using a loop
for i in range(len(minibatch)):
state[i], action[i], reward[i], next_state[i], done[i] = minibatch[i]
# do batch prediction to save speed
# predict Q-values for starting state using the main network
target = self.model.predict(state)
target_old = np.array(target)
# predict best action in ending state using the main network
target_next = self.model.predict(next_state)
# predict Q-values for ending state using the target network
target_val = self.target_model.predict(next_state)
for i in range(len(minibatch)):
# correction on the Q value for the action used
if done[i]:
target[i][action[i]] = reward[i]
else:
# the key point of Double DQN
# selection of action is from model
# update is from target model
if self.ddqn: # Double - DQN
# current Q Network selects the action
# a'_max = argmax_a' Q(s', a')
a = np.argmax(target_next[i])
# target Q Network evaluates the action
# Q_max = Q_target(s', a'_max)
target[i][action[i]] = reward[i] + self.gamma * target_val[i][a]
else: # Standard - DQN
# DQN chooses the max Q value among next actions
# selection and evaluation of action is on the target Q Network
# Q_max = max_a' Q_target(s', a')
# when using target model in simple DQN rules, we get better performance
target[i][action[i]] = reward[i] + self.gamma * np.amax(target_val[i])
if self.USE_PER:
indices = np.arange(self.batch_size, dtype=np.int32)
absolute_errors = np.abs(target_old[indices, action]-target[indices, action])
# Update priority
self.MEMORY.batch_update(tree_idx, absolute_errors)
# Train the Neural Network with batches
self.model.fit(state, target, batch_size=self.batch_size, verbose=0)
def load(self, name):
self.model.load_weights(name, by_name=False)
def save(self, name):
model = self.model
model.save_weights(name)
pylab.figure(figsize=(18, 9))
plt.figure(figsize=(18,9))
def PlotModel(self, score, episode):
self.scores.append(score)
self.episodes.append(episode)
self.average.append(sum(self.scores[-50:]) / len(self.scores[-50:]))
pylab.plot(self.episodes, self.scores, 'b')
pylab.plot(self.episodes, self.average, 'r')
pylab.ylabel('Score', fontsize=18)
pylab.xlabel('Games', fontsize=18)
dqn = 'DQN_'
dueling = ''
greedy = ''
PER = ''
if self.ddqn: dqn = '_DDQN'
if self.dueling: dueling = '_Dueling'
if self.epsilon_greedy: greedy = '_Greedy'
if self.USE_PER: PER = '_PER'
try:
if not os.path.exists('training_images'): os.makedirs('training_images')
pylab.savefig('training_images/'+self.env_name+dqn+dueling+greedy+PER+"_CNN.png")
except OSError as e:
pass
# no need to worry about model, when doing a lot of experiments
self.Model_name = os.path.join(self.Save_Path, self.env_name+dqn+dueling+greedy+PER+"_CNN.h5")
return self.average[-1]
def imshow(self, image, rem_step=0):
cv2.imshow("cartpole"+str(rem_step), image[rem_step,...])
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
return
def GetImage(self, frame):
# shift our data by 1 frame, similar to how a deque works
self.image_memory = np.roll(self.image_memory, 1, axis = 0)
# inserting new frame to free space
self.image_memory[0,:,:] = frame
# show image frame
#self.imshow(self.image_memory,0)
#self.imshow(self.image_memory,1)
#self.imshow(self.image_memory,2)
#self.imshow(self.image_memory,3)
return np.expand_dims(self.image_memory, axis=0)
def reset(self):
frame = self.env.reset()
        # fill the whole frame stack with the initial frame
        for i in range(self.REM_STEP):
            state = self.GetImage(frame)
return state
def step(self,action):
info = None
state, reward, total_reward, next_state, done = self.env.step(action)
next_state = self.GetImage(next_state)
return next_state, reward, done, info
def run(self):
decay_step = 0
max_average = -100000.0
for e in range(self.EPISODES):
state = self.reset()
done = False
score = 0
SAVING = ''
while not done:
decay_step += 1
action, explore_probability = self.act(state, decay_step)
next_state, reward, done, _ = self.step(action)
'''
if reward != 0.0:
print('States -------------------------------------------------------------------------')
plt.imshow(state[0][0])
plt.show()
plt.imshow(state[0][1])
plt.show()
plt.imshow(state[0][2])
plt.show()
plt.imshow(state[0][3])
plt.show()
print('Action:', action)
print('Reward:', reward)
'''
self.remember(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
# every episode, plot the result
average = self.PlotModel(score, e)
# saving best models
if average >= max_average:
max_average = average
self.save(self.Model_name)
SAVING = "SAVING"
else:
SAVING = ""
print("episode: {}/{}, score: {}, e: {:.2f}, average: {:.2f} {}".format(e, self.EPISODES, score, explore_probability, average, SAVING))
# update target model
self.update_target_model(decay_step)
# train model
self.replay()
        # close environment when finished training
#self.env.close()
def test(self, Model_name):
import time
self.load(Model_name)
for e in range(self.EPISODES):
state = self.reset()
done = False
score = 0
while not done:
time.sleep(.01)
action = np.argmax(self.model.predict(state))
state, reward, total_reward, next_state, done = self.env.step(action)
state = self.GetImage(next_state)
score += reward
if done:
print("episode: {}/{}, score: {}".format(e, self.EPISODES, score))
break
#self.env.close()
debug_mode = False
show_graph_every = False
means = False
env = GridWorld(show_graph_every, debug_mode, means)
if __name__ == "__main__":
env_name = 'GridWorld'
agent = DQNAgent(env_name, env)
agent.run()
#agent.test('models/GridWorldDQN__CNN.h5')
|
[
"lorenzo-pinto@hotmail.it"
] |
lorenzo-pinto@hotmail.it
|
cba3377a4bbbe906d3d124243bae0ea0f4854a4b
|
ad3b71aca0cc0b897b8d0bee6e5f4189553754ca
|
/src/ralph/scan/facts.py
|
6080abec40aa74e6f9208d995080ee400d886961
|
[
"Apache-2.0"
] |
permissive
|
quamilek/ralph
|
4c43e3168f469045f18114f10779ee63d8ac784d
|
bf7231ea096924332b874718b33cd1f43f9c783b
|
refs/heads/master
| 2021-05-23T07:57:34.864521 | 2014-05-27T08:51:32 | 2014-05-27T08:51:32 | 5,523,714 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,267 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import re
from lck.django.common.models import MACAddressField
from ralph.discovery.hardware import normalize_wwn
from ralph.discovery.models import (
DISK_PRODUCT_BLACKLIST,
DISK_VENDOR_BLACKLIST,
DeviceType,
MAC_PREFIX_BLACKLIST,
SERIAL_BLACKLIST,
)
from ralph.discovery.models_component import cores_from_model
from ralph.scan.lshw import parse_lshw, handle_lshw_storage
from ralph.scan.lshw import Error as LshwError
from ralph.util import network, uncompress_base64_data, units
SMBIOS_BANNER = 'ID SIZE TYPE'
DENSE_SPEED_REGEX = re.compile(r'(\d+)\s*([GgHhKkMmZz]+)')
_3WARE_GENERAL_REGEX = re.compile(r'tw_([^_]+_[^_]+)_([^_]+)')
SMARTCTL_REGEX = re.compile(r'smartctl_([^_]+)__(.+)')
HPACU_GENERAL_REGEX = re.compile(r'hpacu_([^_]+)__(.+)')
HPACU_LOGICAL_PHYSICAL_REGEX = re.compile(r'([^_]+)__(.+)')
MEGARAID_REGEX = re.compile(r'megacli_([^_]+)_([^_]+)__(.+)')
INQUIRY_REGEXES = (
re.compile(r'^(?P<vendor>OCZ)-(?P<sn>[a-zA-Z0-9]{16})OCZ-(?P<product>\S+)\s+.*$'),
re.compile(r'^(?P<vendor>(FUJITSU|TOSHIBA))\s+(?P<product>[a-zA-Z0-9]+)\s+(?P<sn>[a-zA-Z0-9]{16})$'),
re.compile(r'^(?P<vendor>SEAGATE)\s+(?P<product>ST[^G]+G)(?P<sn>[a-zA-Z0-9]+)$'),
re.compile(r'^(?P<vendor>SEAGATE)\s+(?P<product>ST[0-9]+SS)\s+(?P<sn>[a-zA-Z0-9]+)$'),
re.compile(r'^(?P<sn>[a-zA-Z0-9]{18})\s+(?P<vendor>INTEL)\s+(?P<product>[a-zA-Z0-9]+)\s+.*$'),
re.compile(r'^(?P<vendor>IBM)-(?P<product>[a-zA-Z0-9]+)\s+(?P<sn>[a-zA-Z0-9]+)$'),
re.compile(r'^(?P<vendor>HP)\s+(?P<product>[a-zA-Z0-9]{11})\s+(?P<sn>[a-zA-Z0-9]{12})$'),
re.compile(r'^(?P<vendor>HITACHI)\s+(?P<product>[a-zA-Z0-9]{15})(?P<sn>[a-zA-Z0-9]{15})$'),
re.compile(r'^(?P<vendor>HITACHI)\s+(?P<product>[a-zA-Z0-9]{15})\s+(?P<sn>[a-zA-Z0-9]{12})$'),
re.compile(r'^(?P<sn>[a-zA-Z0-9]{15})\s+(?P<vendor>Samsung)\s+(?P<product>[a-zA-Z0-9\s]+)\s+.*$'),
)
SEPARATE_VERSION = re.compile('[~|+|\-]')
def handle_facts(facts, is_virtual=False):
results = {}
if is_virtual:
results['model_name'] = " ".join(
(facts['manufacturer'], facts['virtual']),
)
results['type'] = DeviceType.virtual_server.raw
mac_address = handle_default_mac_address(facts)
if mac_address:
results['mac_addresses'] = [mac_address]
sn = "".join(
(
"VIRT0_",
mac_address,
'_',
hashlib.md5(
facts.get(
'sshdsakey',
facts.get('sshrsakey', '#'),
),
).hexdigest()[:8],
),
)
else:
sn = facts.get('serialnumber')
if sn in SERIAL_BLACKLIST:
sn = None
prod_name = facts.get('productname')
manufacturer = facts.get('manufacturer')
if prod_name:
if manufacturer and manufacturer in prod_name:
model_name = prod_name
else:
model_name = "{} {}".format(manufacturer, prod_name)
results['model_name'] = model_name
if DeviceType.blade_server.matches(model_name):
model_type = DeviceType.blade_server
else:
model_type = DeviceType.rack_server
results['type'] = model_type.raw
if sn:
results['serial_number'] = sn
mac_addresses = handle_facts_mac_addresses(facts)
if mac_addresses:
if 'mac_addresses' in results:
results['mac_addresses'].extend(mac_addresses)
else:
results['mac_addresses'] = mac_addresses
if 'smbios' in facts:
processors, memory = handle_facts_smbios(facts['smbios'], is_virtual)
if processors:
results['processors'] = processors
if memory:
results['memory'] = memory
disks = handle_facts_disks(facts)
if disks:
results['disks'] = disks
return results
def handle_default_mac_address(facts):
for suffix in ('', '_eth0', '_igb0', '_bnx0', '_bge0', '_nfo0', '_nge0'):
mac = facts.get('macaddress{}'.format(suffix))
if mac:
try:
result = MACAddressField.normalize(mac)
except ValueError:
continue
if result[:6] in MAC_PREFIX_BLACKLIST:
continue
return result
def handle_facts_mac_addresses(facts):
mac_addresses = set()
for interface in facts['interfaces'].split(','):
mac_address = facts.get('macaddress_{}'.format(interface))
if not mac_address:
continue
mac_addresses.add(mac_address)
return list(mac_addresses)
def handle_facts_ip_addresses(facts):
ip_addresses = set()
for interface in facts['interfaces'].split(','):
try:
ip = network.validate_ip(facts['ipaddress_{}'.format(interface)])
ip_addresses.add(ip)
except (ValueError, KeyError):
pass
return list(ip_addresses)
def handle_facts_smbios(fact_smbios, is_virtual):
raw_smbios = uncompress_base64_data(fact_smbios)
smbios = _parse_smbios(raw_smbios)
detected_memory = []
for memory_chip in smbios.get('MEMDEVICE', ()):
try:
size, size_units = memory_chip.get('Size', '').split(' ', 1)
size = int(size)
size /= units.size_divisor[size_units]
size = int(size)
except ValueError:
continue # empty slot
for split_key in ('BANK', 'Slot '):
try:
bank = memory_chip.get('Bank Locator').split(split_key)[1]
bank = int(bank) + 1
break
except (IndexError, ValueError):
bank = None # unknown bank
if bank is None:
continue
detected_memory.append({
'label': "{}{} {}".format(
'Virtual ' if is_virtual else '',
memory_chip.get(
'Device Locator',
memory_chip.get('Location Tag', 'DIMM'),
),
memory_chip.get('Part Number', ''),
),
'size': size,
})
detected_cpus = []
for cpu in smbios.get('PROCESSOR', ()):
m = DENSE_SPEED_REGEX.match(cpu.get('Maximum Speed', ''))
if not m:
continue
if 'enabled' not in cpu.get('Processor Status', ''):
continue
speed = int(m.group(1))
speed_units = m.group(2)
speed /= units.speed_divisor[speed_units]
speed = int(speed)
label = cpu['Location Tag']
family = cpu['Family']
if '(other)' in family:
family = cpu['Manufacturer']
index_parts = []
for cpu_part in cpu['Location Tag'].replace('CPU', '').split():
try:
index_parts.append(int(cpu_part.strip()))
except ValueError:
continue
index = reduce(lambda x, y: x * y, index_parts)
model_name = " ".join(cpu.get('Version', family).split())
detected_cpus.append({
'model_name': model_name,
'speed': speed,
'cores': max(1, cores_from_model(model_name)),
'family': family,
'label': label,
'index': index,
})
return detected_cpus, detected_memory
def _parse_smbios(raw_smbios):
if not raw_smbios.startswith(SMBIOS_BANNER):
raise ValueError("Incompatible SMBIOS answer.")
smb = {}
current = None
for line in raw_smbios.split('\n'):
if line == SMBIOS_BANNER:
if current:
ctype = current['__TYPE__']
del current['__TYPE__']
smb.setdefault(ctype, []).append(current)
current = None
elif current is None:
for token in line.split():
if token.startswith('SMB_TYPE_'):
current = {'__TYPE__': token[9:]}
break
else:
if ':' in line:
key, value = line.split(':', 1)
current[key.strip()] = value.strip()
else:
current.setdefault('capabilities', []).append(line)
return smb
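# A hypothetical sketch of the banner-delimited input _parse_smbios expects
# (shape only; real smbios output differs in detail):
#   ID SIZE TYPE
#   4 40 SMB_TYPE_PROCESSOR (processor)
#     Location Tag: CPU 0
#     Maximum Speed: 2400 MHz
#   ID SIZE TYPE
#   5 30 SMB_TYPE_MEMDEVICE (memory device)
#     Bank Locator: BANK 0
#     Size: 4096 MB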
def handle_facts_disks(facts):
disks = {}
_cur_key = None
for k, v in facts.iteritems():
if not k.startswith('disk_'):
continue
k = k[5:]
if k.endswith('_product'):
_cur_key = 'product'
k = k[:-8]
elif k.endswith('_revision'):
_cur_key = 'revision'
k = k[:-9]
elif k.endswith('_size'):
_cur_key = 'size'
k = k[:-5]
elif k.endswith('_vendor'):
_cur_key = 'vendor'
k = k[:-7]
elif k.endswith('_serial'):
_cur_key = 'serial'
k = k[:-7]
else:
continue
disks.setdefault(k, {})[_cur_key] = v.strip()
detected_disks = []
for mount_point, disk in disks.iteritems():
try:
if 'size' not in disk or not int(disk['size']):
continue
except ValueError:
continue
if disk['vendor'].lower() in DISK_VENDOR_BLACKLIST:
continue
if disk['product'].lower() in DISK_PRODUCT_BLACKLIST:
continue
sn = disk.get('serial', '').strip()
detected_disk = {
'size': int(int(disk['size']) / 1024 / 1024),
'label': '{} {} {}'.format(
disk['vendor'].strip(), disk['product'].strip(),
disk['revision'].strip(),
),
'mount_point': mount_point,
'family': disk['vendor'].strip(),
}
if sn:
detected_disk['serial_number'] = sn
detected_disks.append(detected_disk)
return detected_disks
def handle_facts_wwn(facts):
disk_shares = []
for key, wwn in facts.iteritems():
if not key.startswith('wwn_mpath'):
continue
path = key.replace('wwn_', '')
disk_shares.append({
'serial_number': normalize_wwn(wwn),
'volume': '/dev/mapper/%s' % path,
})
return disk_shares
def handle_facts_3ware_disks(facts):
disks = {}
for k, value in facts.iteritems():
m = _3WARE_GENERAL_REGEX.match(k)
if not m:
continue
key = m.group(2)
physical_disk = m.group(1)
disks.setdefault(physical_disk, {})[key] = value.strip()
detected_disks = []
for disk_handle, disk in disks.iteritems():
if not disk.get('serial'):
continue
size_value, size_unit, _ = disk['capacity'].split(None, 2)
size = int(float(size_value) / units.size_divisor[size_unit])
detected_disk = {
'serial_number': disk['serial'],
'size': size,
'label': disk['model'],
}
detected_disks.append(detected_disk)
return detected_disks
def handle_facts_smartctl(facts):
disks = {}
for k, value in facts.iteritems():
m = SMARTCTL_REGEX.match(k)
if not m:
continue
disk = m.group(1)
property = m.group(2)
disks.setdefault(disk, {})[property] = value.strip()
detected_disks = []
for disk_handle, disk in disks.iteritems():
if not disk.get('serial_number') or disk.get('device_type') != 'disk':
continue
if {
'user_capacity', 'vendor', 'product', 'transport_protocol',
} - set(disk.keys()):
# not all required keys present
continue
if disk['vendor'].lower() in DISK_VENDOR_BLACKLIST:
continue
if disk['product'].lower() in DISK_PRODUCT_BLACKLIST:
continue
size_value, size_unit, rest = disk['user_capacity'].split(' ', 2)
size_value = size_value.replace(',', '')
label_meta = [' '.join(disk['vendor'].split()), disk['product']]
if 'transport_protocol' in disk:
label_meta.append(disk['transport_protocol'])
detected_disks.append({
'serial_number': disk['serial_number'],
'size': int(int(size_value) / units.size_divisor[size_unit]),
'label': ' '.join(label_meta),
})
return detected_disks
def handle_facts_hpacu(facts):
disks = {}
for k, value in facts.iteritems():
m = HPACU_GENERAL_REGEX.match(k)
if not m:
continue
n = HPACU_LOGICAL_PHYSICAL_REGEX.match(m.group(2))
physical_disk = n.group(1) if n else None
property = n.group(2) if n else m.group(2)
if not physical_disk:
continue
disks.setdefault(physical_disk, {})[property] = value.strip()
detected_disks = []
for disk_handle, disk in disks.iteritems():
if not disk.get('serial_number'):
continue
size_value, size_unit = disk['size'].split()
detected_disks.append({
'serial_number': disk['serial_number'],
'label': '{} {}'.format(
' '.join(disk['model'].split()),
disk['interface_type'],
),
'size': int(float(size_value) / units.size_divisor[size_unit]),
})
return detected_disks
def _handle_inquiry_data(raw, controller, disk):
for regex in INQUIRY_REGEXES:
m = regex.match(raw)
if m:
return m.group('vendor'), m.group('product'), m.group('sn')
raise ValueError(
"Incompatible inquiry_data for disk {}/{}: {}".format(
controller, disk, raw,
)
)
def handle_facts_megaraid(facts):
disks = {}
for k, value in facts.iteritems():
m = MEGARAID_REGEX.match(k)
if not m:
continue
controller = m.group(1)
disk = m.group(2)
property = m.group(3)
disks.setdefault((controller, disk), {})[property] = value.strip()
detected_disks = []
for (controller_handle, disk_handle), disk in disks.iteritems():
inquiry_data = disk.get('inquiry_data', '')
if inquiry_data:
disk['vendor'], disk['product'], disk['serial_number'] = \
_handle_inquiry_data(
inquiry_data,
controller_handle,
disk_handle,
)
if not disk.get('serial_number') or disk.get('media_type') not in (
'Hard Disk Device', 'Solid State Device',
):
continue
if {
'coerced_size', 'vendor', 'product', 'pd_type',
} - set(disk.keys()):
# not all required keys present
continue
if disk['vendor'].lower() in DISK_VENDOR_BLACKLIST:
continue
if disk['product'].lower() in DISK_PRODUCT_BLACKLIST:
continue
size_value, size_unit, rest = disk['coerced_size'].split(' ', 2)
size_value = size_value.replace(',', '')
label_meta = [' '.join(disk['vendor'].split()), disk['product']]
if 'pd_type' in disk:
label_meta.append(disk['pd_type'])
detected_disks.append({
'serial_number': disk['serial_number'],
'label': ' '.join(label_meta),
'size': int(float(size_value) / units.size_divisor[size_unit]),
})
return detected_disks
def _get_storage_size_from_facts(facts):
disk_size = 0
smartctl_size = 0
reg = re.compile(r'^[0-9][0-9,]*')
for k, v in facts.iteritems():
if k.startswith('smartctl_') and k.endswith('_user_capacity'):
match = reg.match(v.strip())
if match:
try:
size = int(match.group(0).replace(',', ''))
except ValueError:
pass
else:
size = int(size / 1024 / 1024)
smartctl_size += size
if k.startswith('disk_') and k.endswith('_size'):
try:
size = int(int(v.strip()) / 1024 / 1024)
except ValueError:
pass
else:
disk_size += size
return smartctl_size if smartctl_size else disk_size
def handle_facts_os(facts, is_virtual=False):
result = {}
try:
os_name = "%s %s" % (
facts['operatingsystem'],
facts['operatingsystemrelease'],
)
family = facts['kernel']
except KeyError:
return result
os_version = facts.get('kernelrelease', '')
result['system_label'] = '%s%s' % (
os_name,
' %s' % os_version if os_version else '',
)
result['system_family'] = family
memory_size = None
try:
memory_size, unit = re.split('\s+', facts['memorysize'].lower())
if unit == 'tb':
memory_size = int(float(memory_size) * 1024 * 1024)
elif unit == 'gb':
memory_size = int(float(memory_size) * 1024)
elif unit == 'mb' or unit == 'mib':
memory_size = int(float(memory_size))
except (KeyError, ValueError):
pass
if memory_size:
result['system_memory'] = memory_size
cores_key = ('physical' if not is_virtual else '') + 'processorcount'
try:
cores_count = int(facts.get(cores_key))
except TypeError:
pass
else:
result['system_cores_count'] = cores_count
storage_size = _get_storage_size_from_facts(facts)
if not storage_size:
lshw = facts.get('lshw', None)
if lshw:
try:
lshw = uncompress_base64_data(lshw)
except TypeError:
pass
else:
try:
lshw = parse_lshw(lshw)
except LshwError:
pass
else:
storages = handle_lshw_storage(lshw)
storage_size = 0
for storage in storages:
storage_size += storage['size']
if storage_size:
result['system_storage'] = storage_size
return result
def _parse_packages(facts):
data = uncompress_base64_data(facts)
if data:
packages = data.strip().split(',')
for package in packages:
try:
name, version = package.split(None, 1)
except ValueError:
continue
yield {
'name': name,
'version': version,
}
def handle_facts_packages(facts):
packages = []
packages_list = _parse_packages(facts.get('packages'))
if not packages_list:
return packages
for package in packages_list:
version = filter(
None,
SEPARATE_VERSION.split(package['version'], 1)
)[0]
package_name = '{} - {}'.format(package['name'], version)
packages.append({
'label': package['name'],
'version': version,
'path': package_name,
'model_name': package_name,
})
return packages
|
[
"andrew.jankowski@gmail.com"
] |
andrew.jankowski@gmail.com
|
1b14e0893000f94e90a7478eb66d700400cb0141
|
7882860350c714e6c08368288dab721288b8d9db
|
/1일차/if(8번문제).py
|
9db67865be7d0871db81bafb600eeaa1d088a3f2
|
[] |
no_license
|
park-seonju/Algorithm
|
682fca984813a54b92a3f2ab174e4f05a95921a8
|
30e5bcb756e9388693624e8880e57bc92bfda969
|
refs/heads/master
| 2023-08-11T18:23:49.644259 | 2021-09-27T10:07:49 | 2021-09-27T10:07:49 | 388,741,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 192 |
py
|
result=[]
for i in range(100,301):
    a=int(i/100) # must cast to int
b=int(i/10)
if(a % 2 == 0 and b % 2 == 0 and i % 2 == 0):
result.append(str(i))
print(",".join(result))
|
[
"cucu9823@naver.com"
] |
cucu9823@naver.com
|
e4af460962dfe88532b86a2b00a0a1b9f6e8c649
|
db5f301e1d2ee17ffd8ba009606eb82f4729b112
|
/bot.py
|
be86223c6e466f349a3433ad194a33cbfa85d87a
|
[] |
no_license
|
RomanSarnov/Bot
|
5c96fc66e42ac44b37c870fbe5a6498fe04efcac
|
11a278af325dde75142590418a169e391a4457cf
|
refs/heads/master
| 2022-12-08T17:25:35.320742 | 2020-02-19T19:06:19 | 2020-02-19T19:06:19 | 240,790,736 | 0 | 0 | null | 2022-12-08T07:04:34 | 2020-02-15T21:32:24 |
Python
|
UTF-8
|
Python
| false | false | 10,304 |
py
|
import telebot
from telebot import types
import os
import psycopg2
from psycopg2.extras import DictCursor
DATABASE_URL = os.environ['DATABASE_URL']
con = psycopg2.connect(DATABASE_URL, sslmode='require', cursor_factory=DictCursor)
cur = con.cursor()
cur.execute("""CREATE TABLE address(
Id SERIAL PRIMARY KEY,
title VARCHAR(60),
latitude REAL,
longitude REAL,
img VARCHAR(150),
users INTEGER)"""
)
start,title,location,location_text,photo,stop,vivod= range(7)
from collections import defaultdict
user_state = defaultdict(lambda :start)
text_state = defaultdict(lambda :start)
data = {'users':{}}
# con = pymysql.connect('localhost', 'root',
# 'roman', 'bot',cursorclass=pymysql.cursors.DictCursor)
token = '1065495049:AAEcREziu5RwO94qtk46kb-kgeaKWA2LrJ4'
bot = telebot.TeleBot(token)
@bot.message_handler(commands=['start'],content_types=['text'],func = lambda message:get_state(message) == start)
def handler_message(message):
    bot.send_message(message.chat.id,'Начнем! Выберите одну из команд.')
@bot.message_handler(commands=['add'],content_types=['text'],func = lambda message:get_state(message) == start)
def handler_message(message):
data['users'][str(message.chat.id)] = {}
update_state(message, title)
bot.send_message(message.chat.id,'Укажите название места.')
bot.send_message(message.chat.id, 'Напишите в любой момент "Отмена", если хотите отменить добавление.')
@bot.message_handler(content_types=['text','photo','location'],func = lambda message:get_state(message) == title)
def handler_pribav(message):
if message.photo or message.location:
bot.send_message(message.chat.id, 'Я вас не понимаю. Укажите название места.')
else:
if 'отмена' in message.text.lower():
update_state(message, start)
bot.send_message(message.chat.id, 'Добавление отменено.')
else:
data['users'][str(message.chat.id)]['title'] = message.text
bot.send_message(message.chat.id,'Укажите адрес (Геопозиция).')
update_state(message, location)
@bot.message_handler(content_types=['location'],func = lambda message:get_state(message) == location)
def handler_location(message):
data['users'][str(message.chat.id)]['longitude'] = message.location.longitude
data['users'][str(message.chat.id)]['latitude'] = message.location.latitude
bot.send_message(message.chat.id,'Хотите прикрепить фотографию?')
update_state(message,location_text)
@bot.message_handler(content_types=['text','photo'],func = lambda message:get_state(message) == location)
def handler_location(message):
if message.photo:
bot.send_message(message.chat.id, 'Я вас не понимаю. Укажите адрес (Геопозиция).')
else:
if 'отмена' in message.text.lower():
update_state(message, start)
bot.send_message(message.chat.id, 'Добавление отменено.')
else:
bot.send_message(message.chat.id, 'Я вас не понимаю. Укажите адрес (Геопозиция).')
@bot.message_handler(content_types=['text','photo','location'],func = lambda message:get_state(message) == location_text)
def handler_consent(message):
if message.photo or message.location:
bot.send_message(message.chat.id, 'Я вас не понимаю. Хотите прикрепить фотографию?')
else:
if 'отмена' == message.text.lower():
update_state(message, start)
bot.send_message(message.chat.id, 'Добавление отменено.')
else:
if 'да' in message.text.lower():
bot.send_message(message.chat.id,'Прикрепите фотографию.')
update_state(message, photo)
elif 'нет' in message.text.lower() or 'не' in message.text.lower():
cur = con.cursor()
cur.execute("INSERT INTO address(title,latitude,longitude,users) VALUES ('%s','%s','%s','%s')"%(data['users'][str(message.chat.id)]['title'],data['users'][str(message.chat.id)]['latitude'],data['users'][str(message.chat.id)]['longitude'],int(message.chat.id)))
bot.send_message(message.chat.id, 'Место сохранено!')
update_state(message, start)
else:
bot.send_message(message.chat.id, 'Я вас не понимаю. Хотите прикрепить фотографию?')
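# Note: the %-interpolated queries above are vulnerable to SQL injection, and
# psycopg2 does not autocommit by default. A safer hypothetical sketch:
#   cur.execute(
#       "INSERT INTO address(title, latitude, longitude, users) VALUES (%s, %s, %s, %s)",
#       (title, latitude, longitude, message.chat.id),
#   )
#   con.commit()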
@bot.message_handler(content_types=['photo'],func = lambda message:get_state(message) == photo)
def handler_consent(message):
data['users'][str(message.chat.id)]['img']=message.photo[0].file_id
bot.send_message(message.chat.id, 'Место сохранено!')
cur = con.cursor()
cur.execute("INSERT INTO address(title,latitude,longitude,img,users) VALUES ('%s','%s','%s','%s','%s')"%(data['users'][str(message.chat.id)]['title'],data['users'][str(message.chat.id)]['latitude'],data['users'][str(message.chat.id)]['longitude'],data['users'][str(message.chat.id)]['img'],int(message.chat.id)))
update_state(message, start)
@bot.message_handler(content_types=['text','location'],func = lambda message:get_state(message) == photo)
def handler_consent(message):
if message.location:
bot.send_message(message.chat.id,text='Я вас не понимаю. Прикрепите фотографию.')
else:
if 'отмена' in message.text.lower():
update_state(message, start)
bot.send_message(message.chat.id, 'Добавление отменено.')
else:
bot.send_message(message.chat.id, 'Я вас не понимаю. Прикрепите фотографию.')
@bot.message_handler(commands=['list'],content_types=['text'],func = lambda message:get_state(message) == start)
def handler_list(message):
cur = con.cursor()
cur.execute('SELECT title, latitude, longitude, img from address where users = %s LIMIT 10'%(message.chat.id))
re = cur.fetchall()
if re:
for index,elem in enumerate(re):
name = elem['title']
lat = elem['latitude']
lon = elem['longitude']
img = elem['img']
if img:
bot.send_message(message.chat.id,text=f'{str(index+1)}. {name}')
bot.send_location(message.chat.id,latitude=lat,longitude=lon)
bot.send_photo(message.chat.id,photo=img)
else:
bot.send_message(message.chat.id, text=f'{str(index + 1)}. {name}')
bot.send_location(message.chat.id, latitude=lat, longitude=lon)
else:
bot.send_message(message.chat.id, text="У вас нет сохранённых мест.")
@bot.message_handler(commands=['reset'],content_types=['text'],func = lambda message:get_state(message) == start)
def handler_list(message):
cur = con.cursor()
cur.execute('SELECT title, latitude, longitude, img from address where users = %s'%(message.chat.id))
re = cur.fetchall()
if re:
bot.send_message(message.chat.id, text='Удалить сохранённые места?')
update_text(message,stop)
else:
bot.send_message(message.chat.id, text="У вас нет сохранённых мест.")
@bot.message_handler(content_types=['text'],func = lambda message:get_text_state(message) == stop)
def handler_reset_sogl(message):
if 'да' in message.text.lower():
bot.send_message(message.chat.id, text='Места удалены.')
update_text(message, start)
cur = con.cursor()
cur.execute('DELETE FROM address WHERE users = %s;' % (message.chat.id))
elif 'нет' in message.text.lower():
bot.send_message(message.chat.id, text='Удаление отменено.')
update_text(message, start)
else:
bot.send_message(message.chat.id, text='Я вас не понимаю. Удалить сохранённые места?')
@bot.message_handler(commands=['search'],content_types=['text'],func = lambda message:get_state(message) == start)
def handler_message(message):
bot.send_message(message.chat.id, 'Введите название места.')
update_text(message,vivod)
@bot.message_handler(content_types=['text'],func = lambda message:get_text_state(message) == vivod)
def handler_message(message):
cur = con.cursor()
cur.execute(
f"select * FROM address WHERE title = '{message.text}' and users = {message.chat.id};")
re = cur.fetchall()
if re:
for index, elem in enumerate(re):
name = elem['title']
lat = elem['latitude']
lon = elem['longitude']
img = elem['img']
update_text(message, start)
if img:
bot.send_message(message.chat.id, text=f'{str(index + 1)}. {name}')
bot.send_location(message.chat.id, latitude=lat, longitude=lon)
bot.send_photo(message.chat.id, photo=img)
else:
bot.send_message(message.chat.id, text=f'{str(index + 1)}. {name}')
bot.send_location(message.chat.id, latitude=lat, longitude=lon)
else:
update_text(message, start)
bot.send_message(message.chat.id, text="У вас нет сохранённых мест с данным названием.")
@bot.message_handler(content_types=['text','photo','location'],func = lambda message:get_state(message) == start)
def handler_message(message):
    bot.send_message(message.chat.id,'Я вас не понимаю. Выберите одну из команд.')
def get_text_state(message):
return text_state[message.chat.id]
def get_state(message):
return user_state[message.chat.id]
def update_text(message,state):
text_state[message.chat.id] = state
def update_state(message,state):
user_state[message.chat.id] = state
bot.polling(none_stop=True)
|
[
"rsarnov@mail.ru"
] |
rsarnov@mail.ru
|
358d9bc1ccaf0bf357a55881ad1d1e04df66e992
|
721f95317a4ca3837ee509de60b346d37fbd5b9d
|
/towhee/tests/engine/test_engine.py
|
bee06ea75ace7691684abb1bf3d8675db163bbc9
|
[
"Apache-2.0"
] |
permissive
|
yanliang567/towhee
|
5dbb3c544823f5aafc7b56b4a2c03e5364bfd346
|
b3d39dd11793244e655112b10bc15950c215d64a
|
refs/heads/main
| 2023-09-01T02:35:13.953830 | 2021-10-13T12:34:27 | 2021-10-13T12:34:27 | 411,902,421 | 0 | 0 |
Apache-2.0
| 2021-09-30T02:55:21 | 2021-09-30T02:55:21 | null |
UTF-8
|
Python
| false | false | 4,358 |
py
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee.dag.graph_repr import GraphRepr
from towhee.dag.variable_repr import VariableRepr
from towhee.dag.dataframe_repr import DataFrameRepr
import unittest
from towhee.engine.engine import Engine, EngineConfig
from towhee.tests import CACHE_PATH
from towhee.engine.pipeline import Pipeline
from towhee.dataframe import DataFrame, Variable
from towhee.tests.test_util import SIMPLE_PIPELINE_YAML
from towhee.dag import OperatorRepr
class TestEngine(unittest.TestCase):
"""
    Combined tests of engine/scheduler/task-executor/task.
"""
def setUp(self):
conf = EngineConfig()
conf.cache_path = CACHE_PATH
conf.sched_interval_ms = 20
engine = Engine()
if not engine.is_alive():
engine.start()
def test_engine(self):
start_op_repr = OperatorRepr(
name='_start_op',
function='internal',
init_args={},
inputs=[
{'name': 'num', 'df': 'op_test_in', 'col': 0}
],
outputs=[{'df': 'op_test_in'}],
iter_info={'type': 'map'}
)
add_op_repr = OperatorRepr(
name='mock_operators/add_operator',
function='mock_operators/add_operator',
init_args={'factor': 2},
inputs=[
{'name': 'num', 'df': 'op_test_in', 'col': 0}
],
outputs=[{'df': 'op_test_out'}],
iter_info={'type': 'map'}
)
end_op_repr = OperatorRepr(
name='_end_op',
function='internal',
init_args={},
inputs=[
{'name': 'sum', 'df': 'op_test_out', 'col': 0}
],
outputs=[{'df': 'op_test_out'}],
iter_info={'type': 'map'}
)
df_in_repr = DataFrameRepr(
name='op_test_in',
columns=[VariableRepr('num', 'int')]
)
df_out_repr = DataFrameRepr(
name='op_test_out',
columns=[VariableRepr('sum', 'int')]
)
op_reprs = {
start_op_repr.name: start_op_repr,
add_op_repr.name: add_op_repr,
end_op_repr.name: end_op_repr
}
df_reprs = {
df_in_repr.name: df_in_repr,
df_out_repr.name: df_out_repr
}
graph_repr = GraphRepr('add', op_reprs, df_reprs)
df_in = DataFrame(
'op_test_in', {'sum': {'index': 0, 'type': 'int'}})
df_in.put((Variable('int', 1), ))
df_in.seal()
self._pipeline = Pipeline(graph_repr)
engine = Engine()
engine.add_pipeline(self._pipeline)
result = self._pipeline(df_in)
_, ret = result.get(0, 1)
self.assertEqual(ret[0][0].value, 3)
df_in = DataFrame(
'op_test_in', {'sum': {'index': 0, 'type': 'int'}})
df_in.put((Variable('int', 3), ))
df_in.seal()
result = self._pipeline(df_in)
_, ret = result.get(0, 1)
self.assertEqual(ret[0][0].value, 5)
def test_simple_pipeline(self):
with open(SIMPLE_PIPELINE_YAML, 'r', encoding='utf-8') as f:
p = Pipeline(f.read())
engine = Engine()
engine.add_pipeline(p)
df_in = DataFrame(
'inputs', {'sum': {'index': 0, 'type': 'int'}})
df_in.put((Variable('int', 3), ))
df_in.seal()
result = p(df_in)
_, ret = result.get(0, 1)
self.assertEqual(ret[0][0].value, 6)
df_in = DataFrame(
'inputs', {'sum': {'index': 0, 'type': 'int'}})
df_in.put((Variable('int', 7), ))
df_in.seal()
result = p(df_in)
_, ret = result.get(0, 1)
self.assertEqual(ret[0][0].value, 10)
|
[
"noreply@github.com"
] |
yanliang567.noreply@github.com
|
fcdf112105ed9d68ed566939f6c55ea04625c5e8
|
7f8188f35d0d4f80b6e37b5d2108010348996f16
|
/news/api/models.py
|
8df397659a510f852dc99b377bdc3e3669d82e5a
|
[] |
no_license
|
Denarzan/DevelopsToday-test
|
83c8f76907344a93c21ebddce2898a0ae5f8839d
|
2a9f1c9c1af05c82e11d10cd30b510d6ac7569d4
|
refs/heads/main
| 2023-03-09T05:07:55.139352 | 2021-03-02T22:17:10 | 2021-03-02T22:17:10 | 343,865,689 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 909 |
py
|
from django.db import models
class Post(models.Model):
"""Model for creating posts."""
title = models.CharField(max_length=255)
link = models.CharField(max_length=255, unique=True)
    creation_date = models.DateTimeField(editable=False, auto_now_add=True)  # auto_now_add: set once at creation (auto_now would update on every save)
amount_of_upvotes = models.IntegerField(editable=False, default=0)
author_name = models.CharField(max_length=128)
def add_vote(self):
"""Function to increase the current number of votes by 1."""
self.amount_of_upvotes += 1
self.save(force_update=True)
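    # A race-safe hypothetical variant using an F() expression instead of the
    # read-modify-write above (not part of the original model):
    #   from django.db.models import F
    #   def add_vote(self):
    #       self.amount_of_upvotes = F('amount_of_upvotes') + 1
    #       self.save(update_fields=['amount_of_upvotes'])
    #       self.refresh_from_db()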
class Meta:
ordering = ["creation_date"]
class Comment(models.Model):
"""Model for creating comments of posts."""
post = models.ForeignKey(Post, on_delete=models.CASCADE)
author_name = models.CharField(max_length=128)
content = models.TextField()
    creation_date = models.DateTimeField(editable=False, auto_now_add=True)
|
[
"dakhnonazar@gmail.com"
] |
dakhnonazar@gmail.com
|
76283425866e43198277a6f4f43dcc74ae590214
|
e1009433697344f0ce6ec953f086be698fa4e6c4
|
/parsmodel.py
|
10d1dbeb9a2e033a4397f7c7bf345fec03e56af2
|
[] |
no_license
|
bladas/online-store
|
7e848bad1137cf7886cec6bf7563867e5f8f5e36
|
6fd68e0d1318b796b05a94fa5547d5e87a2b0172
|
refs/heads/master
| 2023-05-02T07:11:55.614313 | 2020-01-06T14:20:19 | 2020-01-06T14:20:19 | 216,340,778 | 0 | 0 | null | 2023-04-21T20:38:49 | 2019-10-20T10:00:46 |
Python
|
UTF-8
|
Python
| false | false | 2,339 |
py
|
import json
from home.models import Category, UnderCategory, Product
def create(json, Category, UnderCategory, Product):  # note: parameters shadow the module-level imports above
with open('citrus.json', 'r') as json_file:
data = json.load(json_file)
for elem in data:
print(elem.get('name'))
print(elem.get('category'))
print(elem.get('undercategory'))
print(elem.get('price'))
# new_category = Category.objects.create(title=elem.get('category'))
# new_uc = UnderCategory.objects.create(title=elem.get('undercategory'), category=new_category)
# new_product = Product.objects.create(name=elem.get('name'), ucategory=new_uc)
# new_category.save()
# new_uc.save()
# new_product = Product.objects.create(name=elem.get('name'), ucategory=new_uc)
try:
category = Category.objects.get(title=elem.get('category'))
try:
ucategory = UnderCategory.objects.get(title=elem.get('undercategory'), category=category)
new_product = Product.objects.create(name=elem.get('name'), ucategory=ucategory,
price=elem.get('price'))
new_product.save()
                except UnderCategory.DoesNotExist:
                    # fixed: use the 'category' fetched above ('new_category' is undefined in this branch)
                    new_uc = UnderCategory.objects.create(title=elem.get('undercategory'), category=category)
new_uc.save()
new_product = Product.objects.create(name=elem.get('name'), ucategory=new_uc,
price=elem.get('price'))
new_product.save()
            except Category.DoesNotExist:
new_category = Category.objects.create(title=elem.get('category'))
new_category.save()
try:
print(UnderCategory.objects.get(title=elem.get('undercategory'), category=new_category))
                except UnderCategory.DoesNotExist:
new_uc = UnderCategory.objects.create(title=elem.get('undercategory'), category=new_category)
new_uc.save()
new_product = Product.objects.create(name=elem.get('name'), ucategory=new_uc,price=elem.get('price'))
new_product.save()
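# The try/except cascade above is essentially Django's get_or_create; a shorter
# hypothetical sketch of the same logic (not in the original):
#   category, _ = Category.objects.get_or_create(title=elem.get('category'))
#   ucategory, _ = UnderCategory.objects.get_or_create(
#       title=elem.get('undercategory'), category=category)
#   Product.objects.create(name=elem.get('name'), ucategory=ucategory,
#                          price=elem.get('price'))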
# print(create())
create(json, Category, UnderCategory, Product)
|
[
"dashkevich_v@ukr.net"
] |
dashkevich_v@ukr.net
|
56d37d047190975695cb0168c225c11656be6066
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/routing_transformer/routing_tf_api.py
|
ddc35172d2adda48e5cb8cb0ef32aaa4146d4629
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 |
Apache-2.0
| 2020-06-23T01:55:11 | 2020-02-23T07:59:42 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 7,727 |
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pdb
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.get_logger().setLevel('ERROR')
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import problem
from routing_transformer.problems import pg19
from tensorflow.compat.v1 import estimator as tf_estimator
from tqdm import tqdm
from routing_transformer.sparse_transformer import SparseTransformer
import numpy as np
import random
from scipy.special import log_softmax
VOCAB_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-data/vocab.pg19_length8k.32768.subwords"
HPARAMS_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/hparams.json"
CKPT_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/ckpt-3530000"
MAX_SEQUENCE_LENGTH = 8192
class SparseTransformerWrapper(object):
def __init__(self, max_seq_length=None):
# Load hyperparameters
self.max_seq_length = max_seq_length or MAX_SEQUENCE_LENGTH
# Needed since RT uses blocks of size 256
assert self.max_seq_length % 256 == 0
hparams = hparams_lib.create_hparams_from_json(HPARAMS_PATH)
hparams.use_tpu = False
hparams = zero_dropout(hparams)
# Build TF1 graph of model
sptf_model = SparseTransformer(hparams, tf_estimator.ModeKeys.EVAL)
self.input_nodes = {
"targets": tf.placeholder(tf.int32, [None, self.max_seq_length])
}
self.output_nodes = sptf_model.body(self.input_nodes)
# Map the checkpoint variables to the graph
init_from_checkpoint(CKPT_PATH, variable_prefix="sparse_transformer/body")
# create a session object, and actually initialize the graph
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.encoder = text_encoder.SubwordTextEncoder(VOCAB_PATH)
def forward(self, sentences, encode_sentences=True, relevant_subsequences=None):
encoded_sents = []
encoded_seqs_no_pad = []
if encode_sentences:
for sent in sentences:
encoded = []
for line in sent.split("\n"):
new_tokens = self.encoder.encode(line.strip())
if len(encoded) + len(new_tokens) >= self.max_seq_length:
break
encoded.extend(new_tokens)
encoded.append(text_encoder.EOS_ID)
encoded_seqs_no_pad.append(encoded)
# pad shorter sequences to the full length
encoded = encoded + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(encoded))]
assert len(encoded) == self.max_seq_length
encoded_sents.append(encoded)
else:
# assume sentences are encoded, pad/truncate them
for sent in sentences:
sent = sent[:self.max_seq_length]
encoded_seqs_no_pad.append(sent)
sent = sent + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(sent))]
encoded_sents.append(sent)
feed_dict = {
self.input_nodes["targets"]: np.array(encoded_sents)
}
outputs = self.sess.run(self.output_nodes, feed_dict=feed_dict)
return_outputs = {
"logits": np.squeeze(outputs[0], axis=(2, 3)),
"loss": outputs[1]["training"],
"encoded_seqs_no_pad": encoded_seqs_no_pad
}
if relevant_subsequences is not None:
for i, rss in enumerate(relevant_subsequences):
encoded_subseq = self.encoder.encode(rss)
positions = find_sub_list(encoded_subseq, encoded_sents[i])
misaligned_prefix_length = 0
while positions is None:
misaligned_prefix_length += 1
encoded_subseq = encoded_subseq[1:]
positions = find_sub_list(encoded_subseq, encoded_sents[i])
start, end = positions[-1]
relevant_logits = return_outputs["logits"][i][start:end]
log_probs = log_softmax(relevant_logits, axis=1)
gold_log_probs = [lp[index] for index, lp in zip(encoded_subseq, log_probs)]
return_outputs["subseq_log_loss"] = -1 * np.mean(gold_log_probs)
return_outputs["misaligned_prefix_length"] = misaligned_prefix_length
return return_outputs
def close(self):
self.sess.close()
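# Hypothetical usage sketch (assumes the checkpoint/vocab paths above exist;
# not part of the original file):
#   wrapper = SparseTransformerWrapper(max_seq_length=512)
#   out = wrapper.forward(["Some book text to score."])
#   print(out["loss"])
#   wrapper.close()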
def find_sub_list(sl, l):
"""Find sub-string, so as to be able to compute ppl of a sub-string."""
sll=len(sl)
matches = []
for ind in (i for i,e in enumerate(l) if e == sl[0]):
if l[ind:ind + sll] == sl:
matches.append(
(ind, ind + sll)
)
if matches:
return matches
def zero_dropout(hparams):
hparams.input_dropout = 0.0
hparams.dropout = 0.0
hparams.relu_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
return hparams
def log_variables(name, var_names):
tf.logging.info("%s (%d total): %s", name, len(var_names),
random.sample(var_names, min(len(var_names), 5)))
def init_from_checkpoint(checkpoint_path,
checkpoint_prefix=None,
variable_prefix=None,
target_variables=None):
"""Initializes all of the variables using `init_checkpoint."""
tf.logging.info("Loading variables from %s", checkpoint_path)
checkpoint_variables = {
name: name for name, _ in tf.train.list_variables(checkpoint_path) if "Adafactor" not in name
}
if target_variables is None:
target_variables = tf.trainable_variables()
target_variables = {var.name.split(":")[0]: var for var in target_variables}
if checkpoint_prefix is not None:
checkpoint_variables = {
checkpoint_prefix + "/" + name: varname
for name, varname in checkpoint_variables.items()
}
if variable_prefix is not None:
target_variables = {
variable_prefix + "/" + name: var
for name, var in target_variables.items()
}
checkpoint_var_names = set(checkpoint_variables.keys())
target_var_names = set(target_variables.keys())
intersected_var_names = target_var_names & checkpoint_var_names
assignment_map = {
checkpoint_variables[name]: target_variables[name]
for name in intersected_var_names
}
tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
log_variables("Loaded variables", intersected_var_names)
log_variables("Uninitialized variables", target_var_names - checkpoint_var_names)
log_variables("Unused variables", checkpoint_var_names - target_var_names)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
62ad7ab2952d9267e0c46642ae083f48150e7885
|
56ff291cbddacda6cac8e1041df4159a9f715813
|
/base/admin.py
|
1fb73fe70a5975410c11a5fd0d7edaafee4076fd
|
[] |
no_license
|
roopak1997/YT_API
|
4bb761841dde930ed63df58016cfa28946c9ed78
|
0bf226b27b79978340908e897e9596f867aef1dc
|
refs/heads/master
| 2023-05-06T11:33:55.613972 | 2021-05-29T14:45:17 | 2021-05-29T14:45:17 | 371,979,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
from django.contrib import admin
from base.models import Video, Thumbnail
admin.site.register(Video)
admin.site.register(Thumbnail)
|
[
"roopaksrinivasan@gmail.com"
] |
roopaksrinivasan@gmail.com
|
8365348c8c8d72df578af246b3fea656a5feed86
|
727f1bc2205c88577b419cf0036c029b8c6f7766
|
/out-bin/py/google/fhir/labels/bundle_to_label.runfiles/com_google_fhir/external/pypi__apache_beam_2_9_0/apache_beam/runners/direct/bundle_factory.py
|
032aadc4fe49359d4995e2916d7a25262bdded85
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir
|
55cf78feed3596a3101b86f9e9bbf6652c6ed4ad
|
d49883cc4d4986e11ca66058d5a327691e6e048a
|
refs/heads/master
| 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 |
Apache-2.0
| 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null |
UTF-8
|
Python
| false | false | 154 |
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__apache_beam_2_9_0/apache_beam/runners/direct/bundle_factory.py
|
[
"ruchika.kharwar@gmail.com"
] |
ruchika.kharwar@gmail.com
|
bfd1c49315015eb94a929c27688e920b79adaa3e
|
2c2a9948ad2885f85576e9fc852515d43ed7054c
|
/backend/music/migrations/0002_auto_20210714_0213.py
|
c40873543f9e5e67180f4cd14d69f7b4db644f4b
|
[] |
no_license
|
Park-JE/NoraeChuchun
|
fcee711465e80c5109bb0022a106622f978c6ff4
|
3d1ffcafa872454fb8af1457ec7cf8ad8fc5a135
|
refs/heads/main
| 2023-06-29T20:15:51.044111 | 2021-08-03T16:04:30 | 2021-08-03T16:04:30 | 376,422,286 | 3 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 771 |
py
|
# Generated by Django 3.2.4 on 2021-07-14 02:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userplaylist',
name='desc',
field=models.CharField(blank=True, max_length=64, null=True, verbose_name='설명'),
),
migrations.AlterField(
model_name='playlist',
name='listid',
field=models.IntegerField(verbose_name='플레이리스트id'),
),
migrations.AlterField(
model_name='playlist',
name='song',
field=models.IntegerField(verbose_name='노래번호'),
),
]
|
[
"jieun.fs@gmail.com"
] |
jieun.fs@gmail.com
|
8a82ec6cc56d3bbe1891aaccaffd1ec003e7f305
|
f9cb33c458429ba9b3c4b5956d65d2bac4d0d5e3
|
/python/lab13.5 pre final finish/Majority Element.py
|
5d0efcea9ca56978ab7699a652f1965cbc15caad
|
[] |
no_license
|
BIRDDYTTP/elabProgrammingConcept
|
4d49c8f9715cc59edf9dbc4d9a2760be96cd5470
|
64b1bd96ef11d090c089430e712f3063decac0dd
|
refs/heads/master
| 2020-03-27T00:04:05.858680 | 2018-12-05T18:42:53 | 2018-12-05T18:42:53 | 145,590,772 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
num = int(input())
lsA = []
for x in range(num):
st = str(input())
lsA.append(st)
checkls = []
for x in lsA:
if ord(x) in checkls:
pass
else:
checkls.append(ord(x))
countchc = []
char = []
for x in checkls:
count = lsA.count(chr(x))
countchc.append(count)
char.append(chr(x))
printls = []
count = 0
while count <= len(countchc)-1:
if countchc[count] >= (num//2)+1:
printls.append(char[count])
count += 1
if len(printls) == 0:
print(len(printls))
else:
for x in printls:
print(x)
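# A shorter hypothetical version using collections.Counter (illustration only;
# output order is not guaranteed to match the original):
#   from collections import Counter
#   counts = Counter(lsA)
#   majority = [ch for ch, c in counts.items() if c >= num // 2 + 1]
#   print("\n".join(majority) if majority else 0)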
|
[
"thitipong.ha@ku.th"
] |
thitipong.ha@ku.th
|
921e1b272e52e944ebf83d4db20d7640c81d2453
|
0fb68fce93c7fefdef491eb3da5a2a147b35dba8
|
/simulacoes/cenario4.py
|
8236c82da133380195678eecb3416748123c7f03
|
[] |
no_license
|
mandoju/trabalhoAD
|
187a7f2c23e17ad0c1c2660b21fdd6b34be20ddb
|
a720dc3f95e5aac4d19ba119fe6ae3e6a4bbef1f
|
refs/heads/master
| 2020-03-27T00:20:31.317138 | 2015-08-24T23:30:59 | 2015-08-24T23:30:59 | 39,862,671 | 0 | 1 | null | null | null | null |
ISO-8859-1
|
Python
| false | false | 3,202 |
py
|
# -*- coding: latin-1 -*-
__author__ = 'Rodrigo'
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import spline
def simula4():
    # parameters
numero_ciclos= 1
tempo_simulacao = 1000000
    u=[]  # list for plotting
    valor=[]  # list for plotting
    ## TODO: better names for the variables
for w in range(1,20):
        u_servidor = round(0.5*(w+1), 2)  # service rate of the problem
media=0
for q in range(numero_ciclos):
            lambda_entrada = 0.01 ## fixed lambda
fila = 0
i = 0
j = 0
servidor = 0
tempo_proximo = 0
            entrada = 0 ## time until the next arrival
            tempo_proximo = 0 ## time until execution finishes on the server
            ## variables for computing the average
tempo = 0
esperanca = 0
fim = 0
print(q)
while (1):
if(tempo == tempo_simulacao):
esperanca = esperanca / tempo
                    #print('the average number of people in the system is: ' + str(esperanca))
media= media + esperanca/numero_ciclos
break
while(entrada == 0):
                    entrada = int(np.random.exponential(1/lambda_entrada)) ## generates a new exponential arrival
fila+= 1
i += 1
entrada -= 1
                ## while needed in case we land on tempo = 0 again
resto = 0
while(tempo_proximo == 0 and fila > 0):
servidor = 1
fila -= 1
aleatorio = np.random.random_sample()
if(aleatorio > 0.1):
fila +=1
exponencial = np.random.exponential(1/u_servidor)
tempo_proximo = int(exponencial)
resto += exponencial - tempo_proximo
if(resto > 1):
tempo_proximo += 1
j += 1
if(fila == 0 and tempo_proximo == 0):
servidor = 0
else:
tempo_proximo -= 1
                ## leftover bits to stop once everything has been processed
                ## never reached, since it stops after the last batch of arrivals
                esperanca += fila + servidor ## add the number of clients in the system
                tempo += 1 ## advance the time by one
        ## running the main function
print("Media das medias: ", media)
u.append(u_servidor)
valor.append(media)
plt.figure(figsize=(8, 6), dpi=100)
x_smooth = np.linspace(min(u), max(u), 10000)
y_smooth = spline (u, valor, x_smooth)
plt.plot(x_smooth,y_smooth,'-b', label="Curva Amortizada")
plt.plot(u, valor, 'bo', label="Pontos Da Curva")
#plt.plot(lamb, valor2, '-ro', label="Curva Teste")
plt.axis([0, 1.1*(max(u)), 0, 1.2*(max(valor))])
plt.suptitle('Cenário-3', fontsize=20)
plt.xlabel('Lambda', fontsize=15)
plt.ylabel('Média', fontsize=15)
plt.legend(loc=1, prop={'size':10})
plt.show()
|
[
"jorgezin@gmail.com"
] |
jorgezin@gmail.com
|
dbb1f628f7a710240e42d0872d1f4720c17b1f93
|
31f090ee6707817164a2514993e92882e2220670
|
/app.py
|
3a68958515690552cacba7d24d13795bc27f094c
|
[] |
no_license
|
Nandan26/Internship
|
0397589c099822ded1c8c3ce6d460640020f82db
|
a416c0fb3995428554d40e8f4c71554eeeb42d47
|
refs/heads/main
| 2023-06-17T11:55:23.736400 | 2021-07-07T11:42:52 | 2021-07-07T11:42:52 | 383,768,347 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,398 |
py
|
from flask import Flask, request, render_template
import sklearn
import joblib
import pandas as pd
import numpy as np
import statsmodels.api as sm
from flask_cors import cross_origin,CORS
#for enable cross platform communication
app = Flask(__name__)
#for Next day prediction
scaler= joblib.load('scaler.pkl')
model1 = joblib.load('Model1.pkl')
#for next minute prediction
scaler2= joblib.load('scaler2.pkl')
model2=joblib.load(open('Model2.pkl','rb'))
#just another way of doing it
#CORS(app)
@app.route("/")
@cross_origin()
def home():
return render_template("index.html")
@app.route("/predict", methods = ["GET", "POST"])
@cross_origin()
def predict():
if request.method == "POST":
# Open Price
op = float(request.form["Open_price"])
# High Price
hp = float(request.form["High_price"])
# Low Price
lp = float(request.form["Low_price"])
adj = float(request.form["Adj_Close"])
vm = float(request.form["Volume"])
# Date
Date = request.form["Date_Time"]
Day = int(pd.to_datetime(Date,format="%Y-%m-%d").day)
Year = int(pd.to_datetime(Date,format="%Y-%m-%d").year)
Month = int(pd.to_datetime(Date,format="%Y-%m-%d").month)
DayofWeek = int(pd.to_datetime(Date,format="%Y-%m-%d").dayofweek)
# Previous day Open Price
pop = float(request.form["popen"])
# Previous day close Price
pcp = float(request.form["pclose"])
scaled=scaler.transform([[op,hp,lp,adj,vm,op-pcp,op-pop,Day,Year,Month,DayofWeek]])
prediction=model1.predict(scaled)
if prediction[0]==1:
output2= "Up"
else:
output2= "Down"
scaled=scaler2.transform([[op,hp,lp]])
dataset = pd.DataFrame({'const' : 1,'op': scaled[:, 0], 'hp': scaled[:, 1], 'lp': scaled[:, 2]})
pred=model2.predict(dataset)
if pred[0] > 0.5:
output= "Up"
else:
output= "Down"
        return render_template('index.html', prediction_text="Your stock will go {} in the next minute and {} over the next day".format(output, output2))
return render_template("index.html")
if __name__ == "__main__":
app.run(debug=True)
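# Hypothetical local test (field names match the form keys read above):
#   curl -X POST http://127.0.0.1:5000/predict \
#        -d "Open_price=100" -d "High_price=105" -d "Low_price=98" \
#        -d "Adj_Close=101" -d "Volume=1000000" -d "Date_Time=2021-07-01" \
#        -d "popen=99" -d "pclose=100"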
|
[
"noreply@github.com"
] |
Nandan26.noreply@github.com
|
7e00da9fc16b52f90cc97d91dacdb51b2ecbfb55
|
90c07b483649784e202c67d66e19fad82e7f68de
|
/test/fixture/vl53l0x-long-output.py
|
2669a93463cf15ef98065bdab3525ea745259f14
|
[] |
no_license
|
mlenkeit/balcony-server
|
67142a3b6113c43a27f93a8e7c392f5498969a3c
|
7e20d9139e92512ea5982a0815a8e4bbf7603eaa
|
refs/heads/master
| 2021-01-02T22:59:22.143355 | 2017-08-13T13:35:23 | 2017-08-13T13:35:23 | 99,431,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 563 |
py
|
print """Creating tof...
Start ranging...
VL53L0X Start Ranging Object 0 Address 0x29
VL53L0X_GetDeviceInfo:
Device Name : VL53L0X ES1 or later
Device Type : VL53L0X
Device ID : VL53L0CAV0DH/1$5
ProductRevisionMajor : 1
ProductRevisionMinor : 1
VL53L0X_LONG_RANGE_MODE
API Status: 0 : No Error
Timing 33 ms
743 mm, 74 cm, 1
755 mm, 75 cm, 2
711 mm, 71 cm, 3
758 mm, 75 cm, 4
729 mm, 72 cm, 5
690 mm, 69 cm, 6
735 mm, 73 cm, 7
742 mm, 74 cm, 8
634 mm, 63 cm, 9
722 mm, 72 cm, 10
Call of VL53L0X_StopMeasurement
Wait Stop to be competed
API Status: 0 : No Error"""
|
[
"maximilian.lenkeit@sap.com"
] |
maximilian.lenkeit@sap.com
|
f951e1cff4773f3d7bbafa8a8da8f51e39292a6b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/cyzbSvpfSzDjGi4TB_6.py
|
3dc41e9ce3d55a84ccef17f3ab0f837b05e5f6c6
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 74 |
py
|
def harmonic(n):
return round(sum([1/x for x in range(1, n+1)] ), 3 )
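# A few hypothetical spot checks (the function rounds to 3 decimal places):
#   harmonic(1) -> 1.0
#   harmonic(2) -> 1.5
#   harmonic(3) -> 1.833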
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
cb05b353fe6c3caabc51970a0c30c56d9c656422
|
8ac6d0a2c9008b40809cd82acb5b2aa7ebb02c32
|
/mne_nirs/tests/test_examples.py
|
e22ac7b3291636b1749997d0df71984250b20367
|
[] |
no_license
|
seapsy/mne-nirs
|
da75e4d9d5f2392d056f4df9d11f9e636e4cb8de
|
a229f8e8f93d950f5b3791575f73b5c949651127
|
refs/heads/master
| 2023-01-29T01:15:48.542632 | 2020-12-09T06:26:19 | 2020-12-09T06:26:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,220 |
py
|
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
# This script runs each of the example scripts. It acts as a system test.
import os
import pytest
def examples_path():
if not os.path.isdir("BIDS-NIRS-Tapping"):
os.system("git clone --depth 1 "
"https://github.com/rob-luke/BIDS-NIRS-Tapping.git")
if os.path.isdir("examples"):
path = "examples/"
else:
path = "../examples/"
return path
def run_script_and_check(test_file_path):
import matplotlib as mpl
mpl.use("Agg") # Useful when testing locally as not to block progress
return exec(open(test_file_path).read(), locals(), locals())
@pytest.mark.parametrize('fname', (["plot_10_hrf_simulation.py",
"plot_11_hrf_measured.py",
"plot_12_group_glm.py",
"plot_19_snirf.py",
"plot_20_enhance.py",
"plot_30_frequency.py",
"plot_99_bad.py"]))
def test_examples(fname):
test_file_path = examples_path() + fname
run_script_and_check(test_file_path)
|
[
"noreply@github.com"
] |
seapsy.noreply@github.com
|
ec3322af4a258e119685102a357e04305f55bd12
|
faf7110bbfb970ae9a66d2e52a1be2fcd73a7a0c
|
/settings.py
|
95b1f2a77ecf4f07b305b0136e71164299a163d7
|
[] |
no_license
|
willsouza04/Radar
|
45f79a7d57250e77491fbfaa323cce5a059bc9eb
|
114b5383edf07355301c7103919ae536c4f038f1
|
refs/heads/master
| 2020-12-20T14:19:09.680784 | 2020-01-25T00:14:56 | 2020-01-25T00:14:56 | 236,105,080 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 644 |
py
|
class Settings():
def __init__(self, screenSize):
        # Screen settings
self.screen_width = screenSize[0]
self.screen_height = screenSize[1]
self.bg_color = (100, 100, 100)
        # Rectangle settings
self.rect_color = (150, 150, 150)
self.rect_width = 300
self.rect_height = 200
        # Font settings
self.font_color = (20, 20, 20)
        # Sheep settings
self.sheep_speed = 0.5
        # Menu settings
self.font_color_buttons = (200, 200, 200)
self.font_color_buttons_featured = (150, 150, 150)
|
[
"willsouza04@gmail.com"
] |
willsouza04@gmail.com
|
452c423ba680971cc37cb2c6c4d90ab016e7bf81
|
38958483cc2dbeb35497b8d091e32e42c9c90f07
|
/day17/app.py
|
853de8840b5a54f54ae1cd99f35ede495af05370
|
[] |
no_license
|
jonthornton/adventofcode2020
|
3fb7204dd224b81884ea2217dd7beb1c3eb30349
|
ba8f67cfebb04b4441ca79e7efe25f0287eb02b9
|
refs/heads/master
| 2023-02-17T18:32:36.820428 | 2021-01-19T03:47:28 | 2021-01-19T03:47:28 | 326,104,480 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,113 |
py
|
# coding: utf-8
"""
:copyright: (c) 2020 by Jon Thornton.
"""
def main():
input = []
with open('input.txt', 'r') as file:
input = [line.strip() for line in file]
s = Solution()
# print(s.part1(input))
print(s.part2(input))
class Solution:
def part1(self, input):
active_blocks = set()
x = 0
y = 0
z = 0
for y, line in enumerate(input):
for x, block in enumerate(line):
if block == '#':
active_blocks.add((x, y, z))
for i in range(6):
# print(active_blocks)
active_blocks = self.do_cycle(active_blocks)
return len(active_blocks)
def part2(self, input):
active_blocks = set()
x = 0
y = 0
z = 0
w = 0
for y, line in enumerate(input):
for x, block in enumerate(line):
if block == '#':
active_blocks.add((x, y, z, w))
for i in range(6):
# print(active_blocks)
active_blocks = self.do_cycle_4d(active_blocks)
return len(active_blocks)
def do_cycle_4d(self, active_blocks):
new_active = set()
for block in active_blocks:
if self.should_be_active_block_4d(block, active_blocks):
new_active.add(block)
neighbors = self.get_neighbors_4d(block)
for n in neighbors:
if self.should_be_active_block_4d(n, active_blocks):
new_active.add(n)
return new_active
def do_cycle(self, active_blocks):
new_active = set()
for block in active_blocks:
if self.should_be_active_block(block, active_blocks):
new_active.add(block)
neighbors = self.get_neighbors(block)
for n in neighbors:
if self.should_be_active_block(n, active_blocks):
new_active.add(n)
return new_active
def should_be_active_block(self, block, active_blocks):
is_active = block in active_blocks
active_neighbors = self.get_active_neighbors(block, active_blocks)
if is_active and 2 <= len(active_neighbors) <= 3:
return True
elif not is_active and len(active_neighbors) == 3:
return True
return False
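    # The two branches above encode the Conway-cube rules: an active cube stays
    # active with 2 or 3 active neighbors; an inactive cube becomes active with
    # exactly 3 active neighbors.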
def should_be_active_block_4d(self, block, active_blocks):
is_active = block in active_blocks
active_neighbors = self.get_active_neighbors_4d(block, active_blocks)
if is_active and 2 <= len(active_neighbors) <= 3:
return True
elif not is_active and len(active_neighbors) == 3:
return True
return False
def get_active_neighbors(self, block, active_blocks):
neighbors = self.get_neighbors(block)
collector = []
for n in neighbors:
if n in active_blocks:
collector.append(n)
return collector
def get_active_neighbors_4d(self, block, active_blocks):
neighbors = self.get_neighbors_4d(block)
collector = []
for n in neighbors:
if n in active_blocks:
collector.append(n)
return collector
def get_neighbors(self, block):
collector = []
for x in range(-1, 2):
for y in range(-1, 2):
for z in range(-1, 2):
if x == 0 and y == 0 and z == 0:
continue
collector.append((block[0] + x, block[1] + y, block[2] + z))
return collector
def get_neighbors_4d(self, block):
collector = []
for x in range(-1, 2):
for y in range(-1, 2):
for z in range(-1, 2):
for w in range(-1, 2):
if x == 0 and y == 0 and z == 0 and w == 0:
continue
collector.append((block[0] + x, block[1] + y, block[2] + z, block[3] + w))
return collector
if __name__ == '__main__':
main()
|
[
"thornton.jon@gmail.com"
] |
thornton.jon@gmail.com
|
874ca0ff5a45005438735f04ecf861ffa29707b7
|
36b012177eba594326ce29c5d9e2808b1c075775
|
/venv/lib/python3.6/copy.py
|
cc7a3f7877ea349643e3e6a9df840c5ac5a7a5dd
|
[] |
no_license
|
JoshCrusader/ResidencySystem
|
0000125c5ccb4ee5dea1394fcd197614bb5a631a
|
98a120b3ff8dec447d4d0df032aa5eb188b19e99
|
refs/heads/master
| 2020-12-03T03:37:21.334321 | 2017-07-08T13:57:01 | 2017-07-08T13:57:01 | 95,750,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55 |
py
|
/Users/drjeoffreycruzada/anaconda/lib/python3.6/copy.py
|
[
"joshua_cruzada@dlsu.edu.ph"
] |
joshua_cruzada@dlsu.edu.ph
|
cfeccc1e4f8c10c38e839b7a51e2a6fa60edb50d
|
2464974c859fcae1c665c4220f98906ccd06b89f
|
/kjo/wsgi.py
|
83689a7bf379ff75845c2d3e2b0a91be173e0674
|
[] |
no_license
|
wlsdh85/django_practice
|
91562675f7ed57e3ebfe0ef4eff28e711fa03248
|
761d24dfc3bd53167957c932a4aeebb3740ab262
|
refs/heads/master
| 2023-01-11T00:03:30.020750 | 2020-11-10T11:39:20 | 2020-11-10T11:39:20 | 311,635,062 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 383 |
py
|
"""
WSGI config for kjo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kjo.settings')
application = get_wsgi_application()
|
[
"wlsdh85@gmail.com"
] |
wlsdh85@gmail.com
|
9435de26e438527194333e88e18880da7357c9db
|
d637a0a76e65c5498ab41a01e423ab4a69ff0e5c
|
/src/RL/ex.py
|
94fb6ca867df2df5b5d60b65547019d34b57bad2
|
[] |
no_license
|
webclinic017/DART_Crawling
|
1d8ae692ecdd8ac95cf5334c7ed0bb23375c4a54
|
6937f48cab218dd6a2ec27338d37aca88ba368b4
|
refs/heads/master
| 2022-11-30T14:16:50.865420 | 2020-08-11T03:25:00 | 2020-08-11T03:25:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 845 |
py
|
# import gym
# env = gym.make('CartPole-v0')
# env.reset()
# for _ in range(1000):
# env.render()
# env.step(env.action_space.sample()) # take a random action
# env.close()
import gym
env = gym.make('CartPole-v0')
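# Standard Gym episode loop: reset() returns the initial observation and
# step(action) returns (observation, reward, done, info); done marks the
# end of the episode.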
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finised after {}timesteps".format(t+1))
break
env.close()
# import gym
# env = gym.make('CartPole-v0')
# print(env.action_space)
# print(env.observation_space)
# from gym import spaces
# space = spaces.Discrete(2)
# x = space.sample()
# # assert states an assumption about the program
# assert space.contains(x)
# assert space.n == 2, 'size differs'
|
[
"wnstjd117@naver.com"
] |
wnstjd117@naver.com
|
77d38137276897f4d2a3390e678b2c33e68f0437
|
a8425bd731abfaa2c7bc797ce1b4d00aa8b46fa2
|
/anytree/node/anynode.py
|
82c5b0a7e1c956192ff369fb7eb9c0410dfdcdd7
|
[
"Apache-2.0"
] |
permissive
|
nline/anytree
|
45103dea6b4ce397711a33bb873b3478fc48cd20
|
73492fb96d77a6973f3c48489064a12452646965
|
refs/heads/master
| 2023-02-14T03:42:42.853455 | 2021-01-04T20:41:06 | 2021-01-04T20:41:06 | 287,288,337 | 0 | 0 |
Apache-2.0
| 2020-08-13T13:30:56 | 2020-08-13T13:30:55 | null |
UTF-8
|
Python
| false | false | 3,469 |
py
|
# -*- coding: utf-8 -*-
from .nodemixin import NodeMixin
from .util import _repr
class AnyNode(NodeMixin, object):
def __init__(self, parent=None, children=None, **kwargs):
u"""
A generic tree node with any `kwargs`.
Keyword Args:
parent: Reference to parent node.
children: Iterable with child nodes.
*: Any other given attribute is just stored as object attribute.
        Other than :any:`Node` this class has no default identifier.
It is up to the user to use other attributes for identification.
The `parent` attribute refers the parent node:
>>> from anytree import AnyNode, RenderTree
>>> root = AnyNode(id="root")
>>> s0 = AnyNode(id="sub0", parent=root)
>>> s0b = AnyNode(id="sub0B", parent=s0, foo=4, bar=109)
>>> s0a = AnyNode(id="sub0A", parent=s0)
>>> s1 = AnyNode(id="sub1", parent=root)
>>> s1a = AnyNode(id="sub1A", parent=s1)
>>> s1b = AnyNode(id="sub1B", parent=s1, bar=8)
>>> s1c = AnyNode(id="sub1C", parent=s1)
>>> s1ca = AnyNode(id="sub1Ca", parent=s1c)
>>> root
AnyNode(id='root')
>>> s0
AnyNode(id='sub0')
>>> print(RenderTree(root))
AnyNode(id='root')
├── AnyNode(id='sub0')
│ ├── AnyNode(bar=109, foo=4, id='sub0B')
│ └── AnyNode(id='sub0A')
└── AnyNode(id='sub1')
├── AnyNode(id='sub1A')
├── AnyNode(bar=8, id='sub1B')
└── AnyNode(id='sub1C')
└── AnyNode(id='sub1Ca')
The same tree can be constructed by using the `children` attribute:
>>> root = AnyNode(id="root", children=[
... AnyNode(id="sub0", children=[
... AnyNode(id="sub0B", foo=4, bar=109),
... AnyNode(id="sub0A"),
... ]),
... AnyNode(id="sub1", children=[
... AnyNode(id="sub1A"),
... AnyNode(id="sub1B", bar=8),
... AnyNode(id="sub1C", children=[
... AnyNode(id="sub1Ca"),
... ]),
... ]),
... ])
>>> print(RenderTree(root))
AnyNode(id='root')
├── AnyNode(id='sub0')
│ ├── AnyNode(bar=109, foo=4, id='sub0B')
│ └── AnyNode(id='sub0A')
└── AnyNode(id='sub1')
├── AnyNode(id='sub1A')
├── AnyNode(bar=8, id='sub1B')
└── AnyNode(id='sub1C')
└── AnyNode(id='sub1Ca')
Node attributes can be added, modified and deleted the pythonic way:
>>> root.new = 'a new attribute'
>>> s0b.bar = 110 # modified
>>> del s1b.bar
>>> print(RenderTree(root))
        AnyNode(id='root', new='a new attribute')
        ├── AnyNode(id='sub0')
        │   ├── AnyNode(bar=110, foo=4, id='sub0B')
        │   └── AnyNode(id='sub0A')
        └── AnyNode(id='sub1')
            ├── AnyNode(id='sub1A')
            ├── AnyNode(id='sub1B')
            └── AnyNode(id='sub1C')
                └── AnyNode(id='sub1Ca')
"""
self.__dict__.update(kwargs)
self.parent = parent
if children:
self.children = children
def __repr__(self):
return _repr(self)
|
[
"c0fec0de@gmail.com"
] |
c0fec0de@gmail.com
|
494e0d4136694286e0c11714a7c34d8c62b349d7
|
07c62636b2e88d235200cf3bf2d7948d5ab9f288
|
/old archives/ip-cam/wan/remote-opencv-streaming-live-video-master/client.py
|
89a7654af909dab40d88d299ec237e67ec1f6287
|
[
"MIT"
] |
permissive
|
Whiron/Realtime-object-detection
|
a52bf3703e7c1525a69de1ef3b3aa6ff2f16f54d
|
ac5cf1ceec3dafb094685edec6ae9c6999a1df79
|
refs/heads/master
| 2020-12-15T06:17:24.615617 | 2020-01-20T04:24:12 | 2020-01-20T04:24:12 | 235,018,043 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 584 |
py
|
import cv2
import numpy as np
import socket
import struct
import io
import json
cap=cv2.VideoCapture(0)
clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientsocket.connect(('localhost',8089))
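# Each frame is sent length-prefixed: a struct-packed size header followed by
# the JSON-serialized frame bytes, so the server can delimit messages on the
# TCP stream.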
while(cap.isOpened()):
ret,frame=cap.read()
memfile = io.BytesIO()
np.save(memfile, frame)
memfile.seek(0)
    data = json.dumps(memfile.read().decode('latin-1')).encode('latin-1')
    #print(data)
    clientsocket.sendall(struct.pack('L', len(data)) + data)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
|
[
"sarthak.thakkar16@gmail.com"
] |
sarthak.thakkar16@gmail.com
|
d7cc9563938c88b61d3a40cd101d1af903cb8f89
|
a06337aeb77809e8f39ebf56b4ec2c2dfcd87ede
|
/apostrofo.py
|
4645161ca7ae9690d39516f61ded5bda40092676
|
[] |
no_license
|
jessicameira/python
|
ee43b85043559d6fda8e10d4cb1928cc0f6cac10
|
9504c49fca7555318b0660beb086b8d43f23c2a4
|
refs/heads/master
| 2020-07-05T09:35:21.295091 | 2019-08-20T13:23:28 | 2019-08-20T13:23:28 | 202,610,625 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 75 |
py
|
message = "One of the Python's strenght is the diversity"
print(message)
|
[
"noreply@github.com"
] |
jessicameira.noreply@github.com
|
8cecebde482ccbac8c83a02a31aae64cf3aa2af3
|
640608dc02fcfa9334be5abaa7146d8134df489a
|
/7. Reverse Integer.py
|
30b2b882229e31633995f5535deb2da3e0fce011
|
[] |
no_license
|
lingqinx/leedcode
|
698835d925eeaa4fea84086fcf8413dd28b90ef1
|
b3ba70ed100cd5207f7649266d48cf30a9f600e5
|
refs/heads/master
| 2021-07-02T13:29:40.114567 | 2019-04-04T07:14:24 | 2019-04-04T07:14:24 | 106,343,420 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 341 |
py
|
class Solution(object):
    def reverse(self, x):
        """
        :type x: int
        :rtype: int
        """
        sign = -1 if x < 0 else 1
        result = sign * int(str(abs(x))[::-1])
        # Return 0 when the reversed value overflows a signed 32-bit integer
        if result < -2**31 or result > 2**31 - 1:
            return 0
        return result
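# Expected behavior, assuming the usual LeetCode problem statement:
#   Solution().reverse(123)        -> 321
#   Solution().reverse(-120)       -> -21
#   Solution().reverse(1534236469) -> 0   (reversal overflows 32-bit range)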
|
[
"noreply@github.com"
] |
lingqinx.noreply@github.com
|
d01c60729dde3704ca76f6163bb970a349a73025
|
2ee66e6485f0c68adb6367b3697c4a3cb32c9b7e
|
/tests/test_mpi.py
|
b945048d2f32120065d20655192115c3e52a001e
|
[
"MIT"
] |
permissive
|
yuriyi/devito
|
8836f741b360946ce2b439b88b78bf267279e90b
|
d0a29b3ad3653c20f88b35c2780471cef107b7a2
|
refs/heads/master
| 2020-04-01T05:49:17.221770 | 2018-10-13T18:34:46 | 2018-10-13T18:34:46 | 152,920,977 | 0 | 0 |
MIT
| 2018-10-13T22:52:12 | 2018-10-13T22:52:12 | null |
UTF-8
|
Python
| false | false | 38,484 |
py
|
import numpy as np
import pytest
from conftest import skipif_yask
from devito import (Grid, Constant, Function, TimeFunction, SparseFunction,
SparseTimeFunction, Dimension, ConditionalDimension,
SubDimension, Eq, Inc, Operator)
from devito.ir.iet import Call, Conditional, FindNodes
from devito.mpi import MPI, copy, sendrecv, update_halo
from devito.parameters import configuration
from devito.types import LEFT, RIGHT
@skipif_yask
class TestPythonMPI(object):
@pytest.mark.parallel(nprocs=[2, 4])
def test_partitioning(self):
grid = Grid(shape=(15, 15))
f = Function(name='f', grid=grid)
distributor = grid.distributor
expected = { # nprocs -> [(rank0 shape), (rank1 shape), ...]
2: [(15, 8), (15, 7)],
4: [(8, 8), (8, 7), (7, 8), (7, 7)]
}
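        # With 2 ranks the 15x15 domain is split along one dimension
        # (8 + 7 = 15 points); with 4 ranks a 2x2 process grid splits both.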
assert f.shape == expected[distributor.nprocs][distributor.myrank]
@pytest.mark.parallel(nprocs=[2, 4])
def test_partitioning_fewer_dims(self):
"""Test domain decomposition for Functions defined over a strict subset
of grid-decomposed dimensions."""
size_x, size_y = 16, 16
grid = Grid(shape=(size_x, size_y))
x, y = grid.dimensions
        # A function with fewer dimensions than in `grid`
f = Function(name='f', grid=grid, dimensions=(y,), shape=(size_y,))
distributor = grid.distributor
expected = { # nprocs -> [(rank0 shape), (rank1 shape), ...]
2: [(8,), (8,)],
4: [(8,), (8,), (8,), (8,)]
}
assert f.shape == expected[distributor.nprocs][distributor.myrank]
@pytest.mark.parallel(nprocs=9)
def test_neighborhood_2d(self):
grid = Grid(shape=(3, 3))
x, y = grid.dimensions
distributor = grid.distributor
# Rank map:
# ---------------y
# | 0 | 1 | 2 |
# -------------
# | 3 | 4 | 5 |
# -------------
# | 6 | 7 | 8 |
# -------------
# |
# x
expected = {
0: {x: {LEFT: MPI.PROC_NULL, RIGHT: 3}, y: {LEFT: MPI.PROC_NULL, RIGHT: 1}},
1: {x: {LEFT: MPI.PROC_NULL, RIGHT: 4}, y: {LEFT: 0, RIGHT: 2}},
2: {x: {LEFT: MPI.PROC_NULL, RIGHT: 5}, y: {LEFT: 1, RIGHT: MPI.PROC_NULL}},
3: {x: {LEFT: 0, RIGHT: 6}, y: {LEFT: MPI.PROC_NULL, RIGHT: 4}},
4: {x: {LEFT: 1, RIGHT: 7}, y: {LEFT: 3, RIGHT: 5}},
5: {x: {LEFT: 2, RIGHT: 8}, y: {LEFT: 4, RIGHT: MPI.PROC_NULL}},
6: {x: {LEFT: 3, RIGHT: MPI.PROC_NULL}, y: {LEFT: MPI.PROC_NULL, RIGHT: 7}},
7: {x: {LEFT: 4, RIGHT: MPI.PROC_NULL}, y: {LEFT: 6, RIGHT: 8}},
8: {x: {LEFT: 5, RIGHT: MPI.PROC_NULL}, y: {LEFT: 7, RIGHT: MPI.PROC_NULL}},
}
assert expected[distributor.myrank] == distributor.neighbours
@pytest.mark.parallel(nprocs=2)
def test_halo_exchange_bilateral(self):
"""
Test halo exchange between two processes organised in a 1x2 cartesian grid.
The initial ``data_with_halo`` looks like:
rank0 rank1
0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 1 1 0 0 2 2 2 2 0
0 1 1 1 1 0 0 2 2 2 2 0
0 1 1 1 1 0 0 2 2 2 2 0
0 1 1 1 1 0 0 2 2 2 2 0
0 0 0 0 0 0 0 0 0 0 0 0
After the halo exchange, the following is expected and tested for:
rank0 rank1
0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 1 1 2 1 2 2 2 2 0
0 1 1 1 1 2 1 2 2 2 2 0
0 1 1 1 1 2 1 2 2 2 2 0
0 1 1 1 1 2 1 2 2 2 2 0
0 0 0 0 0 0 0 0 0 0 0 0
"""
grid = Grid(shape=(12, 12))
f = Function(name='f', grid=grid)
distributor = grid.distributor
f.data[:] = distributor.myrank + 1
# Now trigger a halo exchange...
f.data_with_halo # noqa
if distributor.myrank == 0:
assert np.all(f.data_ro_with_halo[1:-1, -1] == 2.)
assert np.all(f.data_ro_with_halo[:, 0] == 0.)
else:
assert np.all(f.data_ro_with_halo[1:-1, 0] == 1.)
assert np.all(f.data_ro_with_halo[:, -1] == 0.)
assert np.all(f.data_ro_with_halo[0] == 0.)
assert np.all(f.data_ro_with_halo[-1] == 0.)
@pytest.mark.parallel(nprocs=2)
def test_halo_exchange_bilateral_asymmetric(self):
"""
Test halo exchange between two processes organised in a 1x2 cartesian grid.
In this test, the size of left and right halo regions are different.
The initial ``data_with_halo`` looks like:
rank0 rank1
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 1 1 1 1 0 0 0 2 2 2 2 0
0 0 1 1 1 1 0 0 0 2 2 2 2 0
0 0 1 1 1 1 0 0 0 2 2 2 2 0
0 0 1 1 1 1 0 0 0 2 2 2 2 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
After the halo exchange, the following is expected and tested for:
rank0 rank1
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 1 1 1 1 2 1 1 2 2 2 2 0
0 0 1 1 1 1 2 1 1 2 2 2 2 0
0 0 1 1 1 1 2 1 1 2 2 2 2 0
0 0 1 1 1 1 2 1 1 2 2 2 2 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
"""
grid = Grid(shape=(12, 12))
f = Function(name='f', grid=grid, space_order=(1, 2, 1))
distributor = grid.distributor
f.data[:] = distributor.myrank + 1
# Now trigger a halo exchange...
f.data_with_halo # noqa
if distributor.myrank == 0:
assert np.all(f.data_ro_with_halo[2:-1, -1] == 2.)
assert np.all(f.data_ro_with_halo[:, 0:2] == 0.)
else:
assert np.all(f.data_ro_with_halo[2:-1, 0:2] == 1.)
assert np.all(f.data_ro_with_halo[:, -1] == 0.)
assert np.all(f.data_ro_with_halo[0:2] == 0.)
assert np.all(f.data_ro_with_halo[-1] == 0.)
@pytest.mark.parallel(nprocs=4)
def test_halo_exchange_quadrilateral(self):
"""
Test halo exchange between four processes organised in a 2x2 cartesian grid.
The initial ``data_with_halo`` looks like:
rank0 rank1
0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 1 1 0 0 2 2 2 2 0
0 1 1 1 1 0 0 2 2 2 2 0
0 1 1 1 1 0 0 2 2 2 2 0
0 1 1 1 1 0 0 2 2 2 2 0
0 0 0 0 0 0 0 0 0 0 0 0
rank2 rank3
0 0 0 0 0 0 0 0 0 0 0 0
0 3 3 3 3 0 0 4 4 4 4 0
0 3 3 3 3 0 0 4 4 4 4 0
0 3 3 3 3 0 0 4 4 4 4 0
0 3 3 3 3 0 0 4 4 4 4 0
0 0 0 0 0 0 0 0 0 0 0 0
After the halo exchange, the following is expected and tested for:
rank0 rank1
0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 1 1 2 1 2 2 2 2 0
0 1 1 1 1 2 1 2 2 2 2 0
0 1 1 1 1 2 1 2 2 2 2 0
0 1 1 1 1 2 1 2 2 2 2 0
0 3 3 3 3 4 3 4 4 4 4 0
rank2 rank3
0 1 1 1 1 2 1 2 2 2 2 0
0 3 3 3 3 4 3 4 4 4 4 0
0 3 3 3 3 4 3 4 4 4 4 0
0 3 3 3 3 4 3 4 4 4 4 0
0 3 3 3 3 4 3 4 4 4 4 0
0 0 0 0 0 0 0 0 0 0 0 0
"""
grid = Grid(shape=(12, 12))
f = Function(name='f', grid=grid)
distributor = grid.distributor
f.data[:] = distributor.myrank + 1
# Now trigger a halo exchange...
f.data_with_halo # noqa
if distributor.myrank == 0:
assert np.all(f.data_ro_with_halo[0] == 0.)
assert np.all(f.data_ro_with_halo[:, 0] == 0.)
assert np.all(f.data_ro_with_halo[1:-1, -1] == 2.)
assert np.all(f.data_ro_with_halo[-1, 1:-1] == 3.)
assert f.data_ro_with_halo[-1, -1] == 4.
elif distributor.myrank == 1:
assert np.all(f.data_ro_with_halo[0] == 0.)
assert np.all(f.data_ro_with_halo[:, -1] == 0.)
assert np.all(f.data_ro_with_halo[1:-1, 0] == 1.)
assert np.all(f.data_ro_with_halo[-1, 1:-1] == 4.)
assert f.data_ro_with_halo[-1, 0] == 3.
elif distributor.myrank == 2:
assert np.all(f.data_ro_with_halo[-1] == 0.)
assert np.all(f.data_ro_with_halo[:, 0] == 0.)
assert np.all(f.data_ro_with_halo[1:-1, -1] == 4.)
assert np.all(f.data_ro_with_halo[0, 1:-1] == 1.)
assert f.data_ro_with_halo[0, -1] == 2.
else:
assert np.all(f.data_ro_with_halo[-1] == 0.)
assert np.all(f.data_ro_with_halo[:, -1] == 0.)
assert np.all(f.data_ro_with_halo[1:-1, 0] == 3.)
assert np.all(f.data_ro_with_halo[0, 1:-1] == 2.)
assert f.data_ro_with_halo[0, 0] == 1.
@skipif_yask
@pytest.mark.parallel(nprocs=[2, 4])
def test_ctypes_neighbours(self):
grid = Grid(shape=(4, 4))
distributor = grid.distributor
PN = MPI.PROC_NULL
attrs = ['xleft', 'xright', 'yleft', 'yright']
expected = { # nprocs -> [(rank0 xleft xright ...), (rank1 xleft ...), ...]
2: [(PN, PN, PN, 1), (PN, PN, 0, PN)],
4: [(PN, 2, PN, 1), (PN, 3, 0, PN), (0, PN, PN, 3), (1, PN, 2, PN)]
}
mapper = dict(zip(attrs, expected[distributor.nprocs][distributor.myrank]))
_, _, obj = distributor._C_neighbours
assert all(getattr(obj.value._obj, k) == v for k, v in mapper.items())
@skipif_yask
class TestCodeGeneration(object):
def test_iet_copy(self):
grid = Grid(shape=(4, 4))
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
iet = copy(f, [t])
assert str(iet.parameters) == """\
(buf(buf_x, buf_y), buf_x_size, buf_y_size, dat(dat_time, dat_x, dat_y),\
dat_time_size, dat_x_size, dat_y_size, otime, ox, oy)"""
assert """\
for (int x = 0; x <= buf_x_size - 1; x += 1)
{
for (int y = 0; y <= buf_y_size - 1; y += 1)
{
buf[x][y] = dat[otime][x + ox][y + oy];
}
}""" in str(iet)
def test_iet_sendrecv(self):
grid = Grid(shape=(4, 4))
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
iet = sendrecv(f, [t])
assert str(iet.parameters) == """\
(dat(dat_time, dat_x, dat_y), dat_time_size, dat_x_size, dat_y_size,\
buf_x_size, buf_y_size, ogtime, ogx, ogy, ostime, osx, osy, fromrank, torank, comm)"""
assert str(iet.body[0]) == """\
float (*restrict dat)[dat_x_size][dat_y_size] __attribute__((aligned(64))) =\
(float (*)[dat_x_size][dat_y_size]) dat_vec;
float bufs[buf_x_size][buf_y_size] __attribute__((aligned(64)));
MPI_Request rrecv;
float bufg[buf_x_size][buf_y_size] __attribute__((aligned(64)));
MPI_Request rsend;
MPI_Status srecv;
MPI_Irecv((float*)bufs,buf_x_size*buf_y_size,MPI_FLOAT,fromrank,13,comm,&rrecv);
gather_f((float*)bufg,buf_x_size,buf_y_size,(float*)dat,dat_time_size,dat_x_size,\
dat_y_size,ogtime,ogx,ogy);
MPI_Isend((float*)bufg,buf_x_size*buf_y_size,MPI_FLOAT,torank,13,comm,&rsend);
MPI_Wait(&rsend,MPI_STATUS_IGNORE);
MPI_Wait(&rrecv,&srecv);
if (fromrank != MPI_PROC_NULL)
{
scatter_f((float*)bufs,buf_x_size,buf_y_size,(float*)dat,dat_time_size,dat_x_size,\
dat_y_size,ostime,osx,osy);
}"""
@pytest.mark.parallel(nprocs=1)
def test_iet_update_halo(self):
grid = Grid(shape=(4, 4))
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
iet = update_halo(f, [t])
assert str(iet.parameters) == """\
(f(t, x, y), mxl, mxr, myl, myr, comm, nb, otime, t_size, x_size, y_size)"""
assert """\
MPI_Comm *comm = (MPI_Comm*) _comm;
struct neighbours *nb = (struct neighbours*) _nb;
if (mxl)
{
sendrecv(f_vec,t_size,x_size + 1 + 1,y_size + 1 + 1,1,y_size + 1 + 1,\
otime,1,0,otime,x_size + 1,0,nb->xright,nb->xleft,comm);
}
if (mxr)
{
sendrecv(f_vec,t_size,x_size + 1 + 1,y_size + 1 + 1,1,y_size + 1 + 1,\
otime,x_size,0,otime,0,0,nb->xleft,nb->xright,comm);
}
if (myl)
{
sendrecv(f_vec,t_size,x_size + 1 + 1,y_size + 1 + 1,x_size + 1 + 1,1,\
otime,0,1,otime,0,y_size + 1,nb->yright,nb->yleft,comm);
}
if (myr)
{
sendrecv(f_vec,t_size,x_size + 1 + 1,y_size + 1 + 1,x_size + 1 + 1,1,\
otime,0,y_size,otime,0,0,nb->yleft,nb->yright,comm);
}"""
@skipif_yask
class TestSparseFunction(object):
@pytest.mark.parallel(nprocs=4)
@pytest.mark.parametrize('coords,expected', [
([(1., 1.), (1., 3.), (3., 1.), (3., 3.)], (0, 1, 2, 3)),
])
def test_ownership(self, coords, expected):
"""Given a sparse point ``p`` with known coordinates, this test checks
that the MPI rank owning ``p`` is retrieved correctly."""
grid = Grid(shape=(4, 4), extent=(4.0, 4.0))
sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coords)
assert len(sf.gridpoints) == len(expected)
assert all(sf._is_owned(i) == (j == grid.distributor.myrank)
for i, j in zip(sf.gridpoints, expected))
@pytest.mark.parallel(nprocs=4)
def test_scatter_gather(self):
"""
Test scattering and gathering of sparse data from and to a single MPI rank.
The initial data distribution looks like:
rank0 rank1 rank2 rank3
[0, 1, 2, 3] [] [] []
Logically (i.e., given point coordinates and domain decomposition), 0 belongs
to rank0, 1 belongs to rank1, etc. Thus, after scattering, the data distribution
is expected to be:
rank0 rank1 rank2 rank3
[0] [1] [2] [3]
Then, locally on each rank, some trivial computation is performed, and we obtain:
rank0 rank1 rank2 rank3
[0] [2] [4] [6]
Finally, we gather the data values and we get:
rank0 rank1 rank2 rank3
[0, 2, 4, 6] [] [] []
"""
grid = Grid(shape=(4, 4), extent=(4.0, 4.0))
# Initialization
if grid.distributor.myrank == 0:
coords = [(1., 1.), (1., 3.), (3., 1.), (3., 3.)]
else:
coords = []
sf = SparseFunction(name='sf', grid=grid, npoint=len(coords), coordinates=coords)
sf.data[:] = list(range(len(coords)))
# Scatter
data = sf._dist_scatter()[sf]
assert len(data) == 1
assert data[0] == grid.distributor.myrank
# Do some local computation
data = data*2
# Gather
sf._dist_gather(data)
if grid.distributor.myrank == 0:
assert np.all(sf.data == [0, 2, 4, 6])
else:
assert not sf.data
@skipif_yask
class TestOperatorSimple(object):
@pytest.mark.parallel(nprocs=[2, 4, 8, 16, 32])
def test_trivial_eq_1d(self):
grid = Grid(shape=(32,))
x = grid.dimensions[0]
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
f.data_with_halo[:] = 1.
op = Operator(Eq(f.forward, f[t, x-1] + f[t, x+1] + 1))
op.apply(time=1)
assert np.all(f.data_ro_domain[1] == 3.)
if f.grid.distributor.myrank == 0:
assert f.data_ro_domain[0, 0] == 5.
assert np.all(f.data_ro_domain[0, 1:] == 7.)
elif f.grid.distributor.myrank == f.grid.distributor.nprocs - 1:
assert f.data_ro_domain[0, -1] == 5.
assert np.all(f.data_ro_domain[0, :-1] == 7.)
else:
assert np.all(f.data_ro_domain[0] == 7.)
@pytest.mark.parallel(nprocs=2)
def test_trivial_eq_1d_save(self):
grid = Grid(shape=(32,))
x = grid.dimensions[0]
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, save=5)
f.data_with_halo[:] = 1.
op = Operator(Eq(f.forward, f[time, x-1] + f[time, x+1] + 1))
op.apply()
time_M = op._prepare_arguments()['time_M']
assert np.all(f.data_ro_domain[1] == 3.)
glb_pos_map = f.grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x]:
assert np.all(f.data_ro_domain[-1, time_M:] == 31.)
else:
assert np.all(f.data_ro_domain[-1, :-time_M] == 31.)
@pytest.mark.parallel(nprocs=4)
def test_trivial_eq_2d(self):
grid = Grid(shape=(8, 8,))
x, y = grid.dimensions
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid, space_order=1)
f.data_with_halo[:] = 1.
eqn = Eq(f.forward, f[t, x-1, y] + f[t, x+1, y] + f[t, x, y-1] + f[t, x, y+1])
op = Operator(eqn)
op.apply(time=1)
# Expected computed values
corner, side, interior = 10., 13., 16.
glb_pos_map = f.grid.distributor.glb_pos_map
assert np.all(f.data_ro_interior[0] == interior)
if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
assert f.data_ro_domain[0, 0, 0] == corner
assert np.all(f.data_ro_domain[0, 1:, :1] == side)
assert np.all(f.data_ro_domain[0, :1, 1:] == side)
elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
assert f.data_ro_domain[0, 0, -1] == corner
assert np.all(f.data_ro_domain[0, :1, :-1] == side)
assert np.all(f.data_ro_domain[0, 1:, -1:] == side)
elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
assert f.data_ro_domain[0, -1, 0] == corner
assert np.all(f.data_ro_domain[0, -1:, 1:] == side)
assert np.all(f.data_ro_domain[0, :-1, :1] == side)
else:
assert f.data_ro_domain[0, -1, -1] == corner
assert np.all(f.data_ro_domain[0, :-1, -1:] == side)
assert np.all(f.data_ro_domain[0, -1:, :-1] == side)
@pytest.mark.parallel(nprocs=4)
def test_multiple_eqs_funcs(self):
grid = Grid(shape=(12,))
x = grid.dimensions[0]
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
f.data_with_halo[:] = 0.
g = TimeFunction(name='g', grid=grid)
g.data_with_halo[:] = 0.
op = Operator([Eq(f.forward, f[t, x+1] + g[t, x-1] + 1),
Eq(g.forward, f[t, x-1] + g[t, x+1] + 1)])
op.apply(time=1)
assert np.all(f.data_ro_domain[1] == 1.)
if f.grid.distributor.myrank == 0:
assert f.data_ro_domain[0, 0] == 2.
assert np.all(f.data_ro_domain[0, 1:] == 3.)
elif f.grid.distributor.myrank == f.grid.distributor.nprocs - 1:
assert f.data_ro_domain[0, -1] == 2.
assert np.all(f.data_ro_domain[0, :-1] == 3.)
else:
assert np.all(f.data_ro_domain[0] == 3.)
# Also check that there are no redundant halo exchanges. Here, only
# two are expected before the `x` Iteration, one for `f` and one for `g`
calls = FindNodes(Call).visit(op)
assert len(calls) == 2
def test_nostencil_implies_nohaloupdate(self):
grid = Grid(shape=(12,))
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
op = Operator([Eq(f.forward, f + 1.),
Eq(g, f + 1.)])
calls = FindNodes(Call).visit(op)
assert len(calls) == 0
@pytest.mark.parallel(nprocs=1)
def test_stencil_nowrite_implies_haloupdate(self):
grid = Grid(shape=(12,))
x = grid.dimensions[0]
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
op = Operator(Eq(g, f[t, x-1] + f[t, x+1] + 1.))
calls = FindNodes(Call).visit(op)
assert len(calls) == 1
@pytest.mark.parallel(nprocs=1)
def test_avoid_redundant_haloupdate(self):
grid = Grid(shape=(12,))
x = grid.dimensions[0]
t = grid.stepping_dim
i = Dimension(name='i')
j = Dimension(name='j')
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
op = Operator([Eq(f.forward, f[t, x-1] + f[t, x+1] + 1.),
Inc(f[t+1, i], 1.), # no halo update as it's an Inc
Eq(g, f[t, j] + 1)]) # access `f` at `t`, not `t+1`!
calls = FindNodes(Call).visit(op)
assert len(calls) == 1
@pytest.mark.parallel(nprocs=2)
def test_redo_haloupdate_due_to_antidep(self):
grid = Grid(shape=(12,))
x = grid.dimensions[0]
t = grid.stepping_dim
f = TimeFunction(name='f', grid=grid)
g = TimeFunction(name='g', grid=grid)
op = Operator([Eq(f.forward, f[t, x-1] + f[t, x+1] + 1.),
Eq(g.forward, f[t+1, x-1] + f[t+1, x+1] + g)])
op.apply(time=0)
calls = FindNodes(Call).visit(op)
assert len(calls) == 2
assert np.all(f.data_ro_domain[1] == 1.)
glb_pos_map = f.grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x]:
assert np.all(g.data_ro_domain[1, 1:] == 2.)
else:
assert np.all(g.data_ro_domain[1, :-1] == 2.)
    def test_haloupdate_not_required(self):
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, space_order=4, time_order=2, save=None)
v = TimeFunction(name='v', grid=grid, space_order=0, time_order=0, save=5)
g = Function(name='g', grid=grid, space_order=0)
i = Function(name='i', grid=grid, space_order=0)
shift = Constant(name='shift', dtype=np.int32)
step = Eq(u.forward, u - u.backward + 1)
g_inc = Inc(g, u * v.subs(grid.time_dim, grid.time_dim - shift))
i_inc = Inc(i, (v*v).subs(grid.time_dim, grid.time_dim - shift))
op = Operator([step, g_inc, i_inc])
# No stencil in the expressions, so no halo update required!
calls = FindNodes(Call).visit(op)
assert len(calls) == 0
@skipif_yask
class TestOperatorAdvanced(object):
@pytest.mark.parallel(nprocs=[4])
def test_injection_wodup(self):
"""
Test injection operator when the sparse points don't need to be replicated
("wodup" -> w/o duplication) over multiple MPI ranks.
"""
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
f = Function(name='f', grid=grid, space_order=0)
f.data[:] = 0.
if grid.distributor.myrank == 0:
coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
else:
coords = []
sf = SparseFunction(name='sf', grid=grid, npoint=len(coords), coordinates=coords)
sf.data[:] = 4.
# This is the situation at this point
# O is a grid point
# * is a sparse point
#
# O --- O --- O --- O
# | * | | * |
# O --- O --- O --- O
# | | | |
# O --- O --- O --- O
# | * | | * |
# O --- O --- O --- O
op = Operator(sf.inject(field=f, expr=sf + 1))
op.apply()
assert np.all(f.data == 1.25)
@pytest.mark.parallel(nprocs=4)
def test_injection_wodup_wtime(self):
"""
Just like ``test_injection_wodup``, but using a SparseTimeFunction
instead of a SparseFunction. Hence, the data scattering/gathering now
has to correctly pack/unpack multidimensional arrays.
"""
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
save = 3
f = TimeFunction(name='f', grid=grid, save=save, space_order=0)
f.data[:] = 0.
if grid.distributor.myrank == 0:
coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
else:
coords = []
sf = SparseTimeFunction(name='sf', grid=grid, nt=save,
npoint=len(coords), coordinates=coords)
sf.data[0, :] = 4.
sf.data[1, :] = 8.
sf.data[2, :] = 12.
op = Operator(sf.inject(field=f, expr=sf + 1))
op.apply()
assert np.all(f.data[0] == 1.25)
assert np.all(f.data[1] == 2.25)
assert np.all(f.data[2] == 3.25)
@pytest.mark.parallel(nprocs=[4])
def test_injection_dup(self):
"""
Test injection operator when the sparse points are replicated over
multiple MPI ranks.
"""
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
x, y = grid.dimensions
f = Function(name='f', grid=grid)
f.data[:] = 0.
if grid.distributor.myrank == 0:
coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
else:
coords = []
sf = SparseFunction(name='sf', grid=grid, npoint=len(coords), coordinates=coords)
sf.data[:] = 4.
# Global view (left) and local view (right, after domain decomposition)
# O is a grid point
# x is a halo point
# A, B, C, D are sparse points
# Rank0 Rank1
# O --- O --- O --- O O --- O --- x x --- O --- O
# | A | | | | A | | | | |
# O --- O --- O --- O O --- O --- x x --- O --- O
# | | C | B | --> | | C | | C | B |
# O --- O --- O --- O x --- x --- x x --- x --- x
# | | D | | Rank2 Rank3
# O --- O --- O --- O x --- x --- x x --- x --- x
# | | C | | C | B |
# O --- O --- x x --- O --- O
# | | D | | D | |
# O --- O --- x x --- O --- O
#
# Expected `f.data` (global view)
#
# 1.25 --- 1.25 --- 0.00 --- 0.00
# | | | |
# 1.25 --- 2.50 --- 2.50 --- 1.25
# | | | |
# 0.00 --- 2.50 --- 3.75 --- 1.25
# | | | |
# 0.00 --- 1.25 --- 1.25 --- 0.00
op = Operator(sf.inject(field=f, expr=sf + 1))
op.apply()
glb_pos_map = grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]: # rank0
assert np.all(f.data_ro_domain == [[1.25, 1.25], [1.25, 2.5]])
elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: # rank1
assert np.all(f.data_ro_domain == [[0., 0.], [2.5, 1.25]])
elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
assert np.all(f.data_ro_domain == [[0., 2.5], [0., 1.25]])
elif RIGHT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
assert np.all(f.data_ro_domain == [[3.75, 1.25], [1.25, 0.]])
@pytest.mark.parallel(nprocs=[4])
def test_interpolation_wodup(self):
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
f = Function(name='f', grid=grid, space_order=0)
f.data[:] = 4.
if grid.distributor.myrank == 0:
coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
else:
coords = []
sf = SparseFunction(name='sf', grid=grid, npoint=len(coords), coordinates=coords)
sf.data[:] = 0.
# This is the situation at this point
# O is a grid point
# * is a sparse point
#
# O --- O --- O --- O
# | * | | * |
# O --- O --- O --- O
# | | | |
# O --- O --- O --- O
# | * | | * |
# O --- O --- O --- O
op = Operator(sf.interpolate(expr=f))
op.apply()
assert np.all(sf.data == 4.)
@pytest.mark.parallel(nprocs=[4])
def test_interpolation_dup(self):
"""
Test interpolation operator when the sparse points are replicated over
multiple MPI ranks.
"""
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
x, y = grid.dimensions
# Init Function+data
f = Function(name='f', grid=grid)
glb_pos_map = grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x]:
f.data[:] = [[1., 1.], [2., 2.]]
else:
f.data[:] = [[3., 3.], [4., 4.]]
if grid.distributor.myrank == 0:
coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
else:
coords = []
sf = SparseFunction(name='sf', grid=grid, npoint=len(coords), coordinates=coords)
sf.data[:] = 0.
# Global view (left) and local view (right, after domain decomposition)
# O is a grid point
# x is a halo point
# A, B, C, D are sparse points
# Rank0 Rank1
# O --- O --- O --- O O --- O --- x x --- O --- O
# | A | | | | A | | | | |
# O --- O --- O --- O O --- O --- x x --- O --- O
# | | C | B | --> | | C | | C | B |
# O --- O --- O --- O x --- x --- x x --- x --- x
# | | D | | Rank2 Rank3
# O --- O --- O --- O x --- x --- x x --- x --- x
# | | C | | C | B |
# O --- O --- x x --- O --- O
# | | D | | D | |
# O --- O --- x x --- O --- O
#
# The initial `f.data` is (global view)
#
# 1. --- 1. --- 1. --- 1.
# | | | |
# 2. --- 2. --- 2. --- 2.
# | | | |
# 3. --- 3. --- 3. --- 3.
# | | | |
# 4. --- 4. --- 4. --- 4.
#
# Expected `sf.data` (global view)
#
# 1.5 --- 2.5 --- 2.5 --- 3.5
op = Operator(sf.interpolate(expr=f))
op.apply()
if grid.distributor.myrank == 0:
assert np.all(sf.data == [1.5, 2.5, 2.5, 3.5])
else:
assert sf.data.size == 0
@pytest.mark.parallel(nprocs=2)
def test_subsampling(self):
grid = Grid(shape=(40,))
x = grid.dimensions[0]
t = grid.stepping_dim
time = grid.time_dim
nt = 9
f = TimeFunction(name='f', grid=grid)
f.data_with_halo[:] = 1.
# Setup subsampled function
factor = 4
nsamples = (nt+factor-1)//factor
times = ConditionalDimension('t_sub', parent=time, factor=factor)
fsave = TimeFunction(name='fsave', grid=grid, save=nsamples, time_dim=times)
eqns = [Eq(f.forward, f[t, x-1] + f[t, x+1]), Eq(fsave, f)]
op = Operator(eqns)
op.apply(time=nt-1)
assert np.all(f.data_ro_domain[0] == fsave.data_ro_domain[nsamples-1])
glb_pos_map = f.grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x]:
assert np.all(fsave.data_ro_domain[nsamples-1, nt-1:] == 256.)
else:
assert np.all(fsave.data_ro_domain[nsamples-1, :-(nt-1)] == 256.)
# Also check there are no redundant halo exchanges
calls = FindNodes(Call).visit(op)
assert len(calls) == 1
# In particular, there is no need for a halo exchange within the conditional
conditional = FindNodes(Conditional).visit(op)
assert len(conditional) == 1
assert len(FindNodes(Call).visit(conditional[0])) == 0
@pytest.mark.parallel(nprocs=2)
def test_arguments_subrange(self):
"""
Test op.apply when a subrange is specified for a distributed dimension.
"""
grid = Grid(shape=(16,))
x = grid.dimensions[0]
f = TimeFunction(name='f', grid=grid)
op = Operator(Eq(f.forward, f + 1.))
op.apply(time=0, x_m=4, x_M=11)
glb_pos_map = f.grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x]:
assert np.all(f.data_ro_domain[1, :4] == 0.)
assert np.all(f.data_ro_domain[1, 4:] == 1.)
else:
assert np.all(f.data_ro_domain[1, :-4] == 1.)
assert np.all(f.data_ro_domain[1, -4:] == 0.)
@pytest.mark.parallel(nprocs=2)
def test_bcs_basic(self):
"""
Test MPI in presence of boundary condition loops. Here, no halo exchange
is expected (as there is no stencil in the computed expression) but we
check that:
* the left BC loop is computed by the leftmost rank only
* the right BC loop is computed by the rightmost rank only
"""
grid = Grid(shape=(20,))
x = grid.dimensions[0]
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', grid=grid, time_order=1)
xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
t_in_centre = Eq(u[t+1, xi], 1)
leftbc = Eq(u[t+1, xleft], u[t+1, xleft+1] + 1)
rightbc = Eq(u[t+1, xright], u[t+1, xright-1] + 1)
op = Operator([t_in_centre, leftbc, rightbc])
op.apply(time_m=1, time_M=1)
glb_pos_map = u.grid.distributor.glb_pos_map
if LEFT in glb_pos_map[x]:
assert np.all(u.data_ro_domain[0, thickness:] == 1.)
assert np.all(u.data_ro_domain[0, :thickness] == range(thickness+1, 1, -1))
else:
assert np.all(u.data_ro_domain[0, :-thickness] == 1.)
assert np.all(u.data_ro_domain[0, -thickness:] == range(2, thickness+2))
@pytest.mark.parallel(nprocs=9)
def test_nontrivial_operator(self):
"""
Test MPI in a non-trivial scenario: ::
* 9 processes logically organised in a 3x3 cartesian grid (as opposed to
          most tests in this module, which only use 2 or 4 processes);
* star-like stencil expression;
* non-trivial Higdon-like BCs;
* simultaneous presence of TimeFunction(grid), Function(grid), and
Function(dimensions)
"""
size_x, size_y = 9, 9
tkn = 2
# Grid and Dimensions
grid = Grid(shape=(size_x, size_y,))
x, y = grid.dimensions
t = grid.stepping_dim
# SubDimensions to implement BCs
xl, yl = [SubDimension.left('%sl' % d.name, d, tkn) for d in [x, y]]
xi, yi = [SubDimension.middle('%si' % d.name, d, tkn, tkn) for d in [x, y]]
xr, yr = [SubDimension.right('%sr' % d.name, d, tkn) for d in [x, y]]
# Functions
u = TimeFunction(name='f', grid=grid)
m = Function(name='m', grid=grid)
c = Function(name='c', grid=grid, dimensions=(x,), shape=(size_x,))
# Data initialization
u.data_with_halo[:] = 0.
m.data_with_halo[:] = 1.
c.data_with_halo[:] = 0.
# Equations
c_init = Eq(c, 1.)
eqn = Eq(u[t+1, xi, yi], u[t, xi, yi] + m[xi, yi] + c[xi] + 1.)
bc_left = Eq(u[t+1, xl, yi], u[t+1, xl+1, yi] + 1.)
bc_right = Eq(u[t+1, xr, yi], u[t+1, xr-1, yi] + 1.)
bc_top = Eq(u[t+1, xi, yl], u[t+1, xi, yl+1] + 1.)
bc_bottom = Eq(u[t+1, xi, yr], u[t+1, xi, yr-1] + 1.)
op = Operator([c_init, eqn, bc_left, bc_right, bc_top, bc_bottom])
op.apply(time=0)
# Expected (global view):
# 0 0 5 5 5 5 5 0 0
# 0 0 4 4 4 4 4 0 0
# 5 4 3 3 3 3 3 4 5
# 5 4 3 3 3 3 3 4 5
# 5 4 3 3 3 3 3 4 5
# 5 4 3 3 3 3 3 4 5
# 0 0 4 4 4 4 4 0 0
# 0 0 5 5 5 5 5 0 0
        assert np.all(u.data_ro_domain[0] == 0)  # The write occurs at t=1
glb_pos_map = u.grid.distributor.glb_pos_map
        # Check corners
if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[0, 0, 5], [0, 0, 4], [5, 4, 3]])
elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[5, 0, 0], [4, 0, 0], [3, 4, 5]])
elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[5, 4, 3], [0, 0, 4], [0, 0, 5]])
elif RIGHT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[3, 4, 5], [4, 0, 0], [5, 0, 0]])
# Check sides
if not glb_pos_map[x] and LEFT in glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[5, 4, 3], [5, 4, 3], [5, 4, 3]])
elif not glb_pos_map[x] and RIGHT in glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[3, 4, 5], [3, 4, 5], [3, 4, 5]])
elif LEFT in glb_pos_map[x] and not glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[5, 5, 5], [4, 4, 4], [3, 3, 3]])
elif RIGHT in glb_pos_map[x] and not glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == [[3, 3, 3], [4, 4, 4], [5, 5, 5]])
# Check center
if not glb_pos_map[x] and not glb_pos_map[y]:
assert np.all(u.data_ro_domain[1] == 3)
class TestIsotropicAcoustic(object):
"""
Test the acoustic wave model with MPI.
"""
# TODO: Cannot mark the following test as `xfail` since this marker
# doesn't cope well with the `parallel` mark. Leaving it commented out
# for the time being...
# @pytest.mark.parametrize('shape, kernel, space_order, nbpml', [
# # 1 tests with varying time and space orders
# ((60, ), 'OT2', 4, 10),
# ])
# @pytest.mark.parallel(nprocs=2)
# def test_adjoint_F(self, shape, kernel, space_order, nbpml):
# from test_adjoint import TestAdjoint
# TestAdjoint().test_adjoint_F('layers', shape, kernel, space_order, nbpml)
pass
if __name__ == "__main__":
configuration['mpi'] = True
TestOperatorAdvanced().test_interpolation_dup()
|
[
"f.luporini12@imperial.ac.uk"
] |
f.luporini12@imperial.ac.uk
|
f766a764745a6648551a3f64259734cd74a93643
|
e347f2ca9b3623031e3825a86ed5f99f543f201c
|
/foodtasker/foodtaskerapp/social_auth_pipeline.py
|
e07da3eab1f4ff05ea3c302d7e0bdda879c6a116
|
[] |
no_license
|
miachenberkeley/FoodDeliverySwift
|
4262c6a009cd2da89647ed019cf3490f05187bde
|
fb535112af0289f2962bdee22abd0a50bade35a6
|
refs/heads/master
| 2021-01-22T07:47:12.872641 | 2017-02-20T16:56:10 | 2017-02-20T16:56:10 | 81,844,368 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
py
|
from foodtaskerapp.models import Customer, Driver
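# python-social-auth pipeline step: after a social login, create a Driver or
# Customer profile for the user based on the 'user_type' field in the request
# data, unless a matching profile already exists.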
def create_user_by_type(backend, user, request, response, *args, **kwargs):
request = backend.strategy.request_data()
if backend.name == 'facebook':
avatar = 'https://graph.facebook.com/%s/picture?type=large' % response['id']
else:
avatar = ''
if request['user_type'] == "driver" and not Driver.objects.filter(user_id=user.id):
Driver.objects.create(user_id=user.id, avatar = avatar)
elif not Customer.objects.filter(user_id=user.id):
Customer.objects.create(user_id=user.id, avatar = avatar)
|
[
"70664914@qq.com"
] |
70664914@qq.com
|
6ecd7aef7feeaf0c0a1b5b863f5a9956e43c4838
|
99094cc79bdbb69bb24516e473f17b385847cb3a
|
/58.Length of Last Word/Solution.py
|
6a986db084927025fd5e816d63158989ce2edd7a
|
[] |
no_license
|
simonxu14/LeetCode_Simon
|
7d389bbfafd3906876a3f796195bb14db3a1aeb3
|
13f4595374f30b482c4da76e466037516ca3a420
|
refs/heads/master
| 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
__author__ = 'Simon'
class Solution(object):
def lengthOfLastWord(self, s):
"""
:type s: str
:rtype: int
"""
li = s.split()
if li:
return len(li[-1])
else:
return 0
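# Expected behavior, assuming the usual LeetCode problem statement:
#   Solution().lengthOfLastWord("Hello World") -> 5
#   Solution().lengthOfLastWord("   ")         -> 0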
|
[
"simonxu14@gmail.com"
] |
simonxu14@gmail.com
|
6c495150425895f7bf1ff1899d6791c5c2caf44a
|
bdf3183327bf2baed43f4ce0d9151d9285d3d707
|
/new_database3.py
|
e33865b5425044ff46dcf75338ba4bbced5690ad
|
[] |
no_license
|
SanketBhave/BE-Project
|
757e00f33ae7d80ea9a199eafac1711ce7589ab1
|
f79abca3bf0c364b305c741e1c392b95b87ff82b
|
refs/heads/master
| 2021-07-13T21:02:59.371882 | 2018-02-01T08:00:59 | 2018-02-01T08:00:59 | 95,019,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,261 |
py
|
import cv2
from scipy import ndimage, signal
from pre import preprocess
import numpy as np
from pymongo import MongoClient
client=MongoClient('localhost',27017)
db=client.mband
collection=db.features_mband
np.set_printoptions(threshold=np.inf)
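# Overview (inferred from the code below): a two-level 3-band filter-bank
# decomposition. Rows, then columns, are convolved with the low-, band- and
# high-pass filters h0/h1/h2 and decimated by 3; the singular values of the
# nine resulting subbands are concatenated into a descriptor stored in MongoDB.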
def mband(img,g):
h90=[0.022033327573,0.015381522616,-0.088169084245,0.051120949834,0.574161374258,0.717567366340,0.247558418377,-0.076963057605,-0.048477254777]
h91=[0.048477254777,0.019991451948,-0.304530024033,0.165478923930,0.308884916012,-0.214155508410 ,-0.074865474330,0.028685132531,0.022033327573]
h92=[0.031294135831,0.013248398005,-0.311552292833,0.497594326648,-0.235117092484,-0.020594576659,0.015375249485,0.009751852004,0]
h0=np.asarray(h90) #low pass
h1=np.asarray(h91) #band pass
h2=np.asarray(h92) #high pass
img=preprocess(img)
    img = np.asarray(img, dtype=float)
[m,n]=img.shape[:2]
    ft11 = np.zeros((m, n))
    ft12 = np.zeros((m, n))
    ft13 = np.zeros((m, n))
for i in range(0,m):
ft11[i,:]=ndimage.convolve(img[i,:].ravel(),h0,mode='wrap')
ft12[i,:]=ndimage.convolve(img[i,:].ravel(),h1,mode='wrap')
ft13[i,:]=ndimage.convolve(img[i,:].ravel(),h2,mode='wrap')
ftd1=signal.decimate(ft11.T,3)
ftd2=signal.decimate(ft12.T,3)
ftd3=signal.decimate(ft13.T,3)
[m,n]=ftd1.shape[:2]
    ft1 = np.zeros((m, n))
    ft2 = np.zeros((m, n))
    ft3 = np.zeros((m, n))
    ft4 = np.zeros((m, n))
    ft5 = np.zeros((m, n))
    ft6 = np.zeros((m, n))
    ft7 = np.zeros((m, n))
    ft8 = np.zeros((m, n))
    ft9 = np.zeros((m, n))
for i in range(0,m):
ft1[i,:]=ndimage.convolve(ftd1[i,:],h0,mode='wrap')
ft2[i,:]=ndimage.convolve(ftd1[i,:],h1,mode='wrap')
ft3[i,:]=ndimage.convolve(ftd1[i,:],h2,mode='wrap')
ft4[i,:]=ndimage.convolve(ftd2[i,:],h0,mode='wrap')
ft5[i,:]=ndimage.convolve(ftd2[i,:],h1,mode='wrap')
ft6[i,:]=ndimage.convolve(ftd2[i,:],h2,mode='wrap')
ft7[i,:]=ndimage.convolve(ftd3[i,:],h0,mode='wrap')
ft8[i,:]=ndimage.convolve(ftd3[i,:],h1,mode='wrap')
ft9[i,:]=ndimage.convolve(ftd3[i,:],h2,mode='wrap')
fm1=signal.decimate(ft1.T,3)
fm2=signal.decimate(ft2.T,3)
fm3=signal.decimate(ft3.T,3)
fm4=signal.decimate(ft4.T,3)
fm5=signal.decimate(ft5.T,3)
fm6=signal.decimate(ft6.T,3)
fm7=signal.decimate(ft7.T,3)
fm8=signal.decimate(ft8.T,3)
fm9=signal.decimate(ft9.T,3)
#print fm2
#print fm1
#[m,n]=fm1.shape[:2]
#print m,n
#a = [[0] * fm1[1,:] for j in range(m)]
#a=np.asarray(a)
'''for j in range(90000):
a.append(j)'''
'''print fm1.shape
fm1=np.append(fm1,fm1)
fm1=np.append(fm1,fm2)
fm1=np.append(fm1,fm3)
fm1=np.append(fm1,fm4)
fm1=np.append(fm1,fm5)
fm1=np.append(fm1,fm6)
fm1=np.append(fm1,fm7)
fm1=np.append(fm1,fm8)
fm1=np.append(fm1,fm9)
print fm1
print fm1.shape[:2]
'''
U, s1, V = np.linalg.svd(fm1, full_matrices=True)
#print s1
U1, s2, V3= np.linalg.svd(fm2, full_matrices=True)
#print s2
U1, s3, V3= np.linalg.svd(fm3, full_matrices=True)
U1, s4, V3= np.linalg.svd(fm4, full_matrices=True)
U1, s5, V3= np.linalg.svd(fm5, full_matrices=True)
U1, s6, V3= np.linalg.svd(fm6, full_matrices=True)
U1, s7, V3= np.linalg.svd(fm7, full_matrices=True)
U1, s8, V3= np.linalg.svd(fm8, full_matrices=True)
U1, s9, V3= np.linalg.svd(fm9, full_matrices=True)
s1=np.concatenate((s1,s2,s3,s4,s5,s6,s7,s8,s9))
#print s1
#print s1.shape
#print len(s1)
    collection.insert_one({"ID": g, "descriptor": s1.tolist()})
i=191
g=548
while(i<500):
j=str(i)
#k=g+1
img=cv2.imread("/home/ubuntu/Downloads/UCIDPNG/"+j+".png",0)
#var="/home/ubuntu/coil-20/obj3__"+j+".png"
#print var
mband(img,g)
    print(g, i)
i=i+1
g=g+1
|
[
"noreply@github.com"
] |
SanketBhave.noreply@github.com
|
e072a7a9583e09391630a836cd7ad19fcc54c685
|
b597f3767f5027e26f0f83d83c9756ac7c001e97
|
/train_multimodal_model.py
|
4b4eb9c2ce5d3bdeb80f5c478d45285c654c47d4
|
[] |
no_license
|
sandropezzelle/mtl-amodal
|
9d1d878e4b36b8f8ec8f81af37bda19e50dde884
|
7b505070924c2d6eae86f979f2f70a78eb926a06
|
refs/heads/master
| 2020-03-17T20:45:53.094196 | 2018-07-10T17:42:56 | 2018-07-10T17:42:56 | 133,927,523 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,139 |
py
|
import argparse
import os
import pickle
import numpy as np
import utils
full_images = {}
ratios = {}
r_dict = {}
data_path = ''
def read_ind_files(path):
"""
reads the indices of the images for a given file
"""
fin = open(path, 'r')
links = []
for line in fin:
links.append(data_path + line.strip())
return links
def read_indices(repository_path):
"""
it goes through train/test/validation files
"""
path = repository_path + '/code/data_split/'
tr = path + 'train_ids.txt'
v = path + 'valid_ids.txt'
t = path + 'test_ids.txt'
tr_links = read_ind_files(tr)
v_links = read_ind_files(v)
t_links = read_ind_files(t)
return tr_links, v_links, t_links
def read_images(links, size):
"""
for a given list of paths, it reads the image,
prepares it for input and it calculates the target value
"""
dim = 203
inp = np.zeros((size, dim, dim, 3))
m_out = np.zeros((size, 3))
q_out = np.zeros((size, 9))
r_out = np.zeros((size, 17))
count = 0
for link in links[:size]:
res_img = utils.load_image(link, dim)
inp[count] = res_img
cat = link.strip().split('/')[-2][-2:]
for i in range(9):
q_out[count][i] = ratios[cat][str(i)]
r_out[count][r_dict[cat]] = 1.0
if cat[1] == 'Y' or cat[0] == 'X':
if cat[1] == 'Y':
ratio_val = 0.0
else:
ratio_val = 1.0
else:
ratio_val = float(cat[0]) / (float(cat[1]) + float(cat[0]))
if ratio_val < 0.5:
m_out[count][0] = 1.0
if ratio_val == 0.5:
m_out[count][1] = 1.0
if ratio_val > 0.5:
m_out[count][2] = 1.0
count += 1
if count % 100 == 0:
print(count)
return inp, m_out, q_out, r_out
def create_ratio_dict(ratios):
count = 0
r = sorted(ratios.keys())
print(r)
for i in range(len(r)):
r_dict[r[i]] = count
count += 1
if __name__ == '__main__':
embeddings_filename = "/mnt/povobackup/clic/sandro.pezzelle/corpus-and-vectors/GoogleNews-vectors-negative300.txt"
parser = argparse.ArgumentParser()
parser.add_argument("--lang_dataset_path", type=str, default="lang_dataset/")
parser.add_argument("--vision_dataset_path", type=str, default="vision_dataset/")
parser.add_argument("--embeddings_filename", type=str, default=embeddings_filename)
parser.add_argument("--num_epochs", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=32)
args = parser.parse_args()
# Load language data
    index_filename = os.path.join(args.lang_dataset_path, "index.pkl")
print("Loading filename: {}".format(index_filename))
with open(index_filename, mode="rb") as in_file:
index = pickle.load(in_file)
token2id = index["token2id"]
id2token = index["id2token"]
    train_filename = os.path.join(args.lang_dataset_path, "train.pkl")
    print("Loading filename: {}".format(train_filename))
    with open(train_filename, mode="rb") as in_file:
train = pickle.load(in_file)
dataset_tr = train["dataset_tr"]
tr_m_out = train["tr_m_out"]
tr_q_out = train["tr_q_out"]
tr_r_out = train["tr_r_out"]
    test_filename = os.path.join(args.lang_dataset_path, "test.pkl")
    print("Loading filename: {}".format(test_filename))
    with open(test_filename, mode="rb") as in_file:
test = pickle.load(in_file)
dataset_t = test["dataset_t"]
t_m_out = test["t_m_out"]
t_q_out = test["t_q_out"]
t_r_out = test["t_r_out"]
    valid_filename = os.path.join(args.lang_dataset_path, "valid.pkl")
    print("Loading filename: {}".format(valid_filename))
    with open(valid_filename, mode="rb") as in_file:
valid = pickle.load(in_file)
dataset_v = valid["dataset_v"]
v_m_out = valid["v_m_out"]
v_q_out = valid["v_q_out"]
v_r_out = valid["v_r_out"]
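    # Build a token-id -> 300-d embedding matrix from the word2vec text file;
    # rows for out-of-vocabulary tokens (and the padding index 0) stay zero.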
print("Loading filename: {}".format(args.embeddings_filename))
embeddings_index = {}
with open(args.embeddings_filename) as in_file:
for line in in_file:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
embedding_matrix = np.zeros((len(token2id) + 1, 300))
for word, i in token2id.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
# Load visual data
tr_inds, v_inds, t_inds = read_indices(args.vision_dataset_path)
ratios = utils.read_qprobs(args.vision_dataset_path)
tr_size = 11900
v_size = 1700
create_ratio_dict(ratios)
tr_inp, tr_m_out, tr_q_out, tr_r_out = read_images(tr_inds, tr_size)
v_inp, v_m_out, v_q_out, v_r_out = read_images(v_inds, v_size)
|
[
"claudio.greco@unitn.it"
] |
claudio.greco@unitn.it
|
56f0bbe6de6c31d891e2dfb2024dcb94c61d0693
|
c50eb81b57f3dac7c0273d1781e4ce5ba9c41584
|
/petBookApi/petBook/urls.py
|
81eb4935208c3a84ba07f577ac99f51978181a3f
|
[] |
no_license
|
rsbabcock/pet-book
|
474b7c6ffb27a2765cbda359b796e089ccc78f45
|
ccd548f119fcad93ee85415dddcd3b6c9f1d94d9
|
refs/heads/master
| 2020-03-27T08:57:34.884012 | 2018-10-13T19:06:47 | 2018-10-13T19:06:47 | 146,301,896 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,239 |
py
|
"""petBook URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from django.conf import settings
from django.conf.urls.static import static
from petBookApi.views import *
from petBookApi.views import PetImageViewSet
from petBookApi.serializers import *
router = routers.DefaultRouter()
router.register(r'users', owner_view.UserViewSet)
router.register(r'owners', owner_view.OwnerViewSet)
router.register(r'pet-types', pet_type_view.PetTypeViewSet)
router.register(r'breeds', breed_view.BreedViewSet)
router.register(r'pets', pet_view.PetViewSet)
router.register(r'pet-image', PetImageViewSet, base_name='pet-image')
router.register(r'get-pet-image', get_pet_image_view.GetImageViewSet)
router.register(r'commands', command_view.CommandViewSet)
router.register(r'notes', note_view.NoteViewSet)
router.register(r'follow', follow_view.FollowViewSet)
router.register(r'allergies', allergy_view.AllergyViewSet)
router.register(r'followed-pets', FollowedViewSet, base_name='followed-pets')
router.register(r'user-pets', UserPetList, base_name='user-pets')
router.register(r'get-owner', GetOwnerList, base_name='get-owner')
router.register(r'user-allergies', UserAllergyViewSet, base_name='user-allergies')
router.register(r'user-commands', UserCommandViewSet, base_name='user-commands')
router.register(r'pet-allergies', pet_allergies_view.PetAllergiesViewSet)
router.register(r'pet-commands', pet_commands_view.PetCommandViewSet)
router.register(r'pet-notes', pet_note_view.PetNoteViewSet)
router.register(r'create-pet', CreatePetViewSet, base_name='create-pet')
router.register(r'create-allergy', CreateAllergyViewSet, base_name='create-allergy')
router.register(r'create-pet-allergy', CreatePetAllergyViewSet, base_name='create-pet-allergy')
router.register(r'create-command', CreateCommandViewSet, base_name='create-command')
router.register(r'create-pet-command', CreatePetCommandViewSet, base_name='create-pet-command')
router.register(r'create-follow', CreateFollowViewSet, base_name='create-follow')
router.register(r'edit-note', EditNoteViewSet, base_name='edit-note')
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^register/', register_view.register_user),
url(r'^api-token-auth/', obtain_auth_token),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rachael.s.babcock@gmail.com"
] |
rachael.s.babcock@gmail.com
|
f4269fd9d2329fc2e30627a2658cf2964f61b129
|
a45e3ce2690db6e059721c496acf52dd7728fd0c
|
/main.py
|
4000f042dffbc043bf40e36f5e344752c8ae9466
|
[] |
no_license
|
Kaminari84/GCloud-ReflectionSlackBot
|
e08ae860737450b70bb87bd63c767e26915affd4
|
b694b3f57d793c60a5cb8daee0f40a5babc3d01f
|
refs/heads/master
| 2022-12-09T22:50:50.905166 | 2017-07-21T03:04:39 | 2017-07-21T03:04:39 | 97,900,123 | 0 | 0 | null | 2022-12-07T23:58:09 | 2017-07-21T03:02:58 |
Python
|
UTF-8
|
Python
| false | false | 4,471 |
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#SQL - https://cloud.google.com/sql/docs/mysql/connect-admin-ip#configure-instance-mysql
#Options for connecting - https://cloud.google.com/sql/docs/mysql/external-connection-methods
#SQL connection - https://cloud.google.com/sql/docs/mysql/connect-admin-ip
# [START app]
import datetime
import logging
import os
import socket
import time
import json
from slackclient import SlackClient
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy
logging.basicConfig(level=logging.INFO)
# starterbot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
# constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "do"
# instantiate Slack & Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
app = Flask(__name__)
def is_ipv6(addr):
"""Checks if a given address is an IPv6 address."""
try:
socket.inet_pton(socket.AF_INET6, addr)
return True
except socket.error:
return False
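# e.g. is_ipv6('2001:db8::1') -> True, is_ipv6('203.0.113.7') -> False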
# Environment variables are defined in app.yaml.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Visit(db.Model):
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime())
user_ip = db.Column(db.String(46))
def __init__(self, timestamp, user_ip):
self.timestamp = timestamp
self.user_ip = user_ip
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
logging.info("Hello world being called!")
user_ip = request.remote_addr
logging.info("Got user ip:" + user_ip)
# Keep only the first two octets of the IP address.
if is_ipv6(user_ip):
user_ip = ':'.join(user_ip.split(':')[:2])
else:
user_ip = '.'.join(user_ip.split('.')[:2])
visit = Visit(
user_ip=user_ip,
timestamp=datetime.datetime.utcnow()
)
db.session.add(visit)
db.session.commit()
visits = Visit.query.order_by(sqlalchemy.desc(Visit.timestamp)).limit(10)
results = [
'Time: {} Addr: {}'.format(x.timestamp, x.user_ip)
for x in visits]
output = 'Last 10 visits:\n{}'.format('\n'.join(results))
return 'Hello World Rafal test with new me: ' + BOT_ID + '<br />'+output, 200, {'Content-Type': 'text/plain; charset=utf-8'}
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
#logging.warning("Starting the slack bot stuff")
#logging.info("BOT_ID: " + BOT_ID)
#logging.info("SLACK_BOT_TOKEN: " + SLACK_BOT_TOKEN)
#READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
#if slack_client.rtm_connect():
# logging.warning("StarterBot connected and running!")
# while True:
# raw_cmd = slack_client.rtm_read()
# if raw_cmd and len(raw_cmd) > 0:
# n = 0
# for msg in raw_cmd:
# n = n+1
# if 'type' in msg:
# logging.info( "Msg n: " + str(n) + str(msg['type']) )
# # logging.info("[",n,"] Command type:")
# #command, channel = parse_slack_output(raw_cmd)
# #if command and channel:
# # handle_command(command, channel)
# time.sleep(READ_WEBSOCKET_DELAY)
#else:
# logging.warning("Connection failed. Invalid Slack token or bot ID?")
# [END app]
|
[
"kocielnik@HW0972.local"
] |
kocielnik@HW0972.local
|
5b23cf8b083f27bfba637e09ee48d567a1020f20
|
c54a41e62946758c60e175c2ee70a0a3a15823d4
|
/test2.py
|
0a222fcb6da5f299189e3aba07d73d40a5765e14
|
[] |
no_license
|
NienkeUijlen/Oefenen
|
d226327e22071c1c855dbebcd97a8613c88b13ab
|
3bae72f9032d3f5271b02c244fbe0efbdcebb21a
|
refs/heads/master
| 2021-01-23T13:56:47.056881 | 2015-05-18T19:12:00 | 2015-05-18T19:12:00 | 34,312,685 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,123 |
py
|
# Read a 3x3 tic-tac-toe board, one row of digits per line
# (1 = player 1, 2 = player 2, 0 assumed empty).
eerste_rij=list(input())
tweede_rij=list(input())
derde_rij=list(input())
a=int(eerste_rij[0])
b=int(eerste_rij[1])
c=int(eerste_rij[2])
d=int(tweede_rij[0])
e=int(tweede_rij[1])
f=int(tweede_rij[2])
g=int(derde_rij[0])
h=int(derde_rij[1])
i=int(derde_rij[2])
# The != 0 guard keeps a line of empty cells from short-circuiting the checks.
if a == b == c != 0:
    if a == 1:
        print("Player 1 wins")
    elif a == 2:
        print("Player 2 wins")
elif d == e == f != 0:
    if d == 1:
        print("Player 1 wins")
    elif d == 2:
        print("Player 2 wins")
elif g == h == i != 0:
    if g == 1:
        print("Player 1 wins")
    elif g == 2:
        print("Player 2 wins")
elif a == d == g != 0:
    if a == 1:
        print("Player 1 wins")
    elif a == 2:
        print("Player 2 wins")
elif b == e == h != 0:
    if b == 1:
        print("Player 1 wins")
    elif b == 2:
        print("Player 2 wins")
elif c == f == i != 0:
    if c == 1:
        print("Player 1 wins")
    elif c == 2:
        print("Player 2 wins")
elif a == e == i != 0:
    if a == 1:
        print("Player 1 wins")
    elif a == 2:
        print("Player 2 wins")
elif c == e == g != 0:
    if c == 1:
        print("Player 1 wins")
    elif c == 2:
        print("Player 2 wins")
else:
    print("No winner")
|
[
"n.m.uijlen@students.uu.nl"
] |
n.m.uijlen@students.uu.nl
|
8a9097cb3320d30e788a574515548ffd3d9a74c7
|
5894528e86efcc243febb3e37d1f6322fe0c7baa
|
/CodeJam2018/PracticeSession/numberGuesses/testing_tool.py
|
aeb3e13131420c80ea480a45a00bf68bc0575f61
|
[
"MIT"
] |
permissive
|
paolodelia99/google-code-jam-challenges
|
d758bf5da05e72165698d140ab37495d7c92a292
|
98945ab6c21a78225674bfae0ac3574cb20967c3
|
refs/heads/master
| 2022-04-28T20:16:33.451203 | 2020-04-21T12:32:12 | 2020-04-21T12:32:12 | 246,570,041 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,613 |
py
|
""" Python script for local testing (compatible with both Python 2 and Python 3)
Disclaimer: this is a way to test your solutions, but it is NOT the real judging
system. The judging system behavior might be different.
"""
from __future__ import print_function
import subprocess
import sys
USAGE_MSG = """
Usage:
Linux and Mac users:
From your terminal, run
python testing_tool.py command_to_run_your_script_or_executable
Note that command_to_run_your_script_or_executable is read as a list of
arguments, so you should NOT wrap it with quotation marks.
Examples:
C++, after compilation:
python testing_tool.py ./my_binary
Python:
python testing_tool.py python my_code.py
Java, after compilation:
python testing_tool.py java my_main_class_name
See https://code.google.com/codejam/resources/faq#languages for how we compile
and run your solution in the language of your choice.
Windows users:
Follow the instructions for Linux and Mac users if you are familiar with
terminal tools on Windows. Otherwise, please be advised that this script might
not work with Python 2 (it works with Python 3). In addition, if you cannot
pass arguments to Python, you will need to modify the "cmd = sys.argv[1:]"
line below.
"""
# Hard-coded list for numbers to guess. We encourage you to modify this list,
# as well as A, B, N below as you wish, for more thorough testing.
CORRECT_GUESS_LIST = [15,25,12,9,7,1,30]
A = 0
B = 30
N = 10
assert (A < min(CORRECT_GUESS_LIST)) and (max(CORRECT_GUESS_LIST) <= B)
NUM_TEST_CASES = len(CORRECT_GUESS_LIST)
# You can set PRINT_INTERACTION_HISTORY to True to print out the interaction
# history between your code and the judge.
PRINT_INTERACTION_HISTORY = False
"""Helper functions"""
def JudgePrint(p, s):
# Print the judge output to your code's input stream. Log this interaction
# to console (stdout) if PRINT_INTERACTION_HISTORY is True.
print(s, file=p.stdin)
p.stdin.flush()
if PRINT_INTERACTION_HISTORY:
print("Judge prints:", s)
def PrintSubprocessResults(p):
# Print the return code and stderr output for your code.
print("Your code finishes with exit status {}.".format(p.returncode))
code_stderr_output = p.stderr.read()
if code_stderr_output:
print("The stderr output of your code is:")
sys.stdout.write(code_stderr_output)
else:
print("Your code doesn't have stderr output.")
def WaitForSubprocess(p):
# Wait for your code to finish and print the stderr output of your code for
# debugging purposes.
if p.poll() is None:
print("Waiting for your code to finish...")
p.wait()
PrintSubprocessResults(p)
def CheckSubprocessExit(p, case_id):
# Exit if your code finishes in the middle of a test case.
if p.poll() is not None:
print("Your code exited early, in the middle of Case #{}.".format(case_id))
PrintSubprocessResults(p)
sys.exit(-1)
def WrongAnswerExit(p, case_id, error_msg):
print("Case #{} failed: {}".format(case_id, error_msg))
try:
JudgePrint(p, "WRONG_ANSWER")
except IOError:
print("Failed to print WRONG_ANSWER because your code finished already.")
WaitForSubprocess(p)
sys.exit(-1)
"""Main function begins"""
# Retrieve the command to run your code from the arguments.
# If you cannot pass arguments to Python when running this testing tool, please
# replace sys.argv[1:] with the command list to run your code.
# e.g. C++ users: cmd = ["./my_binary"]
# Python users: cmd = [sys.executable, "my_code.py"]
# Java users: cmd = ["java", "my_main_class_name"]
cmd = sys.argv[1:]
assert cmd, "There should be at least one argument." + USAGE_MSG
if (cmd[0] == "-h") or (cmd[0] == "-help") or (cmd[0] == "--h") or (
cmd[0] == "--help"):
print(USAGE_MSG)
sys.exit(0)
# Run your code in a separate process. You can debug your code by printing to
# stderr inside your code, or adding print statements in this testing tool.
# Note that your stderr output will be printed by this testing tool only after
# your code finishes, e.g. if your code hangs, you wouldn't get your stderr
# output.
try:
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
universal_newlines=True)
except Exception as e:
print("Failed to start running your code. Error:")
print(e)
sys.exit(-1)
JudgePrint(p, NUM_TEST_CASES)
print("number of test cases: {}".format(NUM_TEST_CASES))
for i in range(NUM_TEST_CASES):
if PRINT_INTERACTION_HISTORY:
print("Test Case #{}:".format(i + 1))
JudgePrint(p, "{} {}".format(A, B)) # the range (A, B]
JudgePrint(p, N) # number of tries, N
p.stdin.flush()
answer = CORRECT_GUESS_LIST[i]
test_case_passed = False
for _ in range(N):
# Detect whether the your code has finished running.
CheckSubprocessExit(p, i + 1)
user_input = None
try:
user_input = p.stdout.readline()
q = int(user_input)
except:
# Note that your code might finish after the first CheckSubprocessExit
# check above but before the readline(), so we will need to again check
# whether your code has finished.
CheckSubprocessExit(p, i + 1)
exit_msg = ""
if user_input == "":
exit_msg = ("Read an empty string for the guess. This might happen "
"because your code exited early, or printed an extra "
"newline character.")
elif user_input is None:
exit_msg = (
"Unable to read the guess. This might happen because your "
"code exited early, printed an extra new line character, or did "
"not print the output correctly.")
else:
exit_msg = ("Failed to read the guess. Expected an integer ending with "
"one new line character. Read \"{}\" (quotes added to "
"isolate your output) instead.").format(user_input)
WrongAnswerExit(p, i + 1, exit_msg)
if PRINT_INTERACTION_HISTORY:
print("Judge reads:", q)
if (q <= A) or (q > B):
WrongAnswerExit(p, i + 1, "Your guess, {}, is out of range!".format(q))
if q == answer:
JudgePrint(p, "CORRECT")
test_case_passed = True
break
elif q < answer:
JudgePrint(p, "TOO_SMALL")
else:
JudgePrint(p, "TOO_BIG")
if not test_case_passed:
WrongAnswerExit(p, i + 1, "Too many queries.")
extra_output = p.stdout.readline()
WaitForSubprocess(p)
if extra_output == "":
print("Congratulations! All test cases passed :)")
else:
print("Wrong Answer because of extra output:")
sys.stdout.write(extra_output)
sys.exit(-1)
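# A minimal solution this tool can drive (sketch, not part of the judge):
# binary search over the valid range (A, B], using at most N guesses.
# Run it as: python testing_tool.py python my_code.py
#
# t = int(input())
# for _ in range(t):
#     a, b = map(int, input().split())
#     n = int(input())
#     lo, hi = a + 1, b
#     for _ in range(n):
#         guess = (lo + hi) // 2
#         print(guess, flush=True)
#         verdict = input()
#         if verdict == "CORRECT":
#             break
#         elif verdict == "TOO_SMALL":
#             lo = guess + 1
#         else:  # "TOO_BIG"
#             hi = guess - 1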
|
[
"paolo.delia99@gmail.com"
] |
paolo.delia99@gmail.com
|
ff44601100038aba800c66cb8d18e73458d7b4df
|
bdf86d69efc1c5b21950c316ddd078ad8a2f2ec0
|
/venv/Lib/site-packages/twisted/application/runner/_runner.py
|
66f1f11ee0f27fe0b61e6dfa8b9fee0befdaa03b
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
DuaNoDo/PythonProject
|
543e153553c58e7174031b910fd6451399afcc81
|
2c5c8aa89dda4dec2ff4ca7171189788bf8b5f2c
|
refs/heads/master
| 2020-05-07T22:22:29.878944 | 2019-06-14T07:44:35 | 2019-06-14T07:44:35 | 180,941,166 | 1 | 1 | null | 2019-06-04T06:27:29 | 2019-04-12T06:05:42 |
Python
|
UTF-8
|
Python
| false | false | 5,763 |
py
|
# -*- test-case-name: twisted.application.runner.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted application runner.
"""
from os import kill
from signal import SIGTERM
from sys import stderr
from attr import attrib, attrs, Factory
from twisted.logger import (
globalLogBeginner, textFileLogObserver,
FilteringLogObserver, LogLevelFilterPredicate,
LogLevel, Logger,
)
from ._exit import exit, ExitStatus
from ._pidfile import nonePIDFile, AlreadyRunningError, InvalidPIDFileError
@attrs(frozen=True)
class Runner(object):
"""
Twisted application runner.
@cvar _log: The logger attached to this class.
@type _log: L{Logger}
@ivar _reactor: The reactor to start and run the application in.
@type _reactor: L{IReactorCore}
@ivar _pidFile: The file to store the running process ID in.
@type _pidFile: L{IPIDFile}
@ivar _kill: Whether this runner should kill an existing running
instance of the application.
@type _kill: L{bool}
@ivar _defaultLogLevel: The default log level to start the logging
system with.
@type _defaultLogLevel: L{constantly.NamedConstant} from L{LogLevel}
@ivar _logFile: A file stream to write logging output to.
@type _logFile: writable file-like object
@ivar _fileLogObserverFactory: A factory for the file log observer to
use when starting the logging system.
@type _pidFile: callable that takes a single writable file-like object
argument and returns a L{twisted.logger.FileLogObserver}
@ivar _whenRunning: Hook to call after the reactor is running;
this is where the application code that relies on the reactor gets
called.
@type _whenRunning: callable that takes the keyword arguments specified
by C{whenRunningArguments}
@ivar _whenRunningArguments: Keyword arguments to pass to
C{whenRunning} when it is called.
@type _whenRunningArguments: L{dict}
@ivar _reactorExited: Hook to call after the reactor exits.
@type _reactorExited: callable that takes the keyword arguments
specified by C{reactorExitedArguments}
@ivar _reactorExitedArguments: Keyword arguments to pass to
C{reactorExited} when it is called.
@type _reactorExitedArguments: L{dict}
"""
_log = Logger()
_reactor = attrib()
_pidFile = attrib(default=nonePIDFile)
_kill = attrib(default=False)
_defaultLogLevel = attrib(default=LogLevel.info)
_logFile = attrib(default=stderr)
_fileLogObserverFactory = attrib(default=textFileLogObserver)
_whenRunning = attrib(default=lambda **_: None)
_whenRunningArguments = attrib(default=Factory(dict))
_reactorExited = attrib(default=lambda **_: None)
_reactorExitedArguments = attrib(default=Factory(dict))
def run(self):
"""
Run this command.
"""
pidFile = self._pidFile
self.killIfRequested()
try:
with pidFile:
self.startLogging()
self.startReactor()
self.reactorExited()
except AlreadyRunningError:
exit(ExitStatus.EX_CONFIG, "Already running.")
return # When testing, patched exit doesn't exit
def killIfRequested(self):
"""
If C{self._kill} is true, attempt to kill a running instance of the
application.
"""
pidFile = self._pidFile
if self._kill:
if pidFile is nonePIDFile:
exit(ExitStatus.EX_USAGE, "No PID file specified.")
return # When testing, patched exit doesn't exit
try:
pid = pidFile.read()
except EnvironmentError:
exit(ExitStatus.EX_IOERR, "Unable to read PID file.")
return # When testing, patched exit doesn't exit
except InvalidPIDFileError:
exit(ExitStatus.EX_DATAERR, "Invalid PID file.")
return # When testing, patched exit doesn't exit
self.startLogging()
self._log.info("Terminating process: {pid}", pid=pid)
kill(pid, SIGTERM)
exit(ExitStatus.EX_OK)
return # When testing, patched exit doesn't exit
def startLogging(self):
"""
Start the L{twisted.logger} logging system.
"""
logFile = self._logFile
fileLogObserverFactory = self._fileLogObserverFactory
fileLogObserver = fileLogObserverFactory(logFile)
logLevelPredicate = LogLevelFilterPredicate(
defaultLogLevel=self._defaultLogLevel
)
filteringObserver = FilteringLogObserver(
fileLogObserver, [logLevelPredicate]
)
globalLogBeginner.beginLoggingTo([filteringObserver])
def startReactor(self):
"""
Register C{self._whenRunning} with the reactor so that it is called
once the reactor is running, then start the reactor.
"""
self._reactor.callWhenRunning(self.whenRunning)
self._log.info("Starting reactor...")
self._reactor.run()
def whenRunning(self):
"""
Call C{self._whenRunning} with C{self._whenRunningArguments}.
@note: This method is called after the reactor starts running.
"""
self._whenRunning(**self._whenRunningArguments)
def reactorExited(self):
"""
Call C{self._reactorExited} with C{self._reactorExitedArguments}.
@note: This method is called after the reactor exits.
"""
self._reactorExited(**self._reactorExitedArguments)
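# A minimal usage sketch (not part of this module; assumes the global
# reactor). attrs strips the leading underscore from attribute names, so
# the constructor arguments are reactor=, pidFile=, kill=, and so on:
#
#     from twisted.internet import reactor
#     Runner(reactor=reactor, whenRunning=lambda **_: print("running")).run()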
|
[
"teadone@naver.com"
] |
teadone@naver.com
|
b281ebdf28c8bde7e6a4084edef77face71533da
|
bf97e5177e2a23292cf06750c52df5d008efb1c8
|
/videos_monitor/run.py
|
08330379e6fe4186e9c442aa39ea6396c982726d
|
[] |
no_license
|
blingmoon/monitor
|
da962ba8a63f05c7dfd0f30486e9f3e5a46f4664
|
7ddf198ace064525cf1276836b74321f4facbb01
|
refs/heads/master
| 2020-12-14T23:25:01.514898 | 2020-01-24T14:16:15 | 2020-01-24T14:16:15 | 234,908,046 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
# -*- coding:utf-8 -*-
# @Time : 2020/1/19 11:05
# @Author : zhouliang02
# @description :
import os
import time
from videos_producer.main import OpenCVProducer
from videos_transponder.main import ServerTransponder
def main():
product = OpenCVProducer("/tmp", 10)
# transponder = ServerTransponder()
server_ip = os.environ.get("server_ip", "127.0.0.1")
server_base_dir = os.environ.get("server_base_dir", "/tmp/")
server_account = os.environ.get("server_account", "root")
for photo_path in product.run():
if not photo_path:
continue
dir_path = time.strftime("%Y-%m-%d/%H", time.localtime(time.time()))
ServerTransponder(server_ip, server_account, f"{server_base_dir}{dir_path}").push_file(photo_path)
if __name__ == "__main__":
main()
pass
|
[
"zhouliang02@qianxin.com"
] |
zhouliang02@qianxin.com
|
9de3288ac7c9aef6e29ff490754ebd5512e2a0ce
|
093e828b4e72b53cf7d5ac9217d8fd7172f3e4cc
|
/Framework/Evaluator.py
|
bbaaa1867f58dcbd901551ee38ac13574ac05a90
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
uboni/Project-B-Movie-Recommender-System
|
0a19d45040c8a032bb601ef85e5c67807f7f8d8e
|
77390637a71b112651fcf92b767e27ca0d3ee8c7
|
refs/heads/master
| 2020-09-22T05:28:55.830719 | 2019-12-05T17:00:01 | 2019-12-05T17:00:01 | 225,067,194 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,832 |
py
|
'''
Author: Uday
Date:29-NOV-2019
License: Free. Please modify it if needed
'''
from EvaluationData import EvaluationData
from EvaluatedAlgorithm import EvaluatedAlgorithm
class Evaluator:
    def __init__(self, dataset, rankings):
        # instance attribute (a shared class-level list would leak
        # algorithms between Evaluator instances)
        self.algorithms = []
        ed = EvaluationData(dataset, rankings)
        self.dataset = ed
def AddAlgorithm(self, algorithm, name):
alg = EvaluatedAlgorithm(algorithm, name)
self.algorithms.append(alg)
def Evaluate(self, doTopN):
results = {}
for algorithm in self.algorithms:
print("Evaluating ", algorithm.GetName(), "...")
results[algorithm.GetName()] = algorithm.Evaluate(self.dataset, doTopN)
# Print results
print("\n")
if (doTopN):
print("{:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10}".format(
"Algorithm", "RMSE", "MAE", "HR", "cHR", "ARHR", "Coverage", "Diversity", "Novelty"))
for (name, metrics) in results.items():
print("{:<10} {:<10.4f} {:<10.4f} {:<10.4f} {:<10.4f} {:<10.4f} {:<10.4f} {:<10.4f} {:<10.4f}".format(
name, metrics["RMSE"], metrics["MAE"], metrics["HR"], metrics["cHR"], metrics["ARHR"],
metrics["Coverage"], metrics["Diversity"], metrics["Novelty"]))
else:
print("{:<10} {:<10} {:<10}".format("Algorithm", "RMSE", "MAE"))
for (name, metrics) in results.items():
print("{:<10} {:<10.4f} {:<10.4f}".format(name, metrics["RMSE"], metrics["MAE"]))
print("\nLegend:\n")
print("RMSE: Root Mean Squared Error. Lower values mean better accuracy.")
print("MAE: Mean Absolute Error. Lower values mean better accuracy.")
if (doTopN):
print("HR: Hit Rate; how often we are able to recommend a left-out rating. Higher is better.")
print("cHR: Cumulative Hit Rate; hit rate, confined to ratings above a certain threshold. Higher is better.")
print("ARHR: Average Reciprocal Hit Rank - Hit rate that takes the ranking into account. Higher is better." )
print("Coverage: Ratio of users for whom recommendations above a certain threshold exist. Higher is better.")
print("Diversity: 1-S, where S is the average similarity score between every possible pair of recommendations")
print(" for a given user. Higher means more diverse.")
print("Novelty: Average popularity rank of recommended items. Higher means more novel.")
def SampleTopNRecs(self, ml, testSubject=85, k=10):
for algo in self.algorithms:
print("\nUsing recommender ", algo.GetName())
print("\nBuilding recommendation model...")
trainSet = self.dataset.GetFullTrainSet()
algo.GetAlgorithm().fit(trainSet)
print("Computing recommendations...")
testSet = self.dataset.GetAntiTestSetForUser(testSubject)
predictions = algo.GetAlgorithm().test(testSet)
recommendations = []
print ("\nWe recommend:")
for userID, movieID, actualRating, estimatedRating, _ in predictions:
intMovieID = int(movieID)
recommendations.append((intMovieID, estimatedRating))
recommendations.sort(key=lambda x: x[1], reverse=True)
            for ratings in recommendations[:k]:
print(ml.getMovieName(ratings[0]), ratings[1])
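# A minimal usage sketch (assumes a Surprise-style algorithm such as
# surprise.SVD and a loader yielding (dataset, rankings)):
#
#     evaluator = Evaluator(dataset, rankings)
#     evaluator.AddAlgorithm(SVD(), "SVD")
#     evaluator.Evaluate(doTopN=True)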
|
[
"noreply@github.com"
] |
uboni.noreply@github.com
|
2f6c2bce524bc945e8b1906c4fd08726bca5888c
|
41dc19883789f45b6086399a1ae23995f53b4b2c
|
/BayesMadeSimple/distribution.py
|
dea353cbb2da5a315f6990d03872b1985e04638a
|
[
"MIT"
] |
permissive
|
sunny2309/scipy_conf_notebooks
|
f86179ddcd67168b709c755cc01862ed7c9ab2bd
|
30a85d5137db95e01461ad21519bc1bdf294044b
|
refs/heads/master
| 2022-10-28T17:27:42.717171 | 2021-01-25T02:24:05 | 2021-01-25T02:24:05 | 221,385,814 | 2 | 0 |
MIT
| 2022-10-20T02:55:20 | 2019-11-13T06:12:07 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 16,338 |
py
|
"""
Pmf: Represents a Probability Mass Function (PMF).
Cdf: Represents a Cumulative Distribution Function (CDF).
Copyright 2019 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.interpolate import interp1d
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
d: dictionary
options: keyword args to add to d
returns: modified d
"""
for key, val in options.items():
d.setdefault(key, val)
return d
class Pmf(pd.Series):
"""Represents a probability Mass Function (PMF)."""
def __init__(self, *args, **kwargs):
"""Initialize a Pmf.
Note: this cleans up a weird Series behavior, which is
that Series() and Series([]) yield different results.
See: https://github.com/pandas-dev/pandas/issues/16737
"""
if args:
super().__init__(*args, **kwargs)
else:
underride(kwargs, dtype=np.float64)
super().__init__([], **kwargs)
def copy(self, **kwargs):
"""Make a copy.
returns: new Pmf
"""
return Pmf(self, **kwargs)
def __getitem__(self, qs):
"""Look up qs and return ps."""
try:
return super().__getitem__(qs)
except (KeyError, ValueError, IndexError):
return 0
@property
def qs(self):
"""Get the quantities.
returns: NumPy array
"""
return self.index.values
@property
def ps(self):
"""Get the probabilities.
returns: NumPy array
"""
return self.values
def _repr_html_(self):
"""Returns an HTML representation of the series.
Mostly used for Jupyter notebooks.
"""
df = pd.DataFrame(dict(probs=self))
return df._repr_html_()
def normalize(self):
"""Make the probabilities add up to 1 (modifies self).
returns: normalizing constant
"""
total = self.sum()
self /= total
return total
def mean(self):
"""Computes expected value.
returns: float
"""
#TODO: error if not normalized
return np.sum(self.ps * self.qs)
def median(self):
"""Median (50th percentile).
returns: float
"""
return self.quantile(0.5)
def var(self):
"""Variance of a PMF.
returns: float
"""
m = self.mean()
d = self.qs - m
return np.sum(d**2 * self.ps)
def std(self):
"""Standard deviation of a PMF.
returns: float
"""
return np.sqrt(self.var())
def sample(self, *args, **kwargs):
"""Makes a random sample.
args: same as ps.Series.sample
options: same as ps.Series.sample
returns: Series
"""
# TODO: finish this
underride(kwargs, weights=self.ps)
return self.index.sample(*args, **kwargs)
def choice(self, *args, **kwargs):
"""Makes a random sample.
Uses the probabilities as weights unless `p` is provided.
args: same as np.random.choice
options: same as np.random.choice
returns: NumPy array
"""
underride(kwargs, p=self.ps)
return np.random.choice(self.qs, *args, **kwargs)
def bar(self, **options):
"""Makes a bar plot.
options: same as plt.bar
"""
underride(options, label=self.name)
plt.bar(self.qs, self.ps, **options)
def __add__(self, x):
"""Computes the Pmf of the sum of values drawn from self and x.
x: another Pmf or a scalar
returns: new Pmf
"""
if isinstance(x, Pmf):
return pmf_add(self, x)
else:
return Pmf(self.ps, index=self.qs + x)
__radd__ = __add__
def __sub__(self, x):
"""Computes the Pmf of the diff of values drawn from self and other.
x: another Pmf
returns: new Pmf
"""
if isinstance(x, Pmf):
return pmf_sub(self, x)
else:
return Pmf(self.ps, index=self.qs - x)
# TODO: implement rsub
# __rsub__ = __sub__
# TODO: mul, div, truediv, divmod?
def make_joint(self, other, **options):
"""Make joint distribution
:param self:
:param other:
:param options: passed to Pmf constructor
:return: new Pmf
"""
qs = pd.MultiIndex.from_product([self.qs, other.qs])
ps = np.multiply.outer(self.ps, other.ps).flatten()
return Pmf(ps, index=qs, **options)
def marginal(self, i, name=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
name: string
Returns: Pmf
"""
# TODO: rewrite this using multiindex operations
pmf = Pmf(name=name)
for vs, p in self.items():
pmf[vs[i]] += p
return pmf
def conditional(self, i, j, val, name=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
name: string
Returns: Pmf
"""
# TODO: rewrite this using multiindex operations
pmf = Pmf(name=name)
for vs, p in self.items():
if vs[j] == val:
pmf[vs[i]] += p
pmf.normalize()
return pmf
def update(self, likelihood, data):
"""Bayesian update.
likelihood: function that takes (data, hypo) and returns
likelihood of data under hypo
data: whatever format like_func understands
returns: normalizing constant
"""
for hypo in self.qs:
self[hypo] *= likelihood(data, hypo)
return self.normalize()
def max_prob(self):
"""Value with the highest probability.
returns: the value with the highest probability
"""
return self.idxmax()
def make_cdf(self, normalize=True):
"""Make a Cdf from the Pmf.
It can be good to normalize the cdf even if the Pmf was normalized,
to guarantee that the last element of `ps` is 1.
returns: Cdf
"""
cdf = Cdf(self.cumsum())
if normalize:
cdf.normalize()
return cdf
def quantile(self, ps):
"""Quantities corresponding to given probabilities.
ps: sequence of probabilities
return: sequence of quantities
"""
cdf = self.sort_index().cumsum()
interp = interp1d(cdf.values, cdf.index,
kind='next',
copy=False,
assume_sorted=True,
bounds_error=False,
fill_value=(self.qs[0], np.nan))
return interp(ps)
def credible_interval(self, p):
"""Credible interval containing the given probability.
p: float 0-1
returns: array of two quantities
"""
tail = (1-p) / 2
ps = [tail, 1-tail]
return self.quantile(ps)
@staticmethod
def from_seq(seq, normalize=True, sort=True, **options):
"""Make a PMF from a sequence of values.
seq: any kind of sequence
normalize: whether to normalize the Pmf, default True
sort: whether to sort the Pmf by values, default True
options: passed to the pd.Series constructor
returns: Pmf object
"""
series = pd.Series(seq).value_counts(sort=False)
options['copy'] = False
pmf = Pmf(series, **options)
if sort:
pmf.sort_index(inplace=True)
if normalize:
pmf.normalize()
return pmf
# Comparison operators
def gt(self, x):
"""Probability that a sample from this Pmf > x.
x: number
returns: float probability
"""
if isinstance(x, Pmf):
return pmf_gt(self, x)
else:
return self[self.qs > x].sum()
__gt__ = gt
def lt(self, x):
"""Probability that a sample from this Pmf < x.
x: number
returns: float probability
"""
if isinstance(x, Pmf):
return pmf_lt(self, x)
else:
return self[self.qs < x].sum()
__lt__ = lt
def ge(self, x):
"""Probability that a sample from this Pmf >= x.
x: number
returns: float probability
"""
if isinstance(x, Pmf):
return pmf_ge(self, x)
else:
return self[self.qs >= x].sum()
__ge__ = ge
def le(self, x):
"""Probability that a sample from this Pmf <= x.
x: number
returns: float probability
"""
if isinstance(x, Pmf):
return pmf_le(self, x)
else:
return self[self.qs <= x].sum()
__le__ = le
def eq(self, x):
"""Probability that a sample from this Pmf == x.
x: number
returns: float probability
"""
if isinstance(x, Pmf):
return pmf_eq(self, x)
else:
return self[self.qs == x].sum()
__eq__ = eq
def ne(self, x):
"""Probability that a sample from this Pmf != x.
x: number
returns: float probability
"""
if isinstance(x, Pmf):
return pmf_ne(self, x)
else:
return self[self.qs != x].sum()
__ne__ = ne
def pmf_conv(pmf1, pmf2, ufunc):
"""Convolve two PMFs.
pmf1:
pmf2:
ufunc: elementwise function for arrays
returns: new Pmf
"""
qs = ufunc(pmf1.qs, pmf2.qs).flatten()
ps = np.multiply.outer(pmf1.ps, pmf2.ps).flatten()
series = pd.Series(ps).groupby(qs).sum()
return Pmf(series)
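# e.g. for a fair coin pmf = Pmf.from_seq([0, 1]),
# pmf_conv(pmf, pmf, np.add.outer) yields P(0)=0.25, P(1)=0.5, P(2)=0.25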
def pmf_add(pmf1, pmf2):
"""Distribution of the sum.
pmf1:
pmf2:
returns: new Pmf
"""
return pmf_conv(pmf1, pmf2, np.add.outer)
def pmf_sub(pmf1, pmf2):
"""Distribution of the difference.
pmf1:
pmf2:
returns: new Pmf
"""
return pmf_conv(pmf1, pmf2, np.subtract.outer)
def pmf_outer(pmf1, pmf2, ufunc):
"""Computes the outer product of two PMFs.
pmf1:
pmf2:
ufunc: function to apply to the qs
returns: NumPy array
"""
qs = ufunc.outer(pmf1.qs, pmf2.qs)
ps = np.multiply.outer(pmf1.ps, pmf2.ps)
return qs * ps
def pmf_gt(pmf1, pmf2):
"""Probability that a value from pmf1 is greater than a value from pmf2.
pmf1: Pmf object
pmf2: Pmf object
returns: float probability
"""
outer = pmf_outer(pmf1, pmf2, np.greater)
return outer.sum()
def pmf_lt(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
pmf1: Pmf object
pmf2: Pmf object
returns: float probability
"""
outer = pmf_outer(pmf1, pmf2, np.less)
return outer.sum()
def pmf_ge(pmf1, pmf2):
"""Probability that a value from pmf1 is >= than a value from pmf2.
pmf1: Pmf object
pmf2: Pmf object
returns: float probability
"""
outer = pmf_outer(pmf1, pmf2, np.greater_equal)
return outer.sum()
def pmf_le(pmf1, pmf2):
"""Probability that a value from pmf1 is <= than a value from pmf2.
pmf1: Pmf object
pmf2: Pmf object
returns: float probability
"""
outer = pmf_outer(pmf1, pmf2, np.less_equal)
return outer.sum()
def pmf_eq(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
pmf1: Pmf object
pmf2: Pmf object
returns: float probability
"""
outer = pmf_outer(pmf1, pmf2, np.equal)
return outer.sum()
def pmf_ne(pmf1, pmf2):
"""Probability that a value from pmf1 is <= than a value from pmf2.
pmf1: Pmf object
pmf2: Pmf object
returns: float probability
"""
outer = pmf_outer(pmf1, pmf2, np.not_equal)
return outer.sum()
class Cdf(pd.Series):
"""Represents a Cumulative Distribution Function (CDF)."""
def __init__(self, *args, **kwargs):
"""Initialize a Cdf.
Note: this cleans up a weird Series behavior, which is
that Series() and Series([]) yield different results.
See: https://github.com/pandas-dev/pandas/issues/16737
"""
if args:
super().__init__(*args, **kwargs)
else:
underride(kwargs, dtype=np.float64)
super().__init__([], **kwargs)
def copy(self, **kwargs):
"""Make a copy.
returns: new Cdf
"""
return Cdf(self, **kwargs)
@property
def forward(self):
interp = interp1d(self.qs, self.ps,
kind='previous',
copy=False,
assume_sorted=True,
bounds_error=False,
fill_value=(0,1))
return interp
@property
def inverse(self):
interp = interp1d(self.ps, self.qs,
kind='next',
copy=False,
assume_sorted=True,
bounds_error=False,
fill_value=(self.qs[0], np.nan))
return interp
# calling a Cdf like a function does forward lookup
__call__ = forward
# quantile is the same as an inverse lookup
quantile = inverse
@staticmethod
def from_seq(seq, normalize=True, sort=True, **options):
"""Make a CDF from a sequence of values.
seq: any kind of sequence
normalize: whether to normalize the Cdf, default True
sort: whether to sort the Cdf by values, default True
options: passed to the pd.Series constructor
returns: CDF object
"""
pmf = Pmf.from_seq(seq, normalize=False, sort=sort, **options)
return pmf.make_cdf(normalize=normalize)
@property
def qs(self):
"""Get the quantities.
returns: NumPy array
"""
return self.index.values
@property
def ps(self):
"""Get the probabilities.
returns: NumPy array
"""
return self.values
def _repr_html_(self):
"""Returns an HTML representation of the series.
Mostly used for Jupyter notebooks.
"""
df = pd.DataFrame(dict(probs=self))
return df._repr_html_()
def normalize(self):
"""Make the probabilities add up to 1 (modifies self).
returns: normalizing constant
"""
total = self.ps[-1]
self /= total
return total
def make_pmf(self, normalize=False):
"""Make a Pmf from the Cdf.
        returns: Pmf
"""
ps = self.ps
diff = np.ediff1d(ps, to_begin=ps[0])
pmf = Pmf(pd.Series(diff, index=self.index.copy()))
if normalize:
pmf.normalize()
return pmf
def choice(self, *args, **kwargs):
"""Makes a random sample.
Uses the probabilities as weights unless `p` is provided.
args: same as np.random.choice
options: same as np.random.choice
returns: NumPy array
"""
# TODO: Make this more efficient by implementing the inverse CDF method.
pmf = self.make_pmf()
        return pmf.choice(*args, **kwargs)
def mean(self):
"""Expected value.
returns: float
"""
return self.make_pmf().mean()
def var(self):
"""Variance.
returns: float
"""
return self.make_pmf().var()
def std(self):
"""Standard deviation.
returns: float
"""
return self.make_pmf().std()
def median(self):
"""Median (50th percentile).
returns: float
"""
return self.quantile(0.5)
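# A minimal usage sketch:
#
#     pmf = Pmf.from_seq([1, 2, 2, 3, 5])
#     pmf.mean()             # expected value
#     cdf = pmf.make_cdf()
#     cdf.quantile(0.5)      # median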
|
[
"sunny.2309@yahoo.in"
] |
sunny.2309@yahoo.in
|
279bc30399e52b6a4f952a964584f2866955791d
|
da4cdfab90215fb8c9b7e873740161b8f8e865d4
|
/course4/week3/tracks.py
|
7928da2ad0e972fafef83e138f10d640c0d4a083
|
[] |
no_license
|
cauequeiroz/py4e
|
98d7ca53af099b1ee50af2ce094810641c9411a7
|
9e3d487d1031c621d66ac7bdfd38ac419b098293
|
refs/heads/master
| 2022-12-06T08:41:14.013133 | 2020-08-26T21:46:07 | 2020-08-26T21:46:07 | 275,848,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,964 |
py
|
# This application will read an iTunes export file in XML and produce a properly normalized database
import xml.etree.ElementTree as ET
import sqlite3
# Parse xml
# --------------------------------------------------------
def get_item_content(items, label):
    # iTunes library XML stores track metadata as alternating <key>/<value>
    # elements, so the value for a given label is the element right after it.
    found = False
    for item in items:
        if item.text == label:
            found = True
        elif found:
            return item.text
def get_content(file):
tracks_xml = ET.parse(file).findall('dict/dict/dict')
tracks = list()
for track in tracks_xml:
name = get_item_content(track, 'Name')
artist = get_item_content(track, 'Artist')
album = get_item_content(track, 'Album')
genre = get_item_content(track, 'Genre')
if artist is None or album is None or genre is None:
continue
track = dict()
track['name'] = name
track['artist'] = artist
track['album'] = album
track['genre'] = genre
tracks.append(track)
return tracks
# Database
# --------------------------------------------------------
connection = sqlite3.connect('tracks.sqlite')
database = connection.cursor()
database.executescript('''
DROP TABLE IF EXISTS Artist;
DROP TABLE IF EXISTS Genre;
DROP TABLE IF EXISTS Album;
DROP TABLE IF EXISTS Track;
CREATE TABLE Artist (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Genre (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Album (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
artist_id INTEGER,
title TEXT UNIQUE
);
CREATE TABLE Track (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
album_id INTEGER,
genre_id INTEGER
);
''')
# Start application
# --------------------------------------------------------
tracks = get_content('Library.xml')
for track in tracks:
# Handle artist
database.execute('INSERT OR IGNORE INTO Artist (name) VALUES (?)', (track.get('artist'),))
database.execute('SELECT id FROM Artist WHERE name=?', (track.get('artist'),))
artist_id = database.fetchone()[0]
# Handle genre
database.execute('INSERT OR IGNORE INTO Genre (name) VALUES (?)', (track.get('genre'),))
database.execute('SELECT id FROM Genre WHERE name=?', (track.get('genre'),))
genre_id = database.fetchone()[0]
# Handle album
database.execute('INSERT OR IGNORE INTO Album (title, artist_id) VALUES (?, ?)', (track.get('album'), artist_id))
    database.execute('SELECT id FROM Album WHERE title=?', (track.get('album'),))
album_id = database.fetchone()[0]
# Handle track
database.execute('INSERT OR REPLACE INTO Track (title, album_id, genre_id) VALUES (?, ?, ?)', (track.get('name'), album_id, genre_id))
connection.commit()
connection.close()
|
[
"cauenqueiroz@gmail.com"
] |
cauenqueiroz@gmail.com
|
56058f10629781f94c27d67b43249ebd00812591
|
0d43b36da35dc97239e665dedfa6f5dd6812986e
|
/tfpipe/modules/bedtools/__init__.py
|
c60c4c4ab0ceaa0f31437e800c9ba459936f9445
|
[] |
no_license
|
PaulCotney/tfpipe
|
39be244b46a531fbac3a41cf606e957dde6e02e7
|
88008c80c48ccf91527311c7f7ac801c4b0dd0c3
|
refs/heads/master
| 2021-01-22T09:48:54.127534 | 2018-08-22T13:48:31 | 2018-08-22T13:48:31 | 63,611,894 | 0 | 0 | null | 2017-05-15T16:22:14 | 2016-07-18T14:54:04 |
Python
|
UTF-8
|
Python
| false | false | 87 |
py
|
""" """
from format_conversion import BamToBed, BedToBam, Intersect, SortBed, MergeBed
|
[
"eklundke@gmail.com"
] |
eklundke@gmail.com
|
81b8109d9545af784d646f5b27b16704a9b99af5
|
41d2730917ef44323e9d534cc6df8dab0c32788a
|
/2-Computer-control-terminal/Raspi端/Main_raspi_client.py
|
322db3faa14b3148228ab781cb2578d85fbe61ba
|
[
"Apache-2.0"
] |
permissive
|
thinker-niu/Home-management-platform-and-home-robot
|
cc73f498291a720c6f55ed3405bf7b835244989c
|
597349e261bfb8e37ef7da5a698ca2dd4696aed9
|
refs/heads/master
| 2023-03-19T12:45:29.962477 | 2020-09-03T23:53:20 | 2020-09-03T23:53:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 912 |
py
|
#!/usr/bin/python3.7
# coding=utf-8
#the client is unchanged from the previous version
from socket import *
import threading
import time
import robot_contro_main #serial-port control
address="192.168.43.10" #IP address of the ESP8266 server
#address="127.0.0.1" #IP address of the ESP8266 server
port=6424 #port number of the ESP8266 server
buffsize=1024 #receive buffer size
s=socket(AF_INET, SOCK_STREAM)
s.connect((address,port))
def fun():
while True:
recvdata=s.recv(buffsize).decode('utf-8')
print("\n接收的数据是:"+recvdata)
robot_contro_main.Driver_Set_Engine(recvdata)
t = threading.Thread(target=fun) # t is a newly created thread dedicated to receiving data sent from the server
t.start()
while True:
    senddata=input('\nData to send: ')
if senddata=='exit':
break
s.send(senddata.encode())
s.close()
|
[
"noreply@github.com"
] |
thinker-niu.noreply@github.com
|
af7f3350737682e5f14e67fac76f11d84afe65b1
|
edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81
|
/Python 300/08. Iteration Statement/144.py
|
641f2af3cf169953aeb4921da394052b49442b99
|
[] |
no_license
|
narinn-star/Python
|
575cba200de35b9edf3832c4e41ccce657075751
|
14eba211cd3a9e9708a30073ba5b31d21d39eeef
|
refs/heads/master
| 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
# for loop over a list of animals
lists = ['dog', 'cat','parrot']
for i in lists:
print(i, len(i))
|
[
"skfls2618@naver.com"
] |
skfls2618@naver.com
|
3c8c81ebadd2490de3c9f2eb1caef7b1dbd64274
|
2bce81a0d3203ebbf0a94a6d67007d3bdf5ba10e
|
/10.py
|
f9f39d62fd56630cfd4683409c12ff5f4e4296bf
|
[] |
no_license
|
kayeoni/Project_Euler
|
362698277730a1ea844ef490de7e0fd79646acf3
|
3d53c4b313c04bc53d5a8e0684b40438e824f266
|
refs/heads/master
| 2021-09-08T02:07:03.137199 | 2018-03-05T22:21:53 | 2018-03-05T22:21:53 | 115,199,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
import math
def check_prime(n):
i = 2
if n == i:
return 1
while i < int(math.sqrt(n)) + 1:
if n % i == 0:
return 0
i += 1
return 1
def sum_of_primes(n):
    primes = []  # renamed from `list`, which shadowed the builtin
    if n < 2:
        return 0
    if n == 2:
        return 2
    if n > 2:
        primes.append(2)
        i = 3
        while i <= n:
            if check_prime(i) == 1:
                primes.append(i)
            i += 2
    return sum(primes)
print(sum_of_primes(2000000))
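# A much faster equivalent (sketch): a Sieve of Eratosthenes avoids
# trial-dividing every candidate number.
#
# def sum_of_primes_sieve(n):
#     is_prime = [True] * (n + 1)
#     is_prime[0] = is_prime[1] = False
#     for i in range(2, int(n ** 0.5) + 1):
#         if is_prime[i]:
#             for j in range(i * i, n + 1, i):
#                 is_prime[j] = False
#     return sum(i for i, flag in enumerate(is_prime) if flag)
#
# print(sum_of_primes_sieve(2000000))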
|
[
"kayeoni.kim@gmail.com"
] |
kayeoni.kim@gmail.com
|
914957a9aa07da94a8697e03eb3e85c6fee20dd2
|
ecf22cd4f463327c85be6e763f550f933104815f
|
/orders/forms.py
|
66cefd55be1b4139b23c2d55e7f3d98cd671b505
|
[] |
no_license
|
solairedev/gomel-agro-complect
|
d0205f4381baf083a9bf94791e64193208ca0387
|
a77a3f9db4bb88f810e1c7a73859019407b1292e
|
refs/heads/master
| 2022-01-31T06:29:55.831333 | 2019-05-20T09:01:13 | 2019-05-20T09:01:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
from django.forms import ModelForm, Textarea, TextInput
from .models import Order
class OrderForm(ModelForm):
class Meta:
model = Order
fields = ['name','phone','note']
widgets = {
'name':TextInput(attrs={'class':'input'}),
'phone':TextInput(attrs={'class':'input js-phone-mask', 'pattern':'.{19}'}),
            'note':Textarea(attrs={'class':'textarea','rows':'1','placeholder':'Optional field'})
}
|
[
"victor374288@mail.ru"
] |
victor374288@mail.ru
|
d1ac6305dbd6d50b835b3c72c2b048137df5ea1f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/81/usersdata/212/47049/submittedfiles/dec2bin.py
|
6411c5dd71bf68dde30b00d10c31f9fc65086a43
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
# -*- coding: utf-8 -*-
p=int(input('enter the value of the smaller number: '))
q=int(input('enter the value of the larger number: '))
n=1%10
print(n)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
0fb14a61732e67d06cdc1613b86e38564700f4d4
|
24aec917953728338a7f1804d96d0ac6288a1250
|
/linesheet-related1.py
|
74d7bb8013547044060379d0b45a49f77fd6841e
|
[] |
no_license
|
jingggo/python_excel_research
|
d82b3b222fc6852a1f0288aa2971a15617eeb16a
|
89b7ccc22e06c6f592c9d42d95e485b01cce65b4
|
refs/heads/master
| 2021-09-02T15:08:32.904735 | 2018-01-03T10:46:49 | 2018-01-03T10:46:49 | 115,507,587 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,766 |
py
|
from xlrd import open_workbook
from xlwt import Workbook
def getTemplateUploadLinesheet():
book = open_workbook(r'E:\RFScripts\linesheet_upload_data_ef\Linesheet upload use case-ef.xlsx')
sheet = book.sheet_by_name('EF-DATA')
return sheet.row_values(3)
def getExistingLineSheet(nrows,xrow,xcol=1):
book = open_workbook(r'E:\RFScripts\linesheet_upload_data_ck\Linesheet upload use case.xlsx')
sheet0 = book.sheet_by_index(0)
rlist = []
for row in range(nrows):
rlist.append(sheet0.row_values(xrow+row, xcol, 9))
# sheet1 = book.sheet_by_index(1)
# print sheet0.row_slice(4, 1, 7)
# print sheet0.row_values(4, 1, 7)
# print sheet0.row_types(4, 1, 7)
return (rlist)
def getUploadLineSheet(nrows,xrow,xcol=1):
book = open_workbook(r'E:\RFScripts\linesheet_upload_data_ck\Linesheet upload use case.xlsx')
sheet0 = book.sheet_by_index(0)
rlist = []
for row in range(nrows):
rlist.append(sheet0.row_values(xrow+row, xcol, 9))
return (rlist)
def createExcel():
book = Workbook()
sheet1 = book.add_sheet('EF-DATA', cell_overwrite_ok=True)
scenario_indexs = [2,11,23,31,43,55,67,75,87,99,114,126,135,145,154,169,184,200,215,230,246,254,266,278,286,298,310,322,330,338,350,359,368,380,388,396,408,417,426,434,444,452,462,472,480,492,500,512,520,528,538,546,556]
existing_counts =[2,1, 1, 3, 4, 1, 1, 3, 4, 4, 4, 2, 2, 1, 6, 6, 6, 6, 6, 6, 1, 3, 4, 1, 3, 4, 3, 1, 1, 3, 2, 2, 3, 1, 1, 3, 2, 2, 1, 2, 1, 2, 2, 1, 3, 1, 3, 1, 1, 2, 1, 2, 2]
upload_counts =[1,5, 1, 3, 2, 5, 1, 3, 2, 5, 2, 1, 2, 2, 3, 3, 4, 3, 3, 4, 1, 3, 2, 1, 3, 2, 3, 1, 1, 3, 1, 1, 3, 1, 1, 3, 1, 1, 1, 2, 1, 2, 2, 1, 3, 1, 3, 1, 1, 2, 1, 2, 2]
scenario_names = getScenario(0)
row_index = 0
for index,existing_count,upload_count,scenario_name in zip(scenario_indexs,existing_counts,upload_counts,scenario_names):
existing_values, upload_values = getExpectedLinesheets(index+2,existing_count,upload_count)#(4,2,1)
# print(existing_values, upload_values)
row_index = writeLineToSheet(sheet1,row_index,existing_values,upload_values,scenario_name)
row_index += 2
# print(row_index)
# break
book.save(r'E:\RFScripts\linesheet_upload_data_ef\test4.xls')
def writeLineToSheet(sheet1, row_index, existing_values,upload_values,scenario_name):
xrow = row_index
sheet1.write(xrow, 0, scenario_name)
xrow += 1
sheet1.write(xrow, 0, 'Existing Record :')
xrow += 1
template_linesheet_title = getTemplateUploadLinesheet()
[sheet1.write(xrow, ic, value) for ic, value in enumerate(template_linesheet_title)]
xrow += 1
for irow in range(len(existing_values)):
row_values = existing_values[irow]
for xcol, value in enumerate(row_values):
sheet1.write(irow + xrow, xcol, value)
xrow += len(existing_values)
sheet1.write(xrow, 0, 'Upload File :')
xrow += 1
[sheet1.write(xrow, ic, value) for ic, value in enumerate(template_linesheet_title)]
xrow += 1
'''upload linesheet'''
for iurow in range(len(upload_values)):
row_upload_values = upload_values[iurow]
for xcol, value in enumerate(row_upload_values):
sheet1.write(iurow + xrow, xcol, value)
xrow += len(upload_values)
sheet1.write(xrow, 0, 'Expected Result:')
xrow += 1
sheet1.write(xrow, 0, 'Actual Result:')
return xrow
def writeLineSheet(existing_values, upload_values):
book = Workbook()
sheet1 = book.add_sheet('EF-DATA', cell_overwrite_ok=True)
xrow=0
sheet1.write(xrow, 0, 'Scenario 1 : New Style')
xrow += 1
sheet1.write(xrow, 0, 'Existing Record :')
xrow += 1
template_linesheet_title = getTemplateUploadLinesheet()
[sheet1.write(xrow, ic, value) for ic, value in enumerate(template_linesheet_title)]
xrow += 1
for irow in range(len(existing_values)):
row_values = existing_values[irow]
for xcol, value in enumerate(row_values):
sheet1.write(irow+xrow,xcol,value)
xrow += len(existing_values)
sheet1.write(xrow, 0, 'Upload File :')
xrow += 1
[sheet1.write(xrow, ic, value) for ic, value in enumerate(template_linesheet_title)]
xrow += 1
'''upload linesheet'''
for iurow in range(len(upload_values)):
        row_values = upload_values[iurow]
for xcol, value in enumerate(row_values):
sheet1.write(iurow+xrow, xcol, value)
xrow += len(upload_values)
sheet1.write(xrow, 0, 'Expected Result:')
xrow += 1
sheet1.write(xrow, 0, 'Actual Result:')
# sheet1.write(0,0,'A1')
# sheet1.write(0,1,'B1')
# row1 = sheet1.row(1)
# row1.write(0,'A2')
# row1.write(1,'B2')
book.save(r'E:\RFScripts\linesheet_upload_data_ef\test.xls')
return xrow
def addToRealLinesheet(linesheet):
tempLinesheet=['','','','','','AUTOTEST','','','','','','143.0','288.0','','','','','EILEENFISHER','WOMENSWEAR','MISSY','AUTOTESTSTORY','','AAH - SLEEK TENCEL','','W - SWEATERS','','','','SWEATERS','M7 (XXS-XXL)','FALL','2018','0.0','0.0','','','','','','DOMESTIC','67.76','','','','','','','','','','','','','','M','','','','','','','','','','','','','F','','0.0','0.0','','','','','','','0','','0.0','0','','M4','ANDARI FASHION, INC.','UNITED STATES','','']
'''Corporate,style,upc,color code,color des, size code,size desc'''
if not linesheet[0] == '':
tempLinesheet.pop(17)
tempLinesheet.insert(17,'EILEENFISHER')
if not linesheet[1] == '':
tempLinesheet.pop(0)
tempLinesheet.insert(0, 'EF'+linesheet[1])
if not linesheet[2] == '':
tempLinesheet.pop(1)
tempLinesheet.insert(1, linesheet[2])
if not linesheet[3] == '':
tempLinesheet.pop(6)
tempLinesheet.insert(6, linesheet[3])
if not linesheet[4] == '':
tempLinesheet.pop(7)
tempLinesheet.insert(7, linesheet[4])
if not linesheet[5] == '':
tempLinesheet.pop(8)
tempLinesheet.insert(8, linesheet[5])
if not linesheet[6] == '':
tempLinesheet.pop(9)
tempLinesheet.insert(9, linesheet[6])
if not linesheet[7] == '':
if linesheet[7]=='A/M':
linesheet[7]='M'
tempLinesheet.pop(54)
tempLinesheet.insert(54, linesheet[7])
return tempLinesheet
def getExpectedLinesheets(existing_row_index, existing_nrows, upload_nrows):
src_existing_linesheets = getExistingLineSheet(existing_nrows,existing_row_index)#2 rows for existing, the row_index is 4
src_upload_linesheets = getUploadLineSheet(upload_nrows,existing_row_index+existing_nrows+2)
# print(src_existing_linesheets)
# print(src_upload_linesheets)
existing_real_linesheet = []
for src_existing_linesheet in src_existing_linesheets:
existing_real_linesheet.append(addToRealLinesheet(src_existing_linesheet))
upload_real_linesheet = []
for src_upload_linesheet in src_upload_linesheets:
upload_real_linesheet.append(addToRealLinesheet(src_upload_linesheet))
return existing_real_linesheet, upload_real_linesheet
def getScenario(sheet_index):
scenario_indexs = [1, 10, 22, 30, 42, 54, 66, 74, 86, 98, 113, 125, 134, 144, 153, 168, 183, 199, 214, 229, 245, 253, 265, 277, 285, 297, 309, 321, 329, 337, 349, 358, 367, 379, 387, 395, 407, 416, 425, 433, 443, 451, 461, 471, 479, 491, 499, 511, 519, 527, 537, 545, 555]
book = open_workbook(r'E:\RFScripts\linesheet_upload_data_ck\Linesheet upload use case.xlsx')
sheet = book.sheet_by_index(sheet_index)
scenario_names=[]
for scenario_index in scenario_indexs:
scenario_names.append(sheet.cell(scenario_index,1).value)
# print scenario_names
return scenario_names
createExcel()
|
[
"jyang@7thonline.cn"
] |
jyang@7thonline.cn
|
46451d297fa736664316b7c35106ff642cada2ff
|
cbb7f79a50b05e2ab670ae19bbd1c3b8dead437d
|
/dict_ordem.py
|
d24ab507f66b1828b5ff9371ba46aa626fa734e0
|
[] |
no_license
|
lfbessegato/Python_Avancado
|
3b680d65fe543bd915b5798a85be1f7dadfad4c4
|
bb73b99d64f92693a6fe71748f2c24aaabe7d4e1
|
refs/heads/master
| 2022-09-07T20:28:07.037656 | 2020-05-29T20:24:07 | 2020-05-29T20:24:07 | 265,316,529 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
from collections import OrderedDict
# OrderedDict keeps insertion order
d = OrderedDict()
d['python'] = 10
d['java'] = 5
d['php'] = 6
d['C'] = 10
for key in d:
print(key, d[key])
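# Expected output (insertion order is preserved):
# python 10
# java 5
# php 6
# C 10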
|
[
"lfrbessegato@gmail.com"
] |
lfrbessegato@gmail.com
|
c5a55686d52aef4a636fcd08c7d52bca631af994
|
8c3ba133fa34cf2f936ba9176459690008e9e1fb
|
/imagepy/menus/Window/widgets_plgs.py
|
4a05938af587349c3a114d2efe75198b21d28d8b
|
[
"BSD-2-Clause"
] |
permissive
|
qixinbo/imagepy
|
fcd272b231b3f49fafd51425f46e826a73841c1f
|
a2722443dfddf2b0b81b44512427b8a273a7424c
|
refs/heads/master
| 2023-03-16T15:58:57.330418 | 2022-09-03T13:35:46 | 2022-09-03T13:35:46 | 519,933,892 | 0 | 0 |
BSD-4-Clause
| 2022-08-01T02:02:26 | 2022-08-01T02:02:25 | null |
UTF-8
|
Python
| false | false | 532 |
py
|
from sciapp.action import Free
class Widgets(Free):
"""ImageKiller: derived from sciapp.action.Free"""
title = 'Widgets'
asyn = False
def run(self, para = None):
self.app.switch_widget()
class ToolBar(Free):
title = 'Toolbar'
asyn = False
def run(self, para = None):
self.app.switch_toolbar()
class TableWindow(Free):
"""ImageKiller: derived from sciapp.action.Free"""
title = 'Tables Window'
asyn = False
#process
def run(self, para = None):
self.app.switch_table()
plgs = [Widgets, ToolBar, TableWindow]
|
[
"imagepy@sina.com"
] |
imagepy@sina.com
|
aaf729ced0606b16165d02d0f6b98067609e987f
|
44e25862aec1577d09e4a0695b1367be6c847ff2
|
/POTD/nim.py
|
de7b4ecc08d4bd1518f0baf98052c94bec18ee0e
|
[] |
no_license
|
imabbas/RandomProjects
|
8cefcaf907e1f3c30a9e1cc74af8db4015fcdd4d
|
c039cc471792465c5018c307e1526ce5c3fbe8c1
|
refs/heads/master
| 2021-06-10T18:32:50.042149 | 2017-02-07T00:58:21 | 2017-02-07T00:58:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,611 |
py
|
print('The Game of Nim')
print("")
marbles = int(input('Number of marbles are in the pile: '))
start = input('Who will start? (p or c): ')
if start == 'p':
turn = True
else:
turn = False
while marbles > 1:
print('The pile has ' + str(marbles) + ' marbles in it.')
while turn == True:
max_marbles = int(marbles) // 2
player_marbles = 0
player_intake = 0
while player_intake < 1 or player_intake > max_marbles:
player_intake = int(input('How many marbles do you want to take?' + '(1-'+ str(max_marbles) + '):'))
if player_intake >= 1 and player_intake <= max_marbles:
player_marbles = player_marbles + player_intake
marbles = marbles - player_marbles
print('The pile has ' + str(marbles) + ' marbles in it.')
turn = False
    while turn == False:
        max_marbles = marbles // 2
        # Take enough marbles to leave a pile of 2**x - 1 (a losing position
        # for the opponent); when no such move exists, take 1 marble.
        final_intake = 1
        for computer_intake in range(max_marbles, 0, -1):
            if any(marbles - computer_intake == (2**x) - 1 for x in range(10, 0, -1)):
                final_intake = computer_intake
                break
        print('The computer takes ' + str(final_intake) + ' marbles.')
        marbles = marbles - final_intake
        turn = True
|
[
"Aadil Abbas"
] |
Aadil Abbas
|
f6fa771d57a3a10af786708c35aa3393e0e40935
|
9c2ca939f29b861afec382cd17a462775a3974d0
|
/run_worker.py
|
fcec489b5ac3ac725751dac7c59693090a0cba6f
|
[
"BSD-2-Clause"
] |
permissive
|
merrlyne/gchatautorespond
|
1e2009823e16289ea2cea709cfee5cd2a3e97459
|
a7f8d7b715ca9851a65588a268ce39addb906b6d
|
refs/heads/master
| 2020-03-20T12:49:18.882038 | 2018-03-29T18:38:58 | 2018-03-29T18:38:58 | 137,441,551 | 0 | 1 | null | 2018-06-15T04:38:49 | 2018-06-15T04:38:49 | null |
UTF-8
|
Python
| false | false | 1,564 |
py
|
from gevent import monkey
monkey.patch_all()
import django
django.setup()
import logging
from threading import Thread
from django.conf import settings
from gevent.wsgi import WSGIServer
from raven.contrib.flask import Sentry
from gchatautorespond.lib.chatworker.worker import Worker, app
from gchatautorespond.lib.chatworker.bot import ContextFilter
if __name__ == '__main__':
worker = Worker()
# Loading takes some time; don't block the api while it goes on.
thread = Thread(target=worker.load)
thread.start()
app.config['worker'] = worker
app.config['LOGGER_NAME'] = 'gchatautorespond.worker'
app.config.update({'SENTRY_' + k.upper(): v for (k, v) in settings.RAVEN_CONFIG.items()
if k != 'dsn'})
# Add the ContextFilter to all stream handlers.
# It can't be attached to the loggers since that wouldn't handle subloggers,
# nor can it be attached to null/sentry handlers, since it'd produce output twice.
handlers = set()
for logger_name in settings.LOGGING['loggers']:
logger = logging.getLogger(logger_name)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
handlers.add(handler)
for handler in handlers:
handler.addFilter(ContextFilter)
if 'dsn' in settings.RAVEN_CONFIG:
sentry = Sentry(app, dsn=settings.RAVEN_CONFIG['dsn'],
logging=True, level=logging.ERROR)
server = WSGIServer(('127.0.0.1', settings.WORKER_PORT), app)
server.serve_forever()
|
[
"simon@simonmweber.com"
] |
simon@simonmweber.com
|
1a54be7da36fd5d56bab35a6d45b2e9556fe935c
|
eee29769171e339ed19eaf35feb1eb3a9774db27
|
/hangman_game.py
|
db8a13ddd9031ae2fe10000d5f0425e11cb99a0f
|
[] |
no_license
|
ben-j-ellis/Basic-Hangman-Game
|
582978bd9b8bba2cb05e1cdce7ac86f13ba863c2
|
ccfa3ab9484b30063809b23467cc262aa3b37f40
|
refs/heads/master
| 2021-05-05T20:43:18.603186 | 2017-12-26T23:19:31 | 2017-12-26T23:19:31 | 115,461,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21 |
py
|
# Basic-Hangman-Game
|
[
"benjakobellis@gmail.com"
] |
benjakobellis@gmail.com
|
d6de9164815f572baff60854d18af972b0bd9a28
|
313eb1f49406ccfa179b0df4493c82b439f11593
|
/lazypt/lazypt.py
|
7e0e93c9ef246f4f14e501571bb723e958fbe417
|
[
"MIT"
] |
permissive
|
AndyCyberSec/lazypt
|
72ecfad18530a62682ca35ffcf8e7e88dbf4ba18
|
22a67275eaee9ecedd193463546199aa977670d8
|
refs/heads/master
| 2020-05-19T01:56:27.412358 | 2019-05-16T10:28:45 | 2019-05-16T10:28:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,024 |
py
|
import pyperclip
import yaml

from .pathmanager import path_finder


class RevShell(object):
    """Looks up a reverse-shell payload by language and copies it to the clipboard."""

    def __init__(self, lang, host, port):
        self._lang = lang
        self._host = host
        self._port = port
        self._msg = '\n[+] Reverse shell code copied to the clipboard!'
        # Note: get_data is written as an instance method but is invoked through
        # the class, passing the class itself as `self` (which the method never uses).
        self._data = RevShell.get_data(RevShell, 'yaml/revshell.yaml')

    def get_code(self):
        for d in self._data:
            for k, v in d.items():
                if self._lang == k:
                    return d[self._lang]['code']
        return "[-] --lang not found in YAML file."

    def _format_code(self):
        # Substitute the listener address into the payload template.
        code = self.get_code().replace('{ip}', self._host)
        code = code.replace('{port}', self._port)
        # code = self.get_code().format(self._host, self._port)
        pyperclip.copy(code)
        print(code)
        return self._msg

    def get_data(self, dbfile):
        script_path = path_finder()
        with open(script_path + dbfile) as f:
            return yaml.safe_load(f)

    def get_langs(self, dbfile):
        data = RevShell.get_data(RevShell, dbfile)
        langs = []
        for d in data:
            for k, v in d.items():
                langs.append(k)
        return langs

    def __str__(self):
        return self._format_code()


class ShellFile(object):
    """Writes a shell payload for the given language out to <filename>.<lang>."""

    def __init__(self, lang, filename):
        self._lang = lang
        self._filename = filename
        self._data = RevShell.get_data(RevShell, 'yaml/shellfile.yaml')
        self._make = self._make_file()

    def _get_code(self):
        for d in self._data:
            for k, v in d.items():
                if self._lang == k:
                    return d[self._lang]['code']
        return "[-] --lang not found in YAML file."

    def _make_file(self):
        filename = self._filename + '.' + self._lang
        try:
            f = open(filename, 'w+')
            shell = self._get_code()
            f.write(shell)
            f.close()
            return '[+] File %s created' % filename
        except FileNotFoundError as identifier:
            return str(identifier)

    def __str__(self):
        return self._make


class OpenSSL(object):
    """Copies canned openssl one-liners, keyed by an 'ossl<N>' action name."""

    def __init__(self, action):
        self.action = action
        self.data = RevShell.get_data(RevShell, 'yaml/openssl.yaml')
        self._msg = '\n[+] Openssl code copied to the clipboard!'

    def get_data(self, node):
        for d in self.data:
            for k, v in d.items():
                if k == self.action:
                    pyperclip.copy(v[node])
                    return self._msg
                    # return v[node]

    def get_all_info(self):
        infos = []
        temp = {}
        i = 1
        for d in self.data:
            temp['ossl' + str(i)] = d['ossl' + str(i)]['info']
            infos.append(temp)
            temp = {}
            i += 1
        return infos
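
A hypothetical driver for the three classes above, to make the flow concrete. The import path, the language keys ('bash', 'php'), and the 'ossl1'/'info' YAML keys are assumptions inferred from the code, not verified against the package's YAML files.

# Hypothetical usage sketch; argument values are guesses at the YAML keys.
from lazypt.lazypt import RevShell, ShellFile, OpenSSL

shell = RevShell('bash', '10.0.0.5', '4444')
print(shell)            # __str__ -> _format_code(): prints the payload and copies it

dropper = ShellFile('php', 'shell')
print(dropper)          # reports '[+] File shell.php created' (or the error string)

ossl = OpenSSL('ossl1')
print(ossl.get_data('info'))  # copies the matching entry's 'info' node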
|
[
"noreply@github.com"
] |
AndyCyberSec.noreply@github.com
|
b2a8e001c69a95a4fb2a947d732d78d6d7d8c012
|
632b94beca62f7c8af5ae1d1e8e095a352600429
|
/build/ros_controllers/ros_controllers/position_controllers/catkin_generated/pkg.installspace.context.pc.py
|
4ddc4e67bff606fc70fdb62976ffda91a4cd6eb2
|
[] |
no_license
|
Haoran-Zhao/US_UR3
|
d9eb17a7eceed75bc623be4f4db417a38f5a9f8d
|
a0c25e1daf613bb45dbd08075e3185cb9cd03657
|
refs/heads/master
| 2020-08-31T07:02:45.403001 | 2020-05-27T16:58:52 | 2020-05-27T16:58:52 | 218,629,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 507 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_interface;forward_command_controller".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lposition_controllers".split(';') if "-lposition_controllers" != "" else []
PROJECT_NAME = "position_controllers"
PROJECT_SPACE_DIR = "/home/haoran/US_UR3/install"
PROJECT_VERSION = "0.13.6"
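
Editor's note: the `.split(';') if ... else []` guard in the generated file above exists because splitting an empty string yields `['']`, not `[]`. A standalone illustration:

# Why the generated code guards its splits: ''.split(';') is not [].
raw_libs = "-lposition_controllers"
raw_none = ""

print(raw_none.split(';'))                           # [''] -- a one-element list
libs = raw_libs.split(';') if raw_libs != "" else []
deps = raw_none.split(';') if raw_none != "" else []
print(libs, deps)                                    # ['-lposition_controllers'] []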
|
[
"zhaohaorandl@gmail.com"
] |
zhaohaorandl@gmail.com
|
2fea31c0cd40ed40aa5a152c571bd75391e2bf24
|
b47f2e3f3298388b1bcab3213bef42682985135e
|
/experiments/heat-3d/tmp_files/6909.py
|
efaecf45f0b280f386864f84a69acd803b7e70e3
|
[
"BSD-2-Clause"
] |
permissive
|
LoopTilingBenchmark/benchmark
|
29cc9f845d323431e3d40e878cbfc6d1aad1f260
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
refs/heads/master
| 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/6909.c')
procedure('kernel_heat_3d')
loop(0)
tile(0,2,8,2)
tile(0,4,64,3)
tile(0,6,128,4)
tile(1,2,8,2)
tile(1,4,64,3)
tile(1,6,128,4)
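
For readers unfamiliar with CHiLL: each `tile(loop, level, size, ...)` directive above blocks one dimension of the heat-3d loop nest. Below is a rough pure-Python sketch of what tiling does to an iteration space; it illustrates the general transformation only, not CHiLL's exact argument semantics.

# Minimal illustration of loop tiling: the tiled version walks the same
# iteration space, but in cache-friendly blocks.
N, TILE = 16, 4

untiled = [(i, j) for i in range(N) for j in range(N)]

tiled = [
    (i, j)
    for ii in range(0, N, TILE)                # tile (block) loops
    for jj in range(0, N, TILE)
    for i in range(ii, min(ii + TILE, N))      # intra-tile loops
    for j in range(jj, min(jj + TILE, N))
]

# The same points are visited, just in a different, blocked order.
assert sorted(untiled) == sorted(tiled)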
|
[
"nashenruoyang@163.com"
] |
nashenruoyang@163.com
|
a0d2ede9601c5ec0eb93ff63f0ad1a094d8f91dc
|
72d67e03705bd3ca88bacfe171b1553592270b9a
|
/profiles_project/profiles_api/migrations/0001_initial.py
|
88bd8264a6c93dd2ccb1fd60fcf585a3d01c2615
|
[
"MIT"
] |
permissive
|
sandeepchandra/profiles-rest-api
|
1d9af2eff5377cf6374a82c0409b2ede40cd0bde
|
001e23575e7a51ef3fde32422b21c23646f3579c
|
refs/heads/main
| 2023-01-13T23:33:25.507957 | 2020-11-26T17:24:58 | 2020-11-26T17:24:58 | 315,902,308 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,706 |
py
|
# Generated by Django 2.2 on 2020-11-25 17:36

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
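
A model definition that would generate a migration like the one above, reconstructed from its field list; this is not the repository's actual source, and the custom manager such a model needs (e.g. for createsuperuser) is omitted here.

# Sketch of a models.py behind the UserProfile migration. AbstractBaseUser
# contributes password/last_login; PermissionsMixin contributes is_superuser,
# groups, and user_permissions, matching the generated fields.
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin


class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users, keyed by email instead of username."""
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']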
|
[
"sandeepchandragiri17@gmail.com"
] |
sandeepchandragiri17@gmail.com
|
df1314d77dfbe8fe0729d21b38bed7fc34f2f51c
|
81a909f3220f1346c431a96cac6b7e8f2d9b8b6c
|
/BackEnd/directory/migrations/0001_initial.py
|
255d1e0d7001f0658d108e7e998b5d615c794140
|
[] |
no_license
|
avikalsagar/Web-Dev-for-an-NGO
|
0a8da7e7a424a4a93b8a28d3911ea6c4ddd232d1
|
0549285ff77dfc966ac8fa66250e03fa5407c35b
|
refs/heads/main
| 2023-07-03T08:00:09.731334 | 2021-08-09T06:40:02 | 2021-08-09T06:40:02 | 394,522,116 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,146 |
py
|
# Generated by Django 3.1.2 on 2020-11-30 17:52

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Registration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('location', models.CharField(default='Mangalore', max_length=50)),
                ('category', models.CharField(choices=[('NGO', 'NGO'), ('Old Age Home', 'Old Age Home'), ('Orphanage', 'Orphanage'), ('Social Initiative', 'Social Initiative')], default='NGO', max_length=30)),
                ('description', models.TextField(default='Description', max_length=200)),
                ('donation_required', models.BooleanField(default=False)),
                ('amt_needed', models.DecimalField(decimal_places=2, default=0.0, max_digits=7)),
                ('amt_raised', models.DecimalField(decimal_places=2, default=0.0, max_digits=7)),
            ],
        ),
    ]
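
The model behind this migration is directly recoverable from its field list; a sketch follows (the repository's actual models.py may differ in ordering or naming of the choices constant).

# Sketch of the Registration model implied by the migration above.
from django.db import models

CATEGORY_CHOICES = [
    ('NGO', 'NGO'),
    ('Old Age Home', 'Old Age Home'),
    ('Orphanage', 'Orphanage'),
    ('Social Initiative', 'Social Initiative'),
]


class Registration(models.Model):
    name = models.CharField(max_length=50)
    location = models.CharField(max_length=50, default='Mangalore')
    category = models.CharField(max_length=30, choices=CATEGORY_CHOICES, default='NGO')
    description = models.TextField(max_length=200, default='Description')
    donation_required = models.BooleanField(default=False)
    amt_needed = models.DecimalField(max_digits=7, decimal_places=2, default=0.0)
    amt_raised = models.DecimalField(max_digits=7, decimal_places=2, default=0.0)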
|
[
"adithya.kannan19@gmail.com"
] |
adithya.kannan19@gmail.com
|